commit 772a90e3913dea67e987fa12837fb57e335d77c3 Author: Tsubajashi Date: Wed Nov 19 01:52:34 2025 +0100 hopes and dreams diff --git a/AX900 Deb驱动安装教程_Linux_V3.0.pdf b/AX900 Deb驱动安装教程_Linux_V3.0.pdf new file mode 100644 index 0000000..b718fce Binary files /dev/null and b/AX900 Deb驱动安装教程_Linux_V3.0.pdf differ diff --git a/aic8800d80fdrvpackage.deb b/aic8800d80fdrvpackage.deb new file mode 100644 index 0000000..280fc7e Binary files /dev/null and b/aic8800d80fdrvpackage.deb differ diff --git a/control.tar.gz b/control.tar.gz new file mode 100644 index 0000000..a2a58c2 Binary files /dev/null and b/control.tar.gz differ diff --git a/data.tar.gz b/data.tar.gz new file mode 100644 index 0000000..6b306c4 Binary files /dev/null and b/data.tar.gz differ diff --git a/debian-binary b/debian-binary new file mode 100644 index 0000000..cd5ac03 --- /dev/null +++ b/debian-binary @@ -0,0 +1 @@ +2.0 diff --git a/usr/src/AIC8800/aic.rules b/usr/src/AIC8800/aic.rules new file mode 100644 index 0000000..83c06f8 --- /dev/null +++ b/usr/src/AIC8800/aic.rules @@ -0,0 +1,4 @@ +KERNEL=="sd*", ATTRS{idVendor}=="a69c", ATTRS{idProduct}=="5721", SYMLINK+="aicudisk", RUN+="/usr/bin/eject /dev/%k" +KERNEL=="sd*", ATTRS{idVendor}=="a69c", ATTRS{idProduct}=="5723", SYMLINK+="tendaudisk", RUN+="/usr/bin/eject /dev/%k" +KERNEL=="sd*", ATTRS{idVendor}=="a69c", ATTRS{idProduct}=="5724", SYMLINK+="ugreenudisk", RUN+="/usr/bin/eject /dev/%k" + diff --git a/usr/src/AIC8800/drivers/aic8800/Kconfig b/usr/src/AIC8800/drivers/aic8800/Kconfig new file mode 100644 index 0000000..44970d1 --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/Kconfig @@ -0,0 +1,10 @@ +config AIC_WLAN_SUPPORT + bool "AIC wireless Support" + default n + help + This is support for aic wireless chip. + +if AIC_WLAN_SUPPORT +source "drivers/net/wireless/aic8800/aic8800_fdrv/Kconfig" +source "drivers/net/wireless/aic8800/aic_load_fw/Kconfig" +endif diff --git a/usr/src/AIC8800/drivers/aic8800/Makefile b/usr/src/AIC8800/drivers/aic8800/Makefile new file mode 100644 index 0000000..71b2fad --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/Makefile @@ -0,0 +1,80 @@ +CONFIG_AIC_LOADFW_SUPPORT := m +CONFIG_AIC8800_WLAN_SUPPORT := m + +obj-$(CONFIG_AIC_LOADFW_SUPPORT) += aic_load_fw/ +obj-$(CONFIG_AIC8800_WLAN_SUPPORT) += aic8800_fdrv/ + +########## config option ########## +export CONFIG_USE_FW_REQUEST = n +export CONFIG_PREALLOC_RX_SKB = y +export CONFIG_PREALLOC_TXQ = y +################################### + +########## platform support list ########## +export CONFIG_PLATFORM_ROCKCHIP = n +export CONFIG_PLATFORM_ALLWINNER = n +export CONFIG_PLATFORM_AMLOGIC = n +export CONFIG_PLATFORM_HI = n +export CONFIG_PLATFORM_UBUNTU = y + +ifeq ($(CONFIG_PLATFORM_ROCKCHIP), y) +ARCH = arm64 +KDIR = /home/yaya/E/Rockchip/3566/firefly/Android11.0/Firefly-RK356X_Android11.0_git_20210824/RK356X_Android11.0/kernel +CROSS_COMPILE = /home/yaya/E/Rockchip/3566/firefly/Android11.0/Firefly-RK356X_Android11.0_git_20210824/RK356X_Android11.0/prebuilts/gcc/linux-x86/aarch64/gcc-linaro-6.3.1-2017.05-x86_64_aarch64-linux-gnu/bin/aarch64-linux-gnu- +ccflags-y += -DANDROID_PLATFORM +endif + +ifeq ($(CONFIG_PLATFORM_ALLWINNER), y) +KDIR = /home/yaya/E/Allwinner/R818/R818/AndroidQ/lichee/kernel/linux-4.9 +ARCH = arm64 +CROSS_COMPILE = /home/yaya/E/Allwinner/R818/R818/AndroidQ/lichee/out/gcc-linaro-5.3.1-2016.05-x86_64_aarch64-linux-gnu/bin/aarch64-linux-gnu- +ccflags-y += -DANDROID_PLATFORM +endif + +ifeq ($(CONFIG_PLATFORM_AMLOGIC), y) +ccflags-y += -DANDROID_PLATFORM +ARCH = arm +CROSS_COMPILE 
= /home/yaya/D/Workspace/CyberQuantum/JinHaoYue/amls905x3/SDK/20191101-0tt-asop/android9.0/prebuilts/gcc/linux-x86/arm/arm-linux-androideabi-4.9/bin/arm-linux-androidkernel- +KDIR = /home/yaya/D/Workspace/CyberQuantum/JinHaoYue/amls905x3/SDK/20191101-0tt-asop/android9.0/out/target/product/u202/obj/KERNEL_OBJ/ +endif + +ifeq ($(CONFIG_PLATFORM_HI), y) +ccflags-y += -DANDROID_PLATFORM +ARCH = arm +CROSS_COMPILE = /home/yaya/D/Workspace/CyberQuantum/JinHaoYue/amls905x3/SDK/20191101-0tt-asop/android9.0/prebuilts/gcc/linux-x86/arm/arm-linux-androideabi-4.9/bin/arm-linux-androidkernel- +KDIR = /home/yaya/D/Workspace/CyberQuantum/JinHaoYue/amls905x3/SDK/20191101-0tt-asop/android9.0/out/target/product/u202/obj/KERNEL_OBJ/ +endif + +ifeq ($(CONFIG_PLATFORM_UBUNTU), y) +KDIR = /lib/modules/$(shell uname -r)/build +PWD = $(shell pwd) +KVER = $(shell uname -r) +MODDESTDIR = /lib/modules/$(KVER)/kernel/drivers/net/wireless/aic8800 +SUBARCH = $(shell uname -m | sed -e s/i.86/i386/ -e s/armv.l/arm/ -e s/aarch64/arm64/ -e s/loongarch64/loongarch/ -e s/loong64/loongarch/) +ARCH ?= $(SUBARCH) +CROSS_COMPILE ?= +endif + +########################################### + +MAKEFLAGS +=-j$(shell nproc) + +all: modules +modules: + make -C $(KDIR) M=$(PWD) ARCH=$(ARCH) CROSS_COMPILE=$(CROSS_COMPILE) modules + +install: + mkdir -p $(MODDESTDIR) + install -p -m 644 aic_load_fw/aic_load_fw.ko $(MODDESTDIR)/ + install -p -m 644 aic8800_fdrv/aic8800_fdrv.ko $(MODDESTDIR)/ + /sbin/depmod -a ${KVER} + +uninstall: + rm -rfv $(MODDESTDIR)/aic_load_fw.ko + rm -rfv $(MODDESTDIR)/aic8800_fdrv.ko + /sbin/depmod -a ${KVER} + +clean: + cd aic_load_fw/;make clean;cd .. + cd aic8800_fdrv/;make clean;cd .. + rm -rf modules.order Module.symvers .modules.order.cmd .Module.symvers.cmd .tmp_versions/ diff --git a/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/Kconfig b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/Kconfig new file mode 100644 index 0000000..92d5986 --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/Kconfig @@ -0,0 +1,36 @@ +config AIC8800_WLAN_SUPPORT + tristate "AIC8800 wlan Support" + help + This is support for aic wifi driver. + +#choice +# depends on AIC8800_WLAN_SUPPORT +# prompt "Select 8800 support platform" +# +# config PLATFORM_ROCHCHIP +# bool "PLATFORM_ROCHCHIP" +# depends on AIC8800_WLAN_SUPPORT +# +# config PLATFORM_ALLWINNER +# bool "PLATFORM_ALLWINNER" +# depends on AIC8800_WLAN_SUPPORT +# +# config PLATFORM_AMLOGIC +# bool "PLATFORM_AMLOGIC" +# depends on AIC8800_WLAN_SUPPORT +# +# config PLATFORM_UBUNTU +# bool "PLATFORM_UBUNTU" +# depends on AIC8800_WLAN_SUPPORT +# +#endchoice +# +#config USE_5G +# bool "AIC8800 force enable 5G" +# ---help--- +# This is parameter that force enable 5G. +# +#config HE_FOR_OLD_KERNEL +# bool "AIC8800 support AX for old kernel" +# ---help--- +# This is parameter that enable AX for old kernel. 
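For reference, with CONFIG_PLATFORM_UBUNTU=y the top-level Makefile above builds both out-of-tree modules against the running kernel's headers (KDIR defaults to /lib/modules/$(uname -r)/build) and the install target copies them into /lib/modules/$(KVER)/kernel/drivers/net/wireless/aic8800 before running depmod. A minimal build/load sketch based on those targets — it assumes the tree is unpacked under usr/src/AIC8800/drivers/aic8800, that matching linux-headers are installed, and that aic_load_fw (the firmware loader) is loaded before aic8800_fdrv:

    cd usr/src/AIC8800/drivers/aic8800
    make                      # builds aic_load_fw.ko and aic8800_fdrv.ko against the running kernel
    sudo make install         # installs both modules and runs depmod -a
    sudo modprobe aic_load_fw
    sudo modprobe aic8800_fdrv
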
diff --git a/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/Makefile b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/Makefile new file mode 100644 index 0000000..d6f2052 --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/Makefile @@ -0,0 +1,412 @@ +EXTRA_CFLAGS += $(USER_EXTRA_CFLAGS) +EXTRA_CFLAGS += -Wno-implicit-fallthrough +#EXTRA_CFLAGS += -Wno-unused-variable + +RWNX_VERS_NUM := 6.4.3.0 + +CONFIG_AIC8800_WLAN_SUPPORT = m +MODULE_NAME = aic8800_fdrv +CONFIG_COUNTRY_CODE = "00" + +# Support of bootrom start +CONFIG_START_FROM_BOOTROM = y + +# Support of pmic setting, new version bootrom avaliable +CONFIG_PMIC_SETTING ?=n + +# Select 8800DC/DW DCDC_VRF mode, check your board +CONFIG_VRF_DCDC_MODE = y + +# ROM patch enabled option +CONFIG_ROM_PATCH_EN ?=y + +# +# WAITING FOR KCONFIG { +# +CONFIG_RWNX_SOFTMAC ?= n +CONFIG_RWNX_FULLMAC ?= y +CONFIG_RWNX_FHOST ?= n + +# +# DEBUG OPTIONS +CONFIG_RWNX_UM_HELPER_DFLT ?= "/dini/dini_bin/rwnx_umh.sh" +CONFIG_AIC_FW_PATH = "/vendor/etc/firmware" +export CONFIG_AIC_FW_PATH + +# +# FW ARCH: +CONFIG_RWNX_SDM ?= n +CONFIG_RWNX_TL4 ?= n + +# IPC version +CONFIG_RWNX_OLD_IPC ?= n + +# Support of P2P DebugFS for enabling/disabling NoA and OppPS +CONFIG_RWNX_P2P_DEBUGFS ?= y + +# +# } // WAITING FOR KCONFIG +# + +# Enable A-MSDU support (need FW support) +## Select this if FW is compiled with AMSDU support +CONFIG_RWNX_SPLIT_TX_BUF ?= n +## Select this TO send AMSDU +CONFIG_RWNX_AMSDUS_TX ?= n + +# Enable BFMER support (need FW support) +CONFIG_RWNX_BFMER ?= n + +CONFIG_SDIO_SUPPORT =n +CONFIG_USB_SUPPORT =y +CONFIG_RX_REORDER =y +CONFIG_ARP_OFFLOAD =y +CONFIG_USE_5G = y +CONFIG_RADAR_OR_IR_DETECT =n +CONFIG_DOWNLOAD_FW =n +CONFIG_RFTEST=y +CONFIG_USB_BT=y +CONFIG_MAC_RANDOM_IF_NO_MAC_IN_EFUSE = y +CONFIG_WPA3_FOR_OLD_KERNEL = n +CONFIG_HE_FOR_OLD_KERNEL = n +CONFIG_VHT_FOR_OLD_KERNEL = n +# CONFIG_COEX = n for BT_ONLY, CONFIG_COEX =y for combo and sw +CONFIG_COEX = y +CONFIG_ALIGN_8BYTES = y +CONFIG_TXRX_THREAD_PRIO = y +CONFIG_USB_ALIGN_DATA = y +CONFIG_RX_TASKLET = n +CONFIG_TX_TASKLET = n +CONFIG_RX_NETIF_RECV_SKB = y +CONFIG_BR_SUPPORT = n +CONFIG_USB_MSG_OUT_EP = y +CONFIG_USB_MSG_IN_EP = y +CONFIG_USB_RX_REASSEMBLE = n +CONFIG_WOWLAN = n + +#DCDW support tx aggr, D80 support both +CONFIG_USB_RX_AGGR = n +CONFIG_USB_TX_AGGR = n + +CONFIG_USB_NO_TRANS_DMA_MAP = n +CONFIG_GPIO_WAKEUP = n +CONFIG_CREATE_TRACE_POINTS = n +CONFIG_SUPPORT_REALTIME_CHANGE_MAC = y +CONFIG_USE_USB_ZERO_PACKET = y +CONFIG_DEBUG_FS = n +CONFIG_STA_SCAN_WHEN_P2P_WORKING = y +CONFIG_SET_VENDOR_EXTENSION_IE = n +CONFIG_VENDOR_GPIO = n +CONFIG_FWLOG_EN = n +CONFIG_FOR_IPCAM = n +CONFIG_5M10M = n +# Need to set fw path in BOARD_KERNEL_CMDLINE +CONFIG_USE_FW_REQUEST ?= n +CONFIG_USE_P2P0 = n +CONFIG_ONE_TXQ = n +CONFIG_PER_STA_FC = n +CONFIG_PREALLOC_RX_SKB ?= n +CONFIG_PREALLOC_TXQ ?= y +CONFIG_USE_WIRELESS_EXT = y +CONFIG_DPD = y +CONFIG_FORCE_DPD_CALIB = y +CONFIG_LOFT_CALIB = n +CONFIG_GKI = n +CONFIG_SCHED_SCAN = n +CONFIG_TEMP_COMP = n +CONFIG_POWER_LIMIT = n +CONFIG_EXT_FEM_8800DCDW = n +# CONFIG_MCC = n for sta and p2p concurrent in same channel. 
+CONFIG_MCC = y +CONFIG_LOAD_BT_PATCH_IN_FDRV = n +CONFIG_DYNAMIC_PWR = n +CONFIG_DYNAMIC_PERPWR = n +CONFIG_BAND_STEERING = n +CONFIG_PRBREQ_REPORT = n + +#support D80X2 can write rf result to file +CONFIG_WRITE_FILE_D80X2 = n + +ifneq ($(CONFIG_WIRELESS_EXT), y) +CONFIG_USE_WIRELESS_EXT = n +endif + +ifeq ($(CONFIG_EXT_FEM_8800DCDW), y) +CONFIG_DPD = n +CONFIG_FORCE_DPD_CALIB = n +CONFIG_LOFT_CALIB = y +endif + +# Support of MU-MIMO transmission (need FW support) +ifeq ($(CONFIG_RWNX_BFMER), y) +CONFIG_RWNX_MUMIMO_TX ?= n +else +CONFIG_RWNX_MUMIMO_TX = n +endif + +# Enable handling of radar event +CONFIG_RWNX_RADAR ?= y + +# Enable HW queue for Broadcast/Multicast traffic (need FW support) +CONFIG_RWNX_BCMC ?= y + +# Enable Monitor+Data interface support (need FW support) +CONFIG_RWNX_MON_DATA =n +CONFIG_RWNX_MON_XMIT ?= n +CONFIG_RWNX_MON_RXFILTER ?= n + +CONFIG_FILTER_TCP_ACK =y + +# extra DEBUG config +CONFIG_RWNX_SW_PROFILING ?= n +CONFIG_RWNX_DBG ?= y +BR_NAME = br0 + +obj-$(CONFIG_AIC8800_WLAN_SUPPORT) := $(MODULE_NAME).o +$(MODULE_NAME)-y := \ + rwnx_wakelock.o \ + rwnx_msg_tx.o \ + rwnx_msg_rx.o \ + rwnx_utils.o \ + rwnx_cmds.o \ + rwnx_irqs.o \ + rwnx_cfgfile.o \ + rwnx_strs.o \ + rwnx_rx.o \ + rwnx_tx.o \ + rwnx_txq.o \ + rwnx_main.o \ + rwnx_mod_params.o \ + rwnx_mesh.o \ + rwnx_platform.o \ + rwnx_pci.o \ + rwnx_dini.o \ + rwnx_v7.o \ + ipc_host.o \ + rwnx_tdls.o \ + regdb.o \ + md5.o \ + aic_vendor.o \ + aic_priv_cmd.o \ + aicwf_compat_8800dc.o \ + aicwf_compat_8800d80.o \ + aicwf_compat_8800d80x2.o + +$(MODULE_NAME)-$(CONFIG_BAND_STEERING) += aicwf_manager.o \ + aicwf_steering.o +$(MODULE_NAME)-$(CONFIG_BR_SUPPORT) += aic_br_ext.o +$(MODULE_NAME)-$(CONFIG_RWNX_RADAR) += rwnx_radar.o +$(MODULE_NAME)-$(CONFIG_DEBUG_FS) += rwnx_debugfs.o +$(MODULE_NAME)-$(CONFIG_DEBUG_FS) += rwnx_fw_trace.o +$(MODULE_NAME)-$(CONFIG_NL80211_TESTMODE) += rwnx_testmode.o +$(MODULE_NAME)-$(CONFIG_RWNX_BFMER) += rwnx_bfmer.o +$(MODULE_NAME)-$(CONFIG_RWNX_MUMIMO_TX) += rwnx_mu_group.o +$(MODULE_NAME)-$(CONFIG_SDIO_SUPPORT) += sdio_host.o +$(MODULE_NAME)-$(CONFIG_SDIO_SUPPORT) += aicwf_txrxif.o +$(MODULE_NAME)-$(CONFIG_SDIO_SUPPORT) += aicwf_sdio.o +$(MODULE_NAME)-$(CONFIG_FILTER_TCP_ACK) += aicwf_tcp_ack.o + +$(MODULE_NAME)-$(CONFIG_USB_SUPPORT) += usb_host.o +$(MODULE_NAME)-$(CONFIG_USB_SUPPORT) += aicwf_txrxif.o +$(MODULE_NAME)-$(CONFIG_USB_SUPPORT) += aicwf_usb.o +$(MODULE_NAME)-$(CONFIG_USE_WIRELESS_EXT) += aicwf_wext_linux.o +$(MODULE_NAME)-$(CONFIG_GKI) += rwnx_gki.o + +ccflags-$(CONFIG_DEBUG_FS) += -DCONFIG_RWNX_DEBUGFS +ccflags-$(CONFIG_DEBUG_FS) += -DCONFIG_RWNX_UM_HELPER_DFLT=\"$(CONFIG_RWNX_UM_HELPER_DFLT)\" +ccflags-$(CONFIG_RWNX_P2P_DEBUGFS) += -DCONFIG_RWNX_P2P_DEBUGFS + +# FW VARS +ccflags-y += -DNX_VIRT_DEV_MAX=4 + +#for 8800D and DCDW u01 +#ccflags-y += -DNX_REMOTE_STA_MAX=8 + +#for 8800DCDW u02 +ccflags-y += -DNX_REMOTE_STA_MAX_FOR_OLD_IC=8 +ccflags-y += -DNX_REMOTE_STA_MAX=32 + +ccflags-y += -DNX_MU_GROUP_MAX=62 +ccflags-y += -DNX_TXDESC_CNT=64 +ccflags-y += -DNX_TX_MAX_RATES=4 +ccflags-y += -DNX_CHAN_CTXT_CNT=3 + +# FW ARCH: +ccflags-$(CONFIG_RWNX_SDM) += -DCONFIG_RWNX_SDM +ccflags-$(CONFIG_RWNX_TL4) += -DCONFIG_RWNX_TL4 +ccflags-$(CONFIG_RWNX_OLD_IPC) += -DCONFIG_RWNX_OLD_IPC +ccflags-$(CONFIG_START_FROM_BOOTROM) += -DCONFIG_START_FROM_BOOTROM +ccflags-$(CONFIG_PMIC_SETTING) += -DCONFIG_PMIC_SETTING +ccflags-$(CONFIG_VRF_DCDC_MODE) += -DCONFIG_VRF_DCDC_MODE +ccflags-$(CONFIG_ROM_PATCH_EN) += -DCONFIG_ROM_PATCH_EN +ccflags-$(CONFIG_WPA3_FOR_OLD_KERNEL) += 
-DCONFIG_WPA3_FOR_OLD_KERNEL +ccflags-$(CONFIG_HE_FOR_OLD_KERNEL) += -DCONFIG_HE_FOR_OLD_KERNEL +ccflags-$(CONFIG_VHT_FOR_OLD_KERNEL) += -DCONFIG_VHT_FOR_OLD_KERNEL +ccflags-$(CONFIG_COEX) += -DCONFIG_COEX + +ccflags-y += -DCONFIG_RWNX_FULLMAC +ccflags-y += -I$(src)/. +ccflags-$(CONFIG_RWNX_RADAR) += -DCONFIG_RWNX_RADAR +ccflags-$(CONFIG_RWNX_MON_DATA) += -DCONFIG_RWNX_MON_DATA +ccflags-$(CONFIG_RWNX_MON_XMIT) += -DCONFIG_RWNX_MON_XMIT +ccflags-$(CONFIG_RWNX_MON_RXFILTER) += -DCONFIG_RWNX_MON_RXFILTER +ccflags-$(CONFIG_RWNX_BFMER) += -DCONFIG_RWNX_BFMER +ccflags-$(CONFIG_RWNX_SPLIT_TX_BUF) += -DCONFIG_RWNX_SPLIT_TX_BUF +ifeq ($(CONFIG_RWNX_SPLIT_TX_BUF), y) +ccflags-$(CONFIG_RWNX_AMSDUS_TX) += -DCONFIG_RWNX_AMSDUS_TX +endif +ccflags-$(CONFIG_RWNX_DBG) += -DCONFIG_RWNX_DBG +ccflags-$(CONFIG_RWNX_SW_PROFILING) += -DCONFIG_RWNX_SW_PROFILING +ccflags-$(CONFIG_RWNX_MUMIMO_TX) += -DCONFIG_RWNX_MUMIMO_TX +ccflags-$(CONFIG_RFTEST) += -DCONFIG_RFTEST +ccflags-$(CONFIG_MCC) += -DCONFIG_MCC +ccflags-$(CONFIG_LOAD_BT_PATCH_IN_FDRV) += -DCONFIG_LOAD_BT_PATCH_IN_FDRV +ccflags-$(CONFIG_DYNAMIC_PWR) += -DCONFIG_DYNAMIC_PWR +ccflags-$(CONFIG_DYNAMIC_PERPWR) += -DCONFIG_DYNAMIC_PERPWR +ccflags-$(CONFIG_BAND_STEERING) += -DCONFIG_BAND_STEERING +ccflags-$(CONFIG_PRBREQ_REPORT) += -DCONFIG_PRBREQ_REPORT +ccflags-$(CONFIG_WRITE_FILE_D80X2) += -DRF_WRITE_FILE + +ifeq ($(CONFIG_SDIO_SUPPORT), y) +ccflags-y += -DAICWF_SDIO_SUPPORT +endif + +ifeq ($(CONFIG_USB_SUPPORT), y) +ccflags-y += -DAICWF_USB_SUPPORT +endif +ifeq ($(CONFIG_BR_SUPPORT), y) +ccflags-y += -DCONFIG_BR_SUPPORT +ccflags-y += '-DCONFIG_BR_SUPPORT_BRNAME="'$(BR_NAME)'"' +endif + +ifeq ($(CONFIG_RWNX_MUMIMO_TX), y) +ccflags-y += -DCONFIG_USER_MAX=2 +else +ccflags-y += -DCONFIG_USER_MAX=1 +endif + +ifeq ($(CONFIG_RWNX_BCMC), y) +ccflags-y += -DNX_TXQ_CNT=5 +else +ccflags-y += -DNX_TXQ_CNT=4 +endif + +# For old kernel (<=3.19) +ifeq ($(shell test $(VERSION) -lt 4 -a "$(CONFIG_VENDOR_RWNX)" = y ; echo $$?),0) +ccflags-y += -DCONFIG_VENDOR_RWNX_VHT_NO80 +endif + +ccflags-$(CONFIG_RX_REORDER) += -DAICWF_RX_REORDER +ccflags-$(CONFIG_ARP_OFFLOAD) += -DAICWF_ARP_OFFLOAD +ccflags-$(CONFIG_USE_5G) += -DUSE_5G +ccflags-$(CONFIG_RADAR_OR_IR_DETECT) += -DCONFIG_RADAR_OR_IR_DETECT +ccflags-$(CONFIG_DOWNLOAD_FW) += -DCONFIG_DOWNLOAD_FW +ccflags-$(CONFIG_USB_BT) += -DCONFIG_USB_BT +ccflags-$(CONFIG_ALIGN_8BYTES) += -DCONFIG_ALIGN_8BYTES +ccflags-$(CONFIG_TXRX_THREAD_PRIO) += -DCONFIG_TXRX_THREAD_PRIO +ccflags-$(CONFIG_USB_ALIGN_DATA) += -DCONFIG_USB_ALIGN_DATA +ccflags-$(CONFIG_MAC_RANDOM_IF_NO_MAC_IN_EFUSE) += -DCONFIG_MAC_RANDOM_IF_NO_MAC_IN_EFUSE +ccflags-$(CONFIG_VENDOR_GPIO) += -DCONFIG_VENDOR_GPIO +ccflags-y += -DDEFAULT_COUNTRY_CODE=""\$(CONFIG_COUNTRY_CODE)"\" +ccflags-$(CONFIG_RX_NETIF_RECV_SKB) += -DCONFIG_RX_NETIF_RECV_SKB +ccflags-$(CONFIG_USB_MSG_OUT_EP) += -DCONFIG_USB_MSG_OUT_EP +ccflags-$(CONFIG_USB_MSG_IN_EP) += -DCONFIG_USB_MSG_IN_EP +ccflags-$(CONFIG_USB_RX_REASSEMBLE) += -DCONFIG_USB_RX_REASSEMBLE +ccflags-$(CONFIG_USB_RX_AGGR) += -DCONFIG_USB_RX_AGGR +ccflags-$(CONFIG_USB_TX_AGGR) += -DCONFIG_USB_TX_AGGR +ccflags-$(CONFIG_USB_NO_TRANS_DMA_MAP) += -DCONFIG_USB_NO_TRANS_DMA_MAP +ccflags-$(CONFIG_GPIO_WAKEUP) += -DCONFIG_GPIO_WAKEUP +ccflags-$(CONFIG_CREATE_TRACE_POINTS) += -DCREATE_TRACE_POINTS +ccflags-$(CONFIG_RX_TASKLET) += -DCONFIG_RX_TASKLET +ccflags-$(CONFIG_TX_TASKLET) += -DCONFIG_TX_TASKLET +ccflags-$(CONFIG_USE_USB_ZERO_PACKET) += -DCONFIG_USE_USB_ZERO_PACKET +ccflags-$(CONFIG_STA_SCAN_WHEN_P2P_WORKING) += 
-DCONFIG_STA_SCAN_WHEN_P2P_WORKING +ccflags-$(CONFIG_SUPPORT_REALTIME_CHANGE_MAC) += -DCONFIG_SUPPORT_REALTIME_CHANGE_MAC +ccflags-$(CONFIG_SET_VENDOR_EXTENSION_IE) += -DCONFIG_SET_VENDOR_EXTENSION_IE +ccflags-$(CONFIG_FWLOG_EN) += -DCONFIG_FWLOG_EN +ccflags-$(CONFIG_FOR_IPCAM) += -DCONFIG_FOR_IPCAM -DCONFIG_ONE_TXQ +ccflags-$(CONFIG_5M10M) += -DCONFIG_5M10M +ccflags-$(CONFIG_USE_FW_REQUEST) += -DCONFIG_USE_FW_REQUEST +ccflags-$(CONFIG_USE_P2P0) += -DCONFIG_USE_P2P0 +ccflags-$(CONFIG_ONE_TXQ) += -DCONFIG_ONE_TXQ +ccflags-$(CONFIG_PER_STA_FC) += -DCONFIG_PER_STA_FC +ccflags-$(CONFIG_PREALLOC_RX_SKB) += -DCONFIG_PREALLOC_RX_SKB +ccflags-$(CONFIG_PREALLOC_TXQ) += -DCONFIG_PREALLOC_TXQ +ccflags-$(CONFIG_USE_WIRELESS_EXT) += -DCONFIG_USE_WIRELESS_EXT +ccflags-$(CONFIG_DPD) += -DCONFIG_DPD +ccflags-$(CONFIG_FORCE_DPD_CALIB) += -DCONFIG_FORCE_DPD_CALIB -DCONFIG_DPD +ccflags-$(CONFIG_LOFT_CALIB) += -DCONFIG_LOFT_CALIB +ccflags-$(CONFIG_GKI) += -DCONFIG_GKI +ccflags-$(CONFIG_SCHED_SCAN) += -DCONFIG_SCHED_SCAN +ccflags-$(CONFIG_FILTER_TCP_ACK) += -DCONFIG_FILTER_TCP_ACK +ccflags-$(CONFIG_TEMP_COMP) += -DCONFIG_TEMP_COMP +ccflags-$(CONFIG_POWER_LIMIT) += -DCONFIG_POWER_LIMIT +ccflags-$(CONFIG_EXT_FEM_8800DCDW) += -DCONFIG_EXT_FEM_8800DCDW +ccflags-$(CONFIG_WOWLAN) += -DCONFIG_WOWLAN +MAKEFLAGS +=-j$(shell nproc) + +# Platform support list +CONFIG_PLATFORM_ROCKCHIP ?= n +CONFIG_PLATFORM_ALLWINNER ?= n +CONFIG_PLATFORM_AMLOGIC ?= n +CONFIG_PLATFORM_HI ?= n +CONFIG_PLATFORM_UBUNTU ?= y + +ifeq ($(CONFIG_PLATFORM_ROCKCHIP), y) +ARCH := arm64 +KDIR ?= /home/yaya/E/Rockchip/3566/firefly/Android11.0/Firefly-RK356X_Android11.0_git_20210824/RK356X_Android11.0/kernel +CROSS_COMPILE := /home/yaya/E/Rockchip/3566/firefly/Android11.0/Firefly-RK356X_Android11.0_git_20210824/RK356X_Android11.0/prebuilts/gcc/linux-x86/aarch64/gcc-linaro-6.3.1-2017.05-x86_64_aarch64-linux-gnu/bin/aarch64-linux-gnu- +ccflags-y += -DANDROID_PLATFORM +ccflags-y += -DCONFIG_PLATFORM_ROCKCHIP +endif + +ifeq ($(CONFIG_PLATFORM_ALLWINNER), y) +KDIR ?= /home/yaya/E/Allwinner/R818/R818/AndroidQ/lichee/kernel/linux-4.9 +ARCH ?= arm64 +CROSS_COMPILE ?= /home/yaya/E/Allwinner/R818/R818/AndroidQ/lichee/out/gcc-linaro-5.3.1-2016.05-x86_64_aarch64-linux-gnu/bin/aarch64-linux-gnu- +ccflags-y += -DANDROID_PLATFORM +endif + +ifeq ($(CONFIG_PLATFORM_AMLOGIC), y) +ccflags-y += -DANDROID_PLATFORM +ARCH ?= arm +CROSS_COMPILE ?= /home/yaya/D/Workspace/CyberQuantum/JinHaoYue/amls905x3/SDK/20191101-0tt-asop/android9.0/prebuilts/gcc/linux-x86/arm/arm-linux-androideabi-4.9/bin/arm-linux-androidkernel- +KDIR ?= /home/yaya/D/Workspace/CyberQuantum/JinHaoYue/amls905x3/SDK/20191101-0tt-asop/android9.0/out/target/product/u202/obj/KERNEL_OBJ/ +endif + +ifeq ($(CONFIG_PLATFORM_HI), y) +ccflags-y += -DANDROID_PLATFORM +ccflags-y += -DCONFIG_PLATFORM_HI +ARCH ?= arm +CROSS_COMPILE ?= /home/yaya/D/Workspace/CyberQuantum/JinHaoYue/amls905x3/SDK/20191101-0tt-asop/android9.0/prebuilts/gcc/linux-x86/arm/arm-linux-androideabi-4.9/bin/arm-linux-androidkernel- +KDIR ?= /home/yaya/D/Workspace/CyberQuantum/JinHaoYue/amls905x3/SDK/20191101-0tt-asop/android9.0/out/target/product/u202/obj/KERNEL_OBJ/ +endif + +ifeq ($(CONFIG_PLATFORM_UBUNTU), y) +KDIR ?= /lib/modules/$(shell uname -r)/build +#KDIR ?= ~/D/Workspace/CyberQuantum/Linux/linux-4.15/ +PWD ?= $(shell pwd) +KVER ?= $(shell uname -r) +MODDESTDIR ?= /lib/modules/$(KVER)/kernel/drivers/net/wireless/aic8800 +SUBARCH = $(shell uname -m | sed -e s/i.86/i386/ -e s/armv.l/arm/ -e s/aarch64/arm64/) +ARCH ?= $(SUBARCH) 
+CROSS_COMPILE ?= +endif + + +all: modules +modules: +# make -C $(KDIR) M=$(PWD) ARCH=$(ARCH) CROSS_COMPILE=$(CROSS_COMPILE) LLVM=1 LLVM_IAS=1 modules + make -C $(KDIR) M=$(PWD) ARCH=$(ARCH) CROSS_COMPILE=$(CROSS_COMPILE) modules +install: + mkdir -p $(MODDESTDIR) + install -p -m 644 $(MODULE_NAME).ko $(MODDESTDIR) + /sbin/depmod -a ${KVER} + +uninstall: + rm -rfv $(MODDESTDIR)/$(MODULE_NAME).ko + /sbin/depmod -a ${KVER} + +clean: + rm -rf *.o *.ko *.o.* *.mod.* modules.* Module.* .a* .o* .*.o.* *.mod .tmp* .cache.mk .modules.order.cmd .Module.symvers.cmd + diff --git a/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/aic_br_ext.c b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/aic_br_ext.c new file mode 100644 index 0000000..dd038e3 --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/aic_br_ext.c @@ -0,0 +1,1569 @@ +/****************************************************************************** + * + * Copyright(c) 2007 - 2017 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + *****************************************************************************/ +#define _AIC_BR_EXT_C_ + +#ifdef __KERNEL__ + #include + #include + #include + #include + #include + #include + #include "rwnx_defs.h" +#endif + +#ifdef CL_IPV6_PASS + #ifdef __KERNEL__ + #include + #include + #include + #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)) + #include + #else + #include + #endif + #endif +#endif + +#ifdef CONFIG_BR_SUPPORT + +/* #define BR_SUPPORT_DEBUG */ + +#define NAT25_IPV4 01 +#define NAT25_IPV6 02 +#define NAT25_IPX 03 +#define NAT25_APPLE 04 +#define NAT25_PPPOE 05 + +#define RTL_RELAY_TAG_LEN (ETH_ALEN) +#define TAG_HDR_LEN 4 + +#define MAGIC_CODE 0x8186 +#define MAGIC_CODE_LEN 2 +#define WAIT_TIME_PPPOE 5 /* waiting time for pppoe server in sec */ + +/*----------------------------------------------------------------- + How database records network address: + 0 1 2 3 4 5 6 7 8 9 10 + |----|----|----|----|----|----|----|----|----|----|----| + IPv4 |type| | IP addr | + IPX |type| Net addr | Node addr | + IPX |type| Net addr |Sckt addr| + Apple |type| Network |node| + PPPoE |type| SID | AC MAC | +-----------------------------------------------------------------*/ + + +/* Find a tag in pppoe frame and return the pointer */ +static __inline__ unsigned char *__nat25_find_pppoe_tag(struct pppoe_hdr *ph, unsigned short type) +{ + unsigned char *cur_ptr, *start_ptr; + unsigned short tagLen, tagType; + + start_ptr = cur_ptr = (unsigned char *)ph->tag; + while ((cur_ptr - start_ptr) < ntohs(ph->length)) { + /* prevent un-alignment access */ + tagType = (unsigned short)((cur_ptr[0] << 8) + cur_ptr[1]); + tagLen = (unsigned short)((cur_ptr[2] << 8) + cur_ptr[3]); + if (tagType == type) + return cur_ptr; + cur_ptr = cur_ptr + TAG_HDR_LEN + tagLen; + } + return 0; +} + + +static __inline__ int __nat25_add_pppoe_tag(struct sk_buff *skb, struct pppoe_tag *tag) +{ + struct pppoe_hdr *ph = (struct pppoe_hdr *)(skb->data + ETH_HLEN); + int data_len; + + data_len = tag->tag_len + TAG_HDR_LEN; + if (skb_tailroom(skb) < data_len) { + printk("skb_tailroom() failed in add SID tag!\n"); + return 
-1; + } + + skb_put(skb, data_len); + /* have a room for new tag */ + memmove(((unsigned char *)ph->tag + data_len), (unsigned char *)ph->tag, ntohs(ph->length)); + ph->length = htons(ntohs(ph->length) + data_len); + memcpy((unsigned char *)ph->tag, tag, data_len); + return data_len; +} + +static int skb_pull_and_merge(struct sk_buff *skb, unsigned char *src, int len) +{ + int tail_len; + unsigned long end, tail; + + if ((src + len) > skb_tail_pointer(skb) || skb->len < len) + return -1; + + tail = (unsigned long)skb_tail_pointer(skb); + end = (unsigned long)src + len; + if (tail < end) + return -1; + + tail_len = (int)(tail - end); + if (tail_len > 0) + memmove(src, src + len, tail_len); + + skb_trim(skb, skb->len - len); + return 0; +} + +static __inline__ unsigned long __nat25_timeout(struct rwnx_vif *vif) +{ + unsigned long timeout; + + timeout = jiffies - NAT25_AGEING_TIME * HZ; + + return timeout; +} + + +static __inline__ int __nat25_has_expired(struct rwnx_vif *vif, + struct nat25_network_db_entry *fdb) +{ + if (time_before_eq(fdb->ageing_timer, __nat25_timeout(vif))) + return 1; + + return 0; +} + + +static __inline__ void __nat25_generate_ipv4_network_addr(unsigned char *networkAddr, + unsigned int *ipAddr) +{ + memset(networkAddr, 0, MAX_NETWORK_ADDR_LEN); + + networkAddr[0] = NAT25_IPV4; + memcpy(networkAddr + 7, (unsigned char *)ipAddr, 4); +} + + +static __inline__ void __nat25_generate_ipx_network_addr_with_node(unsigned char *networkAddr, + unsigned int *ipxNetAddr, unsigned char *ipxNodeAddr) +{ + memset(networkAddr, 0, MAX_NETWORK_ADDR_LEN); + + networkAddr[0] = NAT25_IPX; + memcpy(networkAddr + 1, (unsigned char *)ipxNetAddr, 4); + memcpy(networkAddr + 5, ipxNodeAddr, 6); +} + + +static __inline__ void __nat25_generate_ipx_network_addr_with_socket(unsigned char *networkAddr, + unsigned int *ipxNetAddr, unsigned short *ipxSocketAddr) +{ + memset(networkAddr, 0, MAX_NETWORK_ADDR_LEN); + + networkAddr[0] = NAT25_IPX; + memcpy(networkAddr + 1, (unsigned char *)ipxNetAddr, 4); + memcpy(networkAddr + 5, (unsigned char *)ipxSocketAddr, 2); +} + + +static __inline__ void __nat25_generate_apple_network_addr(unsigned char *networkAddr, + unsigned short *network, unsigned char *node) +{ + memset(networkAddr, 0, MAX_NETWORK_ADDR_LEN); + + networkAddr[0] = NAT25_APPLE; + memcpy(networkAddr + 1, (unsigned char *)network, 2); + networkAddr[3] = *node; +} + + +static __inline__ void __nat25_generate_pppoe_network_addr(unsigned char *networkAddr, + unsigned char *ac_mac, unsigned short *sid) +{ + memset(networkAddr, 0, MAX_NETWORK_ADDR_LEN); + + networkAddr[0] = NAT25_PPPOE; + memcpy(networkAddr + 1, (unsigned char *)sid, 2); + memcpy(networkAddr + 3, (unsigned char *)ac_mac, 6); +} + + +#ifdef CL_IPV6_PASS +static void __nat25_generate_ipv6_network_addr(unsigned char *networkAddr, + unsigned int *ipAddr) +{ + memset(networkAddr, 0, MAX_NETWORK_ADDR_LEN); + + networkAddr[0] = NAT25_IPV6; + memcpy(networkAddr + 1, (unsigned char *)ipAddr, 16); +} + + +static unsigned char *scan_tlv(unsigned char *data, int len, unsigned char tag, unsigned char len8b) +{ + while (len > 0) { + if (*data == tag && *(data + 1) == len8b && len >= len8b * 8) + return data + 2; + + len -= (*(data + 1)) * 8; + data += (*(data + 1)) * 8; + } + return NULL; +} + + +static int update_nd_link_layer_addr(unsigned char *data, int len, unsigned char *replace_mac) +{ + struct icmp6hdr *icmphdr = (struct icmp6hdr *)data; + unsigned char *mac; + + if (icmphdr->icmp6_type == NDISC_ROUTER_SOLICITATION) { + if (len >= 8) { + 
mac = scan_tlv(&data[8], len - 8, 1, 1); + if (mac) { + printk("Router Solicitation, replace MAC From: %02x:%02x:%02x:%02x:%02x:%02x, To: %02x:%02x:%02x:%02x:%02x:%02x\n", + mac[0], mac[1], mac[2], mac[3], mac[4], mac[5], + replace_mac[0], replace_mac[1], replace_mac[2], replace_mac[3], replace_mac[4], replace_mac[5]); + memcpy(mac, replace_mac, 6); + return 1; + } + } + } else if (icmphdr->icmp6_type == NDISC_ROUTER_ADVERTISEMENT) { + if (len >= 16) { + mac = scan_tlv(&data[16], len - 16, 1, 1); + if (mac) { + printk("Router Advertisement, replace MAC From: %02x:%02x:%02x:%02x:%02x:%02x, To: %02x:%02x:%02x:%02x:%02x:%02x\n", + mac[0], mac[1], mac[2], mac[3], mac[4], mac[5], + replace_mac[0], replace_mac[1], replace_mac[2], replace_mac[3], replace_mac[4], replace_mac[5]); + memcpy(mac, replace_mac, 6); + return 1; + } + } + } else if (icmphdr->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION) { + if (len >= 24) { + mac = scan_tlv(&data[24], len - 24, 1, 1); + if (mac) { + printk("Neighbor Solicitation, replace MAC From: %02x:%02x:%02x:%02x:%02x:%02x, To: %02x:%02x:%02x:%02x:%02x:%02x\n", + mac[0], mac[1], mac[2], mac[3], mac[4], mac[5], + replace_mac[0], replace_mac[1], replace_mac[2], replace_mac[3], replace_mac[4], replace_mac[5]); + memcpy(mac, replace_mac, 6); + return 1; + } + } + } else if (icmphdr->icmp6_type == NDISC_NEIGHBOUR_ADVERTISEMENT) { + if (len >= 24) { + mac = scan_tlv(&data[24], len - 24, 2, 1); + if (mac) { + printk("Neighbor Advertisement, replace MAC From: %02x:%02x:%02x:%02x:%02x:%02x, To: %02x:%02x:%02x:%02x:%02x:%02x\n", + mac[0], mac[1], mac[2], mac[3], mac[4], mac[5], + replace_mac[0], replace_mac[1], replace_mac[2], replace_mac[3], replace_mac[4], replace_mac[5]); + memcpy(mac, replace_mac, 6); + return 1; + } + } + } else if (icmphdr->icmp6_type == NDISC_REDIRECT) { + if (len >= 40) { + mac = scan_tlv(&data[40], len - 40, 2, 1); + if (mac) { + printk("Redirect, replace MAC From: %02x:%02x:%02x:%02x:%02x:%02x, To: %02x:%02x:%02x:%02x:%02x:%02x\n", + mac[0], mac[1], mac[2], mac[3], mac[4], mac[5], + replace_mac[0], replace_mac[1], replace_mac[2], replace_mac[3], replace_mac[4], replace_mac[5]); + memcpy(mac, replace_mac, 6); + return 1; + } + } + } + return 0; +} + +#ifdef SUPPORT_RX_UNI2MCAST +static void convert_ipv6_mac_to_mc(struct sk_buff *skb) +{ + struct ipv6hdr *iph = (struct ipv6hdr *)(skb->data + ETH_HLEN); + unsigned char *dst_mac = skb->data; + + /* dst_mac[0] = 0xff; */ + /* dst_mac[1] = 0xff; */ + /*modified by qinjunjie,ipv6 multicast address ix 0x33-33-xx-xx-xx-xx*/ + dst_mac[0] = 0x33; + dst_mac[1] = 0x33; + memcpy(&dst_mac[2], &iph->daddr.s6_addr32[3], 4); +#if defined(__LINUX_2_6__) + /*modified by qinjunjie,warning:should not remove next line*/ + skb->pkt_type = PACKET_MULTICAST; +#endif +} +#endif /* CL_IPV6_PASS */ +#endif /* SUPPORT_RX_UNI2MCAST */ + + +static __inline__ int __nat25_network_hash(unsigned char *networkAddr) +{ + if (networkAddr[0] == NAT25_IPV4) { + unsigned long x; + + x = networkAddr[7] ^ networkAddr[8] ^ networkAddr[9] ^ networkAddr[10]; + + return x & (NAT25_HASH_SIZE - 1); + } else if (networkAddr[0] == NAT25_IPX) { + unsigned long x; + + x = networkAddr[1] ^ networkAddr[2] ^ networkAddr[3] ^ networkAddr[4] ^ networkAddr[5] ^ + networkAddr[6] ^ networkAddr[7] ^ networkAddr[8] ^ networkAddr[9] ^ networkAddr[10]; + + return x & (NAT25_HASH_SIZE - 1); + } else if (networkAddr[0] == NAT25_APPLE) { + unsigned long x; + + x = networkAddr[1] ^ networkAddr[2] ^ networkAddr[3]; + + return x & (NAT25_HASH_SIZE - 1); + } else if 
(networkAddr[0] == NAT25_PPPOE) { + unsigned long x; + + x = networkAddr[0] ^ networkAddr[1] ^ networkAddr[2] ^ networkAddr[3] ^ networkAddr[4] ^ networkAddr[5] ^ networkAddr[6] ^ networkAddr[7] ^ networkAddr[8]; + + return x & (NAT25_HASH_SIZE - 1); + } +#ifdef CL_IPV6_PASS + else if (networkAddr[0] == NAT25_IPV6) { + unsigned long x; + + x = networkAddr[1] ^ networkAddr[2] ^ networkAddr[3] ^ networkAddr[4] ^ networkAddr[5] ^ + networkAddr[6] ^ networkAddr[7] ^ networkAddr[8] ^ networkAddr[9] ^ networkAddr[10] ^ + networkAddr[11] ^ networkAddr[12] ^ networkAddr[13] ^ networkAddr[14] ^ networkAddr[15] ^ + networkAddr[16]; + + return x & (NAT25_HASH_SIZE - 1); + } +#endif + else { + unsigned long x = 0; + int i; + + for (i = 0; i < MAX_NETWORK_ADDR_LEN; i++) + x ^= networkAddr[i]; + + return x & (NAT25_HASH_SIZE - 1); + } +} + + +static __inline__ void __network_hash_link(struct rwnx_vif *vif, + struct nat25_network_db_entry *ent, int hash) +{ + /* Caller must _enter_critical_bh already! */ + /* _irqL irqL; */ + /* _enter_critical_bh(&priv->br_ext_lock, &irqL); */ + + ent->next_hash = vif->nethash[hash]; + if (ent->next_hash != NULL) + ent->next_hash->pprev_hash = &ent->next_hash; + vif->nethash[hash] = ent; + ent->pprev_hash = &vif->nethash[hash]; + + /* _exit_critical_bh(&priv->br_ext_lock, &irqL); */ +} + + +static __inline__ void __network_hash_unlink(struct nat25_network_db_entry *ent) +{ + /* Caller must _enter_critical_bh already! */ + /* _irqL irqL; */ + /* _enter_critical_bh(&priv->br_ext_lock, &irqL); */ + + *(ent->pprev_hash) = ent->next_hash; + if (ent->next_hash != NULL) + ent->next_hash->pprev_hash = ent->pprev_hash; + ent->next_hash = NULL; + ent->pprev_hash = NULL; + + /* _exit_critical_bh(&priv->br_ext_lock, &irqL); */ +} + + +static int __nat25_db_network_lookup_and_replace(struct rwnx_vif *vif, + struct sk_buff *skb, unsigned char *networkAddr) +{ + struct nat25_network_db_entry *db; + spin_lock_bh(&vif->br_ext_lock); + + db = vif->nethash[__nat25_network_hash(networkAddr)]; + while (db != NULL) { + if (!memcmp(db->networkAddr, networkAddr, MAX_NETWORK_ADDR_LEN)) { + if (!__nat25_has_expired(vif, db)) { + /* replace the destination mac address */ + memcpy(skb->data, db->macAddr, ETH_ALEN); + atomic_inc(&db->use_count); + +#ifdef CL_IPV6_PASS + printk("NAT25: Lookup M:%02x%02x%02x%02x%02x%02x N:%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x" + "%02x%02x%02x%02x%02x%02x\n", + db->macAddr[0], + db->macAddr[1], + db->macAddr[2], + db->macAddr[3], + db->macAddr[4], + db->macAddr[5], + db->networkAddr[0], + db->networkAddr[1], + db->networkAddr[2], + db->networkAddr[3], + db->networkAddr[4], + db->networkAddr[5], + db->networkAddr[6], + db->networkAddr[7], + db->networkAddr[8], + db->networkAddr[9], + db->networkAddr[10], + db->networkAddr[11], + db->networkAddr[12], + db->networkAddr[13], + db->networkAddr[14], + db->networkAddr[15], + db->networkAddr[16]); +#else + printk("NAT25: Lookup M:%02x%02x%02x%02x%02x%02x N:%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n", + db->macAddr[0], + db->macAddr[1], + db->macAddr[2], + db->macAddr[3], + db->macAddr[4], + db->macAddr[5], + db->networkAddr[0], + db->networkAddr[1], + db->networkAddr[2], + db->networkAddr[3], + db->networkAddr[4], + db->networkAddr[5], + db->networkAddr[6], + db->networkAddr[7], + db->networkAddr[8], + db->networkAddr[9], + db->networkAddr[10]); +#endif + } + spin_unlock_bh(&vif->br_ext_lock); + return 1; + } + + db = db->next_hash; + } + + spin_unlock_bh(&vif->br_ext_lock); + return 0; +} + + +static void 
__nat25_db_network_insert(struct rwnx_vif *vif, + unsigned char *macAddr, unsigned char *networkAddr) +{ + struct nat25_network_db_entry *db; + int hash; + spin_lock_bh(&vif->br_ext_lock); + + hash = __nat25_network_hash(networkAddr); + db = vif->nethash[hash]; + + while (db != NULL) { + if (!memcmp(db->networkAddr, networkAddr, MAX_NETWORK_ADDR_LEN)) { + memcpy(db->macAddr, macAddr, ETH_ALEN); + db->ageing_timer = jiffies; + spin_unlock_bh(&vif->br_ext_lock); + return; + } + + db = db->next_hash; + } + + db = (struct nat25_network_db_entry *)kmalloc(sizeof(*db), in_interrupt() ? GFP_ATOMIC : GFP_KERNEL); + if (db == NULL) { + spin_unlock_bh(&vif->br_ext_lock); + return; + } + + memcpy(db->networkAddr, networkAddr, MAX_NETWORK_ADDR_LEN); + memcpy(db->macAddr, macAddr, ETH_ALEN); + atomic_set(&db->use_count, 1); + db->ageing_timer = jiffies; + + __network_hash_link(vif, db, hash); + + spin_unlock_bh(&vif->br_ext_lock); +} + + +static void __nat25_db_print(struct rwnx_vif *vif) +{ + spin_lock_bh(&vif->br_ext_lock); + +#ifdef BR_SUPPORT_DEBUG + static int counter = 0; + int i, j; + struct nat25_network_db_entry *db; + + counter++; + if ((counter % 16) != 0) + return; + + for (i = 0, j = 0; i < NAT25_HASH_SIZE; i++) { + db = vif->nethash[i]; + + while (db != NULL) { +#ifdef CL_IPV6_PASS + printk("NAT25: DB(%d) H(%02d) C(%d) M:%02x%02x%02x%02x%02x%02x N:%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x" + "%02x%02x%02x%02x%02x%02x\n", + j, + i, + atomic_read(&db->use_count), + db->macAddr[0], + db->macAddr[1], + db->macAddr[2], + db->macAddr[3], + db->macAddr[4], + db->macAddr[5], + db->networkAddr[0], + db->networkAddr[1], + db->networkAddr[2], + db->networkAddr[3], + db->networkAddr[4], + db->networkAddr[5], + db->networkAddr[6], + db->networkAddr[7], + db->networkAddr[8], + db->networkAddr[9], + db->networkAddr[10], + db->networkAddr[11], + db->networkAddr[12], + db->networkAddr[13], + db->networkAddr[14], + db->networkAddr[15], + db->networkAddr[16]); +#else + printk("NAT25: DB(%d) H(%02d) C(%d) M:%02x%02x%02x%02x%02x%02x N:%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n", + j, + i, + atomic_read(&db->use_count), + db->macAddr[0], + db->macAddr[1], + db->macAddr[2], + db->macAddr[3], + db->macAddr[4], + db->macAddr[5], + db->networkAddr[0], + db->networkAddr[1], + db->networkAddr[2], + db->networkAddr[3], + db->networkAddr[4], + db->networkAddr[5], + db->networkAddr[6], + db->networkAddr[7], + db->networkAddr[8], + db->networkAddr[9], + db->networkAddr[10]); +#endif + j++; + + db = db->next_hash; + } + } +#endif + + spin_unlock_bh(&vif->br_ext_lock); +} + + + + +/* + * NAT2.5 interface + */ + +void nat25_db_cleanup(struct rwnx_vif *vif) +{ + int i; + spin_lock_bh(&vif->br_ext_lock); + + for (i = 0; i < NAT25_HASH_SIZE; i++) { + struct nat25_network_db_entry *f; + f = vif->nethash[i]; + while (f != NULL) { + struct nat25_network_db_entry *g; + + g = f->next_hash; + if (vif->scdb_entry == f) { + memset(vif->scdb_mac, 0, ETH_ALEN); + memset(vif->scdb_ip, 0, 4); + vif->scdb_entry = NULL; + } + __network_hash_unlink(f); + kfree(f); + + f = g; + } + } + + spin_unlock_bh(&vif->br_ext_lock); +} + + +void nat25_db_expire(struct rwnx_vif *vif) +{ + int i; + spin_lock_bh(&vif->br_ext_lock); + + /* if(!priv->ethBrExtInfo.nat25_disable) */ + { + for (i = 0; i < NAT25_HASH_SIZE; i++) { + struct nat25_network_db_entry *f; + f = vif->nethash[i]; + + while (f != NULL) { + struct nat25_network_db_entry *g; + g = f->next_hash; + + if (__nat25_has_expired(vif, f)) { + if (atomic_dec_and_test(&f->use_count)) { 
+#ifdef BR_SUPPORT_DEBUG +#ifdef CL_IPV6_PASS + panic_printk("NAT25 Expire H(%02d) M:%02x%02x%02x%02x%02x%02x N:%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x" + "%02x%02x%02x%02x%02x%02x\n", + i, + f->macAddr[0], + f->macAddr[1], + f->macAddr[2], + f->macAddr[3], + f->macAddr[4], + f->macAddr[5], + f->networkAddr[0], + f->networkAddr[1], + f->networkAddr[2], + f->networkAddr[3], + f->networkAddr[4], + f->networkAddr[5], + f->networkAddr[6], + f->networkAddr[7], + f->networkAddr[8], + f->networkAddr[9], + f->networkAddr[10], + f->networkAddr[11], + f->networkAddr[12], + f->networkAddr[13], + f->networkAddr[14], + f->networkAddr[15], + f->networkAddr[16]); +#else + + panic_printk("NAT25 Expire H(%02d) M:%02x%02x%02x%02x%02x%02x N:%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n", + i, + f->macAddr[0], + f->macAddr[1], + f->macAddr[2], + f->macAddr[3], + f->macAddr[4], + f->macAddr[5], + f->networkAddr[0], + f->networkAddr[1], + f->networkAddr[2], + f->networkAddr[3], + f->networkAddr[4], + f->networkAddr[5], + f->networkAddr[6], + f->networkAddr[7], + f->networkAddr[8], + f->networkAddr[9], + f->networkAddr[10]); +#endif +#endif + if (vif->scdb_entry == f) { + memset(vif->scdb_mac, 0, ETH_ALEN); + memset(vif->scdb_ip, 0, 4); + vif->scdb_entry = NULL; + } + __network_hash_unlink(f); + kfree(f); + } + } + + f = g; + } + } + } + + spin_unlock_bh(&vif->br_ext_lock); +} + + +#ifdef SUPPORT_TX_MCAST2UNI +static int checkIPMcAndReplace(struct rwnx_vif *vif, struct sk_buff *skb, unsigned int *dst_ip) +{ + struct stat_info *pstat; + struct list_head *phead, *plist; + int i; + + phead = &vif->asoc_list; + plist = phead->next; + + while (plist != phead) { + pstat = list_entry(plist, struct stat_info, asoc_list); + plist = plist->next; + + if (pstat->ipmc_num == 0) + continue; + + for (i = 0; i < MAX_IP_MC_ENTRY; i++) { + if (pstat->ipmc[i].used && !memcmp(&pstat->ipmc[i].mcmac[3], ((unsigned char *)dst_ip) + 1, 3)) { + memcpy(skb->data, pstat->ipmc[i].mcmac, ETH_ALEN); + return 1; + } + } + } + return 0; +} +#endif + +int nat25_db_handle(struct rwnx_vif *vif, struct sk_buff *skb, int method) +{ + unsigned short protocol; + unsigned char networkAddr[MAX_NETWORK_ADDR_LEN]; + + if (skb == NULL) + return -1; + + if ((method <= NAT25_MIN) || (method >= NAT25_MAX)) + return -1; + + protocol = *((unsigned short *)(skb->data + 2 * ETH_ALEN)); + + /*---------------------------------------------------*/ + /* Handle IP frame */ + /*---------------------------------------------------*/ + if (protocol == __constant_htons(ETH_P_IP)) { + struct iphdr *iph = (struct iphdr *)(skb->data + ETH_HLEN); + + if (((unsigned char *)(iph) + (iph->ihl << 2)) >= (skb->data + ETH_HLEN + skb->len)) { + printk("NAT25: malformed IP packet !\n"); + return -1; + } + + switch (method) { + case NAT25_CHECK: + return -1; + + case NAT25_INSERT: { + /* some muticast with source IP is all zero, maybe other case is illegal */ + /* in class A, B, C, host address is all zero or all one is illegal */ + if (iph->saddr == 0) + return 0; + printk("NAT25: Insert IP, SA=%08x, DA=%08x\n", iph->saddr, iph->daddr); + __nat25_generate_ipv4_network_addr(networkAddr, &iph->saddr); + /* record source IP address and , source mac address into db */ + __nat25_db_network_insert(vif, skb->data + ETH_ALEN, networkAddr); + + __nat25_db_print(vif); + } + return 0; + + case NAT25_LOOKUP: { + printk("NAT25: Lookup IP, SA=%08x, DA=%08x\n", iph->saddr, iph->daddr); +#ifdef SUPPORT_TX_MCAST2UNI + if (vif->pshare->rf_ft_var.mc2u_disable || + ((((OPMODE & (WIFI_STATION_STATE 
| WIFI_ASOC_STATE)) + == (WIFI_STATION_STATE | WIFI_ASOC_STATE)) && + !checkIPMcAndReplace(vif, skb, &iph->daddr)) || + (OPMODE & WIFI_ADHOC_STATE))) +#endif + { + __nat25_generate_ipv4_network_addr(networkAddr, &iph->daddr); + + if (!__nat25_db_network_lookup_and_replace(vif, skb, networkAddr)) { + if (*((unsigned char *)&iph->daddr + 3) == 0xff) { + /* L2 is unicast but L3 is broadcast, make L2 bacome broadcast */ + printk("NAT25: Set DA as boardcast\n"); + memset(skb->data, 0xff, ETH_ALEN); + } else { + /* forward unknow IP packet to upper TCP/IP */ + printk("NAT25: Replace DA with BR's MAC\n"); + if ((*(u32 *)vif->br_mac) == 0 && (*(u16 *)(vif->br_mac + 4)) == 0) { + void netdev_br_init(struct net_device *netdev); + printk("Re-init netdev_br_init() due to br_mac==0!\n"); + netdev_br_init(vif->ndev); + } + memcpy(skb->data, vif->br_mac, ETH_ALEN); + } + } + } + } + return 0; + + default: + return -1; + } + } + + /*---------------------------------------------------*/ + /* Handle ARP frame */ + /*---------------------------------------------------*/ + else if (protocol == __constant_htons(ETH_P_ARP)) { + struct arphdr *arp = (struct arphdr *)(skb->data + ETH_HLEN); + unsigned char *arp_ptr = (unsigned char *)(arp + 1); + unsigned int *sender, *target; + + if (arp->ar_pro != __constant_htons(ETH_P_IP)) { + printk("NAT25: arp protocol unknown (%4x)!\n", htons(arp->ar_pro)); + return -1; + } + + switch (method) { + case NAT25_CHECK: + return 0; /* skb_copy for all ARP frame */ + + case NAT25_INSERT: { + printk("NAT25: Insert ARP, MAC=%02x%02x%02x%02x%02x%02x\n", arp_ptr[0], + arp_ptr[1], arp_ptr[2], arp_ptr[3], arp_ptr[4], arp_ptr[5]); + + /* change to ARP sender mac address to wlan STA address */ + memcpy(arp_ptr, vif->ndev->dev_addr, ETH_ALEN); + + arp_ptr += arp->ar_hln; + sender = (unsigned int *)arp_ptr; + + __nat25_generate_ipv4_network_addr(networkAddr, sender); + + __nat25_db_network_insert(vif, skb->data + ETH_ALEN, networkAddr); + + __nat25_db_print(vif); + } + return 0; + + case NAT25_LOOKUP: { + printk("NAT25: Lookup ARP\n"); + + arp_ptr += arp->ar_hln; + sender = (unsigned int *)arp_ptr; + arp_ptr += (arp->ar_hln + arp->ar_pln); + target = (unsigned int *)arp_ptr; + + __nat25_generate_ipv4_network_addr(networkAddr, target); + + __nat25_db_network_lookup_and_replace(vif, skb, networkAddr); + + /* change to ARP target mac address to Lookup result */ + arp_ptr = (unsigned char *)(arp + 1); + arp_ptr += (arp->ar_hln + arp->ar_pln); + memcpy(arp_ptr, skb->data, ETH_ALEN); + } + return 0; + + default: + return -1; + } + } + + /*---------------------------------------------------*/ + /* Handle IPX and Apple Talk frame */ + /*---------------------------------------------------*/ + else if ((protocol == __constant_htons(ETH_P_IPX)) || + (protocol == __constant_htons(ETH_P_ATALK)) || + (protocol == __constant_htons(ETH_P_AARP))) { + unsigned char ipx_header[2] = {0xFF, 0xFF}; + struct ipxhdr *ipx = NULL; + struct elapaarp *ea = NULL; + struct ddpehdr *ddp = NULL; + unsigned char *framePtr = skb->data + ETH_HLEN; + + if (protocol == __constant_htons(ETH_P_IPX)) { + printk("NAT25: Protocol=IPX (Ethernet II)\n"); + ipx = (struct ipxhdr *)framePtr; + } else { /* if(protocol <= __constant_htons(ETH_FRAME_LEN)) */ + if (!memcmp(ipx_header, framePtr, 2)) { + printk("NAT25: Protocol=IPX (Ethernet 802.3)\n"); + ipx = (struct ipxhdr *)framePtr; + } else { + unsigned char ipx_8022_type = 0xE0; + unsigned char snap_8022_type = 0xAA; + + if (*framePtr == snap_8022_type) { + unsigned char 
ipx_snap_id[5] = {0x0, 0x0, 0x0, 0x81, 0x37}; /* IPX SNAP ID */ + unsigned char aarp_snap_id[5] = {0x00, 0x00, 0x00, 0x80, 0xF3}; /* Apple Talk AARP SNAP ID */ + unsigned char ddp_snap_id[5] = {0x08, 0x00, 0x07, 0x80, 0x9B}; /* Apple Talk DDP SNAP ID */ + + framePtr += 3; /* eliminate the 802.2 header */ + + if (!memcmp(ipx_snap_id, framePtr, 5)) { + framePtr += 5; /* eliminate the SNAP header */ + + printk("NAT25: Protocol=IPX (Ethernet SNAP)\n"); + ipx = (struct ipxhdr *)framePtr; + } else if (!memcmp(aarp_snap_id, framePtr, 5)) { + framePtr += 5; /* eliminate the SNAP header */ + + ea = (struct elapaarp *)framePtr; + } else if (!memcmp(ddp_snap_id, framePtr, 5)) { + framePtr += 5; /* eliminate the SNAP header */ + + ddp = (struct ddpehdr *)framePtr; + } else { + printk("NAT25: Protocol=Ethernet SNAP %02x%02x%02x%02x%02x\n", framePtr[0], + framePtr[1], framePtr[2], framePtr[3], framePtr[4]); + return -1; + } + } else if (*framePtr == ipx_8022_type) { + framePtr += 3; /* eliminate the 802.2 header */ + + if (!memcmp(ipx_header, framePtr, 2)) { + printk("NAT25: Protocol=IPX (Ethernet 802.2)\n"); + ipx = (struct ipxhdr *)framePtr; + } else + return -1; + } + } + } + + /* IPX */ + if (ipx != NULL) { + switch (method) { + case NAT25_CHECK: + if (!memcmp(skb->data + ETH_ALEN, ipx->ipx_source.node, ETH_ALEN)) { + printk("NAT25: Check IPX skb_copy\n"); + return 0; + } + return -1; + + case NAT25_INSERT: { + printk("NAT25: Insert IPX, Dest=%08x,%02x%02x%02x%02x%02x%02x,%04x Source=%08x,%02x%02x%02x%02x%02x%02x,%04x\n", + ipx->ipx_dest.net, + ipx->ipx_dest.node[0], + ipx->ipx_dest.node[1], + ipx->ipx_dest.node[2], + ipx->ipx_dest.node[3], + ipx->ipx_dest.node[4], + ipx->ipx_dest.node[5], + ipx->ipx_dest.sock, + ipx->ipx_source.net, + ipx->ipx_source.node[0], + ipx->ipx_source.node[1], + ipx->ipx_source.node[2], + ipx->ipx_source.node[3], + ipx->ipx_source.node[4], + ipx->ipx_source.node[5], + ipx->ipx_source.sock); + + if (!memcmp(skb->data + ETH_ALEN, ipx->ipx_source.node, ETH_ALEN)) { + printk("NAT25: Use IPX Net, and Socket as network addr\n"); + + __nat25_generate_ipx_network_addr_with_socket(networkAddr, &ipx->ipx_source.net, &ipx->ipx_source.sock); + + /* change IPX source node addr to wlan STA address */ + memcpy(ipx->ipx_source.node, vif->ndev->dev_addr, ETH_ALEN); + } else + __nat25_generate_ipx_network_addr_with_node(networkAddr, &ipx->ipx_source.net, ipx->ipx_source.node); + + __nat25_db_network_insert(vif, skb->data + ETH_ALEN, networkAddr); + + __nat25_db_print(vif); + } + return 0; + + case NAT25_LOOKUP: { + if (!memcmp(vif->ndev->dev_addr, ipx->ipx_dest.node, ETH_ALEN)) { + printk("NAT25: Lookup IPX, Modify Destination IPX Node addr\n"); + + __nat25_generate_ipx_network_addr_with_socket(networkAddr, &ipx->ipx_dest.net, &ipx->ipx_dest.sock); + + __nat25_db_network_lookup_and_replace(vif, skb, networkAddr); + + /* replace IPX destination node addr with Lookup destination MAC addr */ + memcpy(ipx->ipx_dest.node, skb->data, ETH_ALEN); + } else { + __nat25_generate_ipx_network_addr_with_node(networkAddr, &ipx->ipx_dest.net, ipx->ipx_dest.node); + + __nat25_db_network_lookup_and_replace(vif, skb, networkAddr); + } + } + return 0; + + default: + return -1; + } + } + + /* AARP */ + else if (ea != NULL) { + /* Sanity check fields. 
*/ + if (ea->hw_len != ETH_ALEN || ea->pa_len != AARP_PA_ALEN) { + printk("NAT25: Appletalk AARP Sanity check fail!\n"); + return -1; + } + + switch (method) { + case NAT25_CHECK: + return 0; + + case NAT25_INSERT: { + /* change to AARP source mac address to wlan STA address */ + memcpy(ea->hw_src, vif->ndev->dev_addr, ETH_ALEN); + + printk("NAT25: Insert AARP, Source=%d,%d Destination=%d,%d\n", + ea->pa_src_net, + ea->pa_src_node, + ea->pa_dst_net, + ea->pa_dst_node); + + __nat25_generate_apple_network_addr(networkAddr, &ea->pa_src_net, &ea->pa_src_node); + + __nat25_db_network_insert(vif, skb->data + ETH_ALEN, networkAddr); + + __nat25_db_print(vif); + } + return 0; + + case NAT25_LOOKUP: { + printk("NAT25: Lookup AARP, Source=%d,%d Destination=%d,%d\n", + ea->pa_src_net, + ea->pa_src_node, + ea->pa_dst_net, + ea->pa_dst_node); + + __nat25_generate_apple_network_addr(networkAddr, &ea->pa_dst_net, &ea->pa_dst_node); + + __nat25_db_network_lookup_and_replace(vif, skb, networkAddr); + + /* change to AARP destination mac address to Lookup result */ + memcpy(ea->hw_dst, skb->data, ETH_ALEN); + } + return 0; + + default: + return -1; + } + } + + /* DDP */ + else if (ddp != NULL) { + switch (method) { + case NAT25_CHECK: + return -1; + + case NAT25_INSERT: { + printk("NAT25: Insert DDP, Source=%d,%d Destination=%d,%d\n", + ddp->deh_snet, + ddp->deh_snode, + ddp->deh_dnet, + ddp->deh_dnode); + + __nat25_generate_apple_network_addr(networkAddr, &ddp->deh_snet, &ddp->deh_snode); + + __nat25_db_network_insert(vif, skb->data + ETH_ALEN, networkAddr); + + __nat25_db_print(vif); + } + return 0; + + case NAT25_LOOKUP: { + printk("NAT25: Lookup DDP, Source=%d,%d Destination=%d,%d\n", + ddp->deh_snet, + ddp->deh_snode, + ddp->deh_dnet, + ddp->deh_dnode); + + __nat25_generate_apple_network_addr(networkAddr, &ddp->deh_dnet, &ddp->deh_dnode); + + __nat25_db_network_lookup_and_replace(vif, skb, networkAddr); + } + return 0; + + default: + return -1; + } + } + + return -1; + } + + /*---------------------------------------------------*/ + /* Handle PPPoE frame */ + /*---------------------------------------------------*/ + else if ((protocol == __constant_htons(ETH_P_PPP_DISC)) || + (protocol == __constant_htons(ETH_P_PPP_SES))) { + struct pppoe_hdr *ph = (struct pppoe_hdr *)(skb->data + ETH_HLEN); + unsigned short *pMagic; + + switch (method) { + case NAT25_CHECK: + if (ph->sid == 0) + return 0; + return 1; + + case NAT25_INSERT: + if (ph->sid == 0) { /* Discovery phase according to tag */ + if (ph->code == PADI_CODE || ph->code == PADR_CODE) { + if (vif->ethBrExtInfo.addPPPoETag) { + struct pppoe_tag *tag, *pOldTag; + unsigned char tag_buf[40]; + int old_tag_len = 0; + + tag = (struct pppoe_tag *)tag_buf; + pOldTag = (struct pppoe_tag *)__nat25_find_pppoe_tag(ph, ntohs(PTT_RELAY_SID)); + if (pOldTag) { /* if SID existed, copy old value and delete it */ + old_tag_len = ntohs(pOldTag->tag_len); + if (old_tag_len + TAG_HDR_LEN + MAGIC_CODE_LEN + RTL_RELAY_TAG_LEN > sizeof(tag_buf)) { + printk("SID tag length too long!\n"); + return -1; + } + + memcpy(tag->tag_data + MAGIC_CODE_LEN + RTL_RELAY_TAG_LEN, + pOldTag->tag_data, old_tag_len); + + if (skb_pull_and_merge(skb, (unsigned char *)pOldTag, TAG_HDR_LEN + old_tag_len) < 0) { + printk("call skb_pull_and_merge() failed in PADI/R packet!\n"); + return -1; + } + ph->length = htons(ntohs(ph->length) - TAG_HDR_LEN - old_tag_len); + } + + tag->tag_type = PTT_RELAY_SID; + tag->tag_len = htons(MAGIC_CODE_LEN + RTL_RELAY_TAG_LEN + old_tag_len); + + /* insert the 
magic_code+client mac in relay tag */ + pMagic = (unsigned short *)tag->tag_data; + *pMagic = htons(MAGIC_CODE); + memcpy(tag->tag_data + MAGIC_CODE_LEN, skb->data + ETH_ALEN, ETH_ALEN); + + /* Add relay tag */ + if (__nat25_add_pppoe_tag(skb, tag) < 0) + return -1; + + printk("NAT25: Insert PPPoE, forward %s packet\n", + (ph->code == PADI_CODE ? "PADI" : "PADR")); + } else { /* not add relay tag */ + if (vif->pppoe_connection_in_progress && + memcmp(skb->data + ETH_ALEN, vif->pppoe_addr, ETH_ALEN)) { + printk("Discard PPPoE packet due to another PPPoE connection is in progress!\n"); + return -2; + } + + if (vif->pppoe_connection_in_progress == 0) + memcpy(vif->pppoe_addr, skb->data + ETH_ALEN, ETH_ALEN); + + vif->pppoe_connection_in_progress = WAIT_TIME_PPPOE; + } + } else + return -1; + } else { /* session phase */ + printk("NAT25: Insert PPPoE, insert session packet to %s\n", skb->dev->name); + + __nat25_generate_pppoe_network_addr(networkAddr, skb->data, &(ph->sid)); + + __nat25_db_network_insert(vif, skb->data + ETH_ALEN, networkAddr); + + __nat25_db_print(vif); + + if (!vif->ethBrExtInfo.addPPPoETag && + vif->pppoe_connection_in_progress && + !memcmp(skb->data + ETH_ALEN, vif->pppoe_addr, ETH_ALEN)) + vif->pppoe_connection_in_progress = 0; + } + return 0; + + case NAT25_LOOKUP: + if (ph->code == PADO_CODE || ph->code == PADS_CODE) { + if (vif->ethBrExtInfo.addPPPoETag) { + struct pppoe_tag *tag; + unsigned char *ptr; + unsigned short tagType, tagLen; + int offset = 0; + + ptr = __nat25_find_pppoe_tag(ph, ntohs(PTT_RELAY_SID)); + if (ptr == 0) { + printk("Fail to find PTT_RELAY_SID in FADO!\n"); + return -1; + } + + tag = (struct pppoe_tag *)ptr; + tagType = (unsigned short)((ptr[0] << 8) + ptr[1]); + tagLen = (unsigned short)((ptr[2] << 8) + ptr[3]); + + if ((tagType != ntohs(PTT_RELAY_SID)) || (tagLen < (MAGIC_CODE_LEN + RTL_RELAY_TAG_LEN))) { + printk("Invalid PTT_RELAY_SID tag length [%d]!\n", tagLen); + return -1; + } + + pMagic = (unsigned short *)tag->tag_data; + if (ntohs(*pMagic) != MAGIC_CODE) { + printk("Can't find MAGIC_CODE in %s packet!\n", + (ph->code == PADO_CODE ? "PADO" : "PADS")); + return -1; + } + + memcpy(skb->data, tag->tag_data + MAGIC_CODE_LEN, ETH_ALEN); + + if (tagLen > MAGIC_CODE_LEN + RTL_RELAY_TAG_LEN) + offset = TAG_HDR_LEN; + + if (skb_pull_and_merge(skb, ptr + offset, TAG_HDR_LEN + MAGIC_CODE_LEN + RTL_RELAY_TAG_LEN - offset) < 0) { + printk("call skb_pull_and_merge() failed in PADO packet!\n"); + return -1; + } + ph->length = htons(ntohs(ph->length) - (TAG_HDR_LEN + MAGIC_CODE_LEN + RTL_RELAY_TAG_LEN - offset)); + if (offset > 0) + tag->tag_len = htons(tagLen - MAGIC_CODE_LEN - RTL_RELAY_TAG_LEN); + + printk("NAT25: Lookup PPPoE, forward %s Packet from %s\n", + (ph->code == PADO_CODE ? 
"PADO" : "PADS"), skb->dev->name); + } else { /* not add relay tag */ + if (!vif->pppoe_connection_in_progress) { + printk("Discard PPPoE packet due to no connection in progresss!\n"); + return -1; + } + memcpy(skb->data, vif->pppoe_addr, ETH_ALEN); + vif->pppoe_connection_in_progress = WAIT_TIME_PPPOE; + } + } else { + if (ph->sid != 0) { + printk("NAT25: Lookup PPPoE, lookup session packet from %s\n", skb->dev->name); + __nat25_generate_pppoe_network_addr(networkAddr, skb->data + ETH_ALEN, &(ph->sid)); + + __nat25_db_network_lookup_and_replace(vif, skb, networkAddr); + + __nat25_db_print(vif); + } else + return -1; + + } + return 0; + + default: + return -1; + } + } + + /*---------------------------------------------------*/ + /* Handle EAP frame */ + /*---------------------------------------------------*/ + else if (protocol == __constant_htons(0x888e)) { + switch (method) { + case NAT25_CHECK: + return -1; + + case NAT25_INSERT: + return 0; + + case NAT25_LOOKUP: + return 0; + + default: + return -1; + } + } + + /*---------------------------------------------------*/ + /* Handle C-Media proprietary frame */ + /*---------------------------------------------------*/ + else if ((protocol == __constant_htons(0xe2ae)) || + (protocol == __constant_htons(0xe2af))) { + switch (method) { + case NAT25_CHECK: + return -1; + + case NAT25_INSERT: + return 0; + + case NAT25_LOOKUP: + return 0; + + default: + return -1; + } + } + + /*---------------------------------------------------*/ + /* Handle IPV6 frame */ + /*---------------------------------------------------*/ +#ifdef CL_IPV6_PASS + else if (protocol == __constant_htons(ETH_P_IPV6)) { + struct ipv6hdr *iph = (struct ipv6hdr *)(skb->data + ETH_HLEN); + + if (sizeof(*iph) >= (skb->len - ETH_HLEN)) { + printk("NAT25: malformed IPv6 packet !\n"); + return -1; + } + + switch (method) { + case NAT25_CHECK: + if (skb->data[0] & 1) + return 0; + return -1; + + case NAT25_INSERT: { + printk("NAT25: Insert IP, SA=%4x:%4x:%4x:%4x:%4x:%4x:%4x:%4x," + " DA=%4x:%4x:%4x:%4x:%4x:%4x:%4x:%4x\n", + iph->saddr.s6_addr16[0], iph->saddr.s6_addr16[1], iph->saddr.s6_addr16[2], iph->saddr.s6_addr16[3], + iph->saddr.s6_addr16[4], iph->saddr.s6_addr16[5], iph->saddr.s6_addr16[6], iph->saddr.s6_addr16[7], + iph->daddr.s6_addr16[0], iph->daddr.s6_addr16[1], iph->daddr.s6_addr16[2], iph->daddr.s6_addr16[3], + iph->daddr.s6_addr16[4], iph->daddr.s6_addr16[5], iph->daddr.s6_addr16[6], iph->daddr.s6_addr16[7]); + + if (memcmp(&iph->saddr, "\x0\x0\x0\x0\x0\x0\x0\x0\x0\x0\x0\x0\x0\x0\x0\x0", 16)) { + __nat25_generate_ipv6_network_addr(networkAddr, (unsigned int *)&iph->saddr); + __nat25_db_network_insert(vif, skb->data + ETH_ALEN, networkAddr); + __nat25_db_print(vif); + + if (iph->nexthdr == IPPROTO_ICMPV6 && + skb->len > (ETH_HLEN + sizeof(*iph) + 4)) { + if (update_nd_link_layer_addr(skb->data + ETH_HLEN + sizeof(*iph), + skb->len - ETH_HLEN - sizeof(*iph), vif->ndev->dev_addr)) { + struct icmp6hdr *hdr = (struct icmp6hdr *)(skb->data + ETH_HLEN + sizeof(*iph)); + hdr->icmp6_cksum = 0; + hdr->icmp6_cksum = csum_ipv6_magic(&iph->saddr, &iph->daddr, + iph->payload_len, + IPPROTO_ICMPV6, + csum_partial((__u8 *)hdr, iph->payload_len, 0)); + } + } + } + } + return 0; + + case NAT25_LOOKUP: + printk("NAT25: Lookup IP, SA=%4x:%4x:%4x:%4x:%4x:%4x:%4x:%4x," + " DA=%4x:%4x:%4x:%4x:%4x:%4x:%4x:%4x\n", + iph->saddr.s6_addr16[0], iph->saddr.s6_addr16[1], iph->saddr.s6_addr16[2], iph->saddr.s6_addr16[3], + iph->saddr.s6_addr16[4], iph->saddr.s6_addr16[5], iph->saddr.s6_addr16[6], 
iph->saddr.s6_addr16[7], + iph->daddr.s6_addr16[0], iph->daddr.s6_addr16[1], iph->daddr.s6_addr16[2], iph->daddr.s6_addr16[3], + iph->daddr.s6_addr16[4], iph->daddr.s6_addr16[5], iph->daddr.s6_addr16[6], iph->daddr.s6_addr16[7]); + + + __nat25_generate_ipv6_network_addr(networkAddr, (unsigned int *)&iph->daddr); + if (!__nat25_db_network_lookup_and_replace(vif, skb, networkAddr)) { +#ifdef SUPPORT_RX_UNI2MCAST + if (iph->daddr.s6_addr[0] == 0xff) + convert_ipv6_mac_to_mc(skb); +#endif + } + return 0; + + default: + return -1; + } + } +#endif /* CL_IPV6_PASS */ + + return -1; +} + + +int nat25_handle_frame(struct rwnx_vif *vif, struct sk_buff *skb) +{ + //printk("%s : vif_type=%d \n",__func__,RWNX_VIF_TYPE(vif)); +#ifdef BR_SUPPORT_DEBUG + if ((!vif->ethBrExtInfo.nat25_disable) && (!(skb->data[0] & 1))) { + printk("NAT25: Input Frame: DA=%02x%02x%02x%02x%02x%02x SA=%02x%02x%02x%02x%02x%02x\n", + skb->data[0], + skb->data[1], + skb->data[2], + skb->data[3], + skb->data[4], + skb->data[5], + skb->data[6], + skb->data[7], + skb->data[8], + skb->data[9], + skb->data[10], + skb->data[11]); + } +#endif + + if (!(skb->data[0] & 1)) { + int is_vlan_tag = 0, i, retval = 0; + unsigned short vlan_hdr = 0; + + if (*((unsigned short *)(skb->data + ETH_ALEN * 2)) == __constant_htons(ETH_P_8021Q)) { + is_vlan_tag = 1; + vlan_hdr = *((unsigned short *)(skb->data + ETH_ALEN * 2 + 2)); + for (i = 0; i < 6; i++) + *((unsigned short *)(skb->data + ETH_ALEN * 2 + 2 - i * 2)) = *((unsigned short *)(skb->data + ETH_ALEN * 2 - 2 - i * 2)); + skb_pull(skb, 4); + } + + if (!vif->ethBrExtInfo.nat25_disable) { + unsigned long irqL; + spin_lock_bh(&vif->br_ext_lock); + /* + * This function look up the destination network address from + * the NAT2.5 database. Return value = -1 means that the + * corresponding network protocol is NOT support. 
+ */ + if (!vif->ethBrExtInfo.nat25sc_disable && + (*((unsigned short *)(skb->data + ETH_ALEN * 2)) == __constant_htons(ETH_P_IP)) && + !memcmp(vif->scdb_ip, skb->data + ETH_HLEN + 16, 4)) { + memcpy(skb->data, vif->scdb_mac, ETH_ALEN); + + spin_unlock_bh(&vif->br_ext_lock); + } else { + spin_unlock_bh(&vif->br_ext_lock); + + retval = nat25_db_handle(vif, skb, NAT25_LOOKUP); + } + } else { + if (((*((unsigned short *)(skb->data + ETH_ALEN * 2)) == __constant_htons(ETH_P_IP)) && + !memcmp(vif->br_ip, skb->data + ETH_HLEN + 16, 4)) || + ((*((unsigned short *)(skb->data + ETH_ALEN * 2)) == __constant_htons(ETH_P_ARP)) && + !memcmp(vif->br_ip, skb->data + ETH_HLEN + 24, 4))) { + /* for traffic to upper TCP/IP */ + retval = nat25_db_handle(vif, skb, NAT25_LOOKUP); + } + } + + if (is_vlan_tag) { + skb_push(skb, 4); + for (i = 0; i < 6; i++) + *((unsigned short *)(skb->data + i * 2)) = *((unsigned short *)(skb->data + 4 + i * 2)); + *((unsigned short *)(skb->data + ETH_ALEN * 2)) = __constant_htons(ETH_P_8021Q); + *((unsigned short *)(skb->data + ETH_ALEN * 2 + 2)) = vlan_hdr; + } + + if (retval == -1) { + /* DEBUG_ERR("NAT25: Lookup fail!\n"); */ + return -1; + } + } + + return 0; +} + +#if 0 +void mac_clone(_adapter *priv, unsigned char *addr) +{ + struct sockaddr sa; + + memcpy(sa.sa_data, addr, ETH_ALEN); + RTW_INFO("MAC Clone: Addr=%02x%02x%02x%02x%02x%02x\n", + addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]); + rtl8192cd_set_hwaddr(priv->dev, &sa); +} + + +int mac_clone_handle_frame(_adapter *priv, struct sk_buff *skb) +{ + if (priv->ethBrExtInfo.macclone_enable && !priv->macclone_completed) { + if (!(skb->data[ETH_ALEN] & 1)) { /* check any other particular MAC add */ + if (memcmp(skb->data + ETH_ALEN, GET_MY_HWADDR(priv), ETH_ALEN) && + ((priv->dev->br_port) && + memcmp(skb->data + ETH_ALEN, priv->br_mac, ETH_ALEN))) { + mac_clone(priv, skb->data + ETH_ALEN); + priv->macclone_completed = 1; + } + } + } + + return 0; +} +#endif /* 0 */ + +#define SERVER_PORT 67 +#define CLIENT_PORT 68 +#define DHCP_MAGIC 0x63825363 +#define BROADCAST_FLAG 0x8000 + +struct dhcpMessage { + u_int8_t op; + u_int8_t htype; + u_int8_t hlen; + u_int8_t hops; + u_int32_t xid; + u_int16_t secs; + u_int16_t flags; + u_int32_t ciaddr; + u_int32_t yiaddr; + u_int32_t siaddr; + u_int32_t giaddr; + u_int8_t chaddr[16]; + u_int8_t sname[64]; + u_int8_t file[128]; + u_int32_t cookie; + u_int8_t options[308]; /* 312 - cookie */ +}; + +void dhcp_flag_bcast(struct rwnx_vif *vif, struct sk_buff *skb) +{ + if (skb == NULL) + return; + //print_hex_dump(KERN_ERR, "SKB DUMP: SKB->DATA== ", DUMP_PREFIX_NONE, 32, 1, skb->data, 64,false); + if (!vif->ethBrExtInfo.dhcp_bcst_disable) { + unsigned short protocol = *((unsigned short *)(skb->data + 2 * ETH_ALEN)); + printk("%s protocol: %04x\n", __func__, protocol); + + if (protocol == __constant_htons(ETH_P_IP)) { /* IP */ + struct iphdr *iph = (struct iphdr *)(skb->data + ETH_HLEN); + + if (iph->protocol == IPPROTO_UDP) { /* UDP */ + struct udphdr *udph = (struct udphdr *)((u8 *)iph + (iph->ihl << 2)); + + if ((udph->source == __constant_htons(CLIENT_PORT)) + && (udph->dest == __constant_htons(SERVER_PORT))) { /* DHCP request */ + struct dhcpMessage *dhcph = + (struct dhcpMessage *)((u8 *)udph + sizeof(struct udphdr)); + + if (dhcph->cookie == __constant_htonl(DHCP_MAGIC)) { /* match magic word */ + if (!(dhcph->flags & htons(BROADCAST_FLAG))) { /* if not broadcast */ + register int sum = 0; + + printk("DHCP: change flag of DHCP request to broadcast.\n"); + + #if 1 + /* or 
BROADCAST flag */ + dhcph->flags |= htons(BROADCAST_FLAG); + /* recalculate checksum */ + sum = ~(udph->check) & 0xffff; + sum += dhcph->flags; + while (sum >> 16) + sum = (sum & 0xffff) + (sum >> 16); + udph->check = ~sum; + #endif + } + } + } + } + } + } +} + + +void *scdb_findEntry(struct rwnx_vif *vif, unsigned char *macAddr, + unsigned char *ipAddr) +{ + printk("%s()\n",__func__); + unsigned char networkAddr[MAX_NETWORK_ADDR_LEN]; + struct nat25_network_db_entry *db; + int hash; + /* _irqL irqL; */ + /* _enter_critical_bh(&priv->br_ext_lock, &irqL); */ + + __nat25_generate_ipv4_network_addr(networkAddr, (unsigned int *)ipAddr); + hash = __nat25_network_hash(networkAddr); + db = vif->nethash[hash]; + while (db != NULL) { + if (!memcmp(db->networkAddr, networkAddr, MAX_NETWORK_ADDR_LEN)) { + /* _exit_critical_bh(&priv->br_ext_lock, &irqL); */ + return (void *)db; + } + + db = db->next_hash; + } + + /* _exit_critical_bh(&priv->br_ext_lock, &irqL); */ + return NULL; +} + +#endif /* CONFIG_BR_SUPPORT */ diff --git a/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/aic_br_ext.h b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/aic_br_ext.h new file mode 100644 index 0000000..71ebeb2 --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/aic_br_ext.h @@ -0,0 +1,73 @@ +/****************************************************************************** + * + * Copyright(c) 2007 - 2017 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + *****************************************************************************/ +#ifndef _AIC_BR_EXT_H_ +#define _AIC_BR_EXT_H_ + +#define CL_IPV6_PASS 1 +#define MACADDRLEN 6 +#define WLAN_ETHHDR_LEN 14 + +#define NAT25_HASH_BITS 4 +#define NAT25_HASH_SIZE (1 << NAT25_HASH_BITS) +#define NAT25_AGEING_TIME 300 + +#define NDEV_FMT "%s" +#define NDEV_ARG(ndev) ndev->name +#define ADPT_FMT "%s" +//#define ADPT_ARG(adapter) (adapter->pnetdev ? adapter->pnetdev->name : NULL) +#define FUNC_NDEV_FMT "%s(%s)" +#define FUNC_NDEV_ARG(ndev) __func__, ndev->name +#define FUNC_ADPT_FMT "%s(%s)" +//#define FUNC_ADPT_ARG(adapter) __func__, (adapter->pnetdev ? 
adapter->pnetdev->name : NULL) +#define MAC_FMT "%02x:%02x:%02x:%02x:%02x:%02x" +#define MAC_ARG(x) ((u8 *)(x))[0], ((u8 *)(x))[1], ((u8 *)(x))[2], ((u8 *)(x))[3], ((u8 *)(x))[4], ((u8 *)(x))[5] + + +#ifdef CL_IPV6_PASS + #define MAX_NETWORK_ADDR_LEN 17 +#else + #define MAX_NETWORK_ADDR_LEN 11 +#endif + +struct nat25_network_db_entry { + struct nat25_network_db_entry *next_hash; + struct nat25_network_db_entry **pprev_hash; + atomic_t use_count; + unsigned char macAddr[6]; + unsigned long ageing_timer; + unsigned char networkAddr[MAX_NETWORK_ADDR_LEN]; +}; + +enum NAT25_METHOD { + NAT25_MIN, + NAT25_CHECK, + NAT25_INSERT, + NAT25_LOOKUP, + NAT25_PARSE, + NAT25_MAX +}; + +struct br_ext_info { + unsigned int nat25_disable; + unsigned int macclone_enable; + unsigned int dhcp_bcst_disable; + int addPPPoETag; /* 1: Add PPPoE relay-SID, 0: disable */ + unsigned char nat25_dmzMac[MACADDRLEN]; + unsigned int nat25sc_disable; +}; + +void nat25_db_cleanup(struct rwnx_vif *vif); + +#endif /* _AIC_BR_EXT_H_ */ diff --git a/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/aic_priv_cmd.c b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/aic_priv_cmd.c new file mode 100644 index 0000000..5a4bfb4 --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/aic_priv_cmd.c @@ -0,0 +1,2074 @@ +/** + ****************************************************************************** + * + * @file aic_priv_cmd.c + * + * @brief Entry point of the AIC driver + * + * Copyright (C) Aicsemi 2018-2024 + * + ****************************************************************************** + */ + +#include +#include +#include "rwnx_defs.h" +#include "rwnx_msg_tx.h" +#include "rwnx_debugfs.h" +#include "rwnx_main.h" +#include "aic_priv_cmd.h" +#include "aicwf_compat_8800dc.h" +#include "aicwf_compat_8800d80.h" +#include "aicwf_compat_8800d80x2.h" +#include "rwnx_mod_params.h" +#include "rwnx_platform.h" + + +#define FLASH_BIN_ADDR_8800M80 0x8000000 + +extern void set_testmode(int); +extern int get_flash_bin_size(void); +extern u32 get_flash_bin_crc(void); +extern int testmode; + +extern int rwnx_fill_station_info(struct rwnx_sta *sta, struct rwnx_vif *vif, + struct station_info *sinfo, u8 *phymode, u32 *tx_phyrate, u32 *rx_phyrate); + +static void print_help(const char *cmd); +struct dbg_rftest_cmd_cfm cfm = {{0,}}; + +#ifdef CONFIG_RFTEST +enum { + SET_TX, + SET_TXSTOP, + SET_TXTONE, + SET_RX, + GET_RX_RESULT, + SET_RXSTOP, + SET_RX_METER, + SET_POWER, + SET_XTAL_CAP, + SET_XTAL_CAP_FINE, + GET_EFUSE_BLOCK, + SET_FREQ_CAL, + SET_FREQ_CAL_FINE, + GET_FREQ_CAL, + SET_MAC_ADDR, + GET_MAC_ADDR, + SET_BT_MAC_ADDR, + GET_BT_MAC_ADDR, + SET_VENDOR_INFO, + GET_VENDOR_INFO, + RDWR_PWRMM, + RDWR_PWRIDX, + RDWR_PWRLVL = RDWR_PWRIDX, + RDWR_PWROFST, + RDWR_DRVIBIT, + RDWR_EFUSE_PWROFST, + RDWR_EFUSE_DRVIBIT, + SET_PAPR, + SET_CAL_XTAL, + GET_CAL_XTAL_RES, + SET_COB_CAL, + GET_COB_CAL_RES, + RDWR_EFUSE_USRDATA, + SET_NOTCH, + RDWR_PWROFSTFINE, + RDWR_EFUSE_PWROFSTFINE, + RDWR_EFUSE_SDIOCFG, + RDWR_EFUSE_USBVIDPID, + SET_SRRC, + SET_FSS, + RDWR_EFUSE_HE_OFF, + SET_USB_OFF, + SET_PLL_TEST, + SET_ANT_MODE, + GET_NOISE, + RDWR_BT_EFUSE_PWROFST, + EXEC_FLASH_OPER, + RDWR_PWRADD2X, + RDWR_EFUSE_PWRADD2X, + CHECK_FLASH=0x108, +}; + +typedef struct { + u8_l chan; + u8_l bw; + u8_l mode; + u8_l rate; + u16_l length; + u16_l tx_intv_us; + s8_l max_pwr; +} cmd_rf_settx_t; + +typedef struct { + u8_l val; +} cmd_rf_setfreq_t; + +typedef struct { + u8_l chan; + u8_l bw; +} cmd_rf_rx_t; + +typedef struct { + u8_l block; +} cmd_rf_getefuse_t; + +typedef 
struct { + u8_l dutid; + u8_l chip_num; + u8_l dis_xtal; +} cmd_rf_setcobcal_t; + +typedef struct { + u16_l dut_rcv_golden_num; + u8_l golden_rcv_dut_num; + s8_l rssi_static; + s8_l snr_static; + s8_l dut_rssi_static; + u16_l reserved; +} cob_result_ptr_t; +#endif + +typedef struct +{ + u8_l func; + u8_l cnt; + u8_l reserved[2]; + u32_l usrdata[3]; // 3 words totally +} cmd_ef_usrdata_t; + +typedef struct +{ + u32_l flash_read_addr; + u32_l flash_read_size; +} cmd_check_flash_t; + +extern char country_code[]; + +#define CMD_MAXARGS 30 + +#if 0//#include +#define isblank(c) ((c) == ' ' || (c) == '\t') +#define isascii(c) (((unsigned char)(c)) <= 0x7F) + +static int isdigit(unsigned char c) +{ + return ((c >= '0') && (c <='9')); +} + +static int isxdigit(unsigned char c) +{ + if ((c >= '0') && (c <='9')) + return 1; + if ((c >= 'a') && (c <='f')) + return 1; + if ((c >= 'A') && (c <='F')) + return 1; + return 0; +} + +static int islower(unsigned char c) +{ + return ((c >= 'a') && (c <='z')); +} + +static unsigned char toupper(unsigned char c) +{ + if (islower(c)) + c -= 'a'-'A'; + return c; +} +#endif + +static int parse_line (char *line, char *argv[]) +{ + int nargs = 0; + + while (nargs < CMD_MAXARGS) { + /* skip any white space */ + while ((*line == ' ') || (*line == '\t')) { + ++line; + } + + if (*line == '\0') { /* end of line, no more args */ + argv[nargs] = 0; + return nargs; + } + + /* Argument include space should be bracketed by quotation mark */ + if (*line == '\"') { + /* Skip quotation mark */ + line++; + + /* Begin of argument string */ + argv[nargs++] = line; + + /* Until end of argument */ + while (*line && (*line != '\"')) { + ++line; + } + } else { + argv[nargs++] = line; /* begin of argument string */ + + /* find end of string */ + while (*line && (*line != ' ') && (*line != '\t')) { + ++line; + } + } + + if (*line == '\0') { /* end of line, no more args */ + argv[nargs] = 0; + return nargs; + } + + *line++ = '\0'; /* terminate current arg */ + } + + printk("** Too many args (max. %d) **\n", CMD_MAXARGS); + + return nargs; +} + +unsigned int command_strtoul(const char *cp, char **endp, unsigned int base) +{ + unsigned int result = 0, value, is_neg = 0; + + if (*cp == '0') { + cp++; + if ((*cp == 'x') && isxdigit(cp[1])) { + base = 16; + cp++; + } + if (!base) { + base = 8; + } + } + if (!base) { + base = 10; + } + if (*cp == '-') { + is_neg = 1; + cp++; + } + while (isxdigit(*cp) && (value = isdigit(*cp) ? *cp - '0' : (islower(*cp) ? toupper(*cp) : *cp) - 'A' + 10) < base) { + result = result * base + value; + cp++; + } + if (is_neg) + result = (unsigned int)((int)result * (-1)); + + if (endp) + *endp = (char *)cp; + return result; +} + +int str_starts(const char *str, const char *start) +{ + return strncmp(str, start, strlen(start)) == 0; +} + +/* + * aic_priv_cmd handers. 
+ */ +static int aic_priv_cmd_set_tx (struct rwnx_hw *rwnx_hw, int argc, char *argv[], char *command) +{ + cmd_rf_settx_t settx_param; + u8_l set_p = 0; + u8_l lvl_band, lvl_mod, lvl_idx, lvl_pwr = 0; + u8_l buf[10]; +#ifdef CONFIG_POWER_LIMIT + int8_t max_pwr; + uint8_t r_idx; + txpwr_loss_conf_t txpwr_loss_tmp; + txpwr_loss_conf_t *txpwr_loss; + txpwr_loss = &txpwr_loss_tmp; +#endif +#ifdef AICWF_SDIO_SUPPORT + struct aic_sdio_dev *dev = g_rwnx_plat->sdiodev; +#endif +#ifdef AICWF_USB_SUPPORT + struct aic_usb_dev *dev = g_rwnx_plat->usbdev; +#endif + + if (argc < 6) + return -EINVAL; + + settx_param.chan = command_strtoul(argv[1], NULL, 10); + settx_param.bw = command_strtoul(argv[2], NULL, 10); + settx_param.mode = command_strtoul(argv[3], NULL, 10); + settx_param.rate = command_strtoul(argv[4], NULL, 10); + settx_param.length = command_strtoul(argv[5], NULL, 10); + if (argc > 6) { + settx_param.tx_intv_us = command_strtoul(argv[6], NULL, 10); + } else { + settx_param.tx_intv_us = 10000; // set default val 10ms + } + if (argc > 7) { + if (dev->chipid == PRODUCT_ID_AIC8801){ + AICWFDBG(LOGERROR, "unsupported cmd\n"); + return -EINVAL; + } + lvl_pwr = command_strtoul(argv[7], NULL, 10); + AICWFDBG(LOGINFO, "lvl_pwr: %d\n", lvl_pwr); + + if (settx_param.chan >= 36) + lvl_band = 2; + else + lvl_band = 1; + if (settx_param.mode == 0) + lvl_mod = 0; + else if (settx_param.mode == 2 || settx_param.mode == 4) + lvl_mod = 1; + else if (settx_param.mode == 5) + lvl_mod = 2; + if (settx_param.mode >= 4) + lvl_idx = settx_param.rate & 0xF; + else if (settx_param.mode >= 2) + lvl_idx = settx_param.rate & 0x7; + else + lvl_idx = settx_param.rate; + + buf[0] = lvl_band; + buf[1] = lvl_mod; + buf[2] = lvl_idx; + buf[3] = lvl_pwr; + + set_p = 1; + } + settx_param.max_pwr = POWER_LEVEL_INVALID_VAL; + AICWFDBG(LOGINFO, "txparam:%d,%d,%d,%d,%d,%d\n", settx_param.chan, settx_param.bw, + settx_param.mode, settx_param.rate, settx_param.length, settx_param.tx_intv_us); +#ifdef CONFIG_POWER_LIMIT + r_idx = get_ccode_region(country_code); + txpwr_loss = &txpwr_loss_tmp; + get_userconfig_txpwr_loss(txpwr_loss); + if (txpwr_loss->loss_enable_2g4 == 1) + AICWFDBG(LOGINFO, "%s:loss_value_2g4: %d\r\n", __func__, + txpwr_loss->loss_value_2g4); + if (txpwr_loss->loss_enable_5g == 1) + AICWFDBG(LOGINFO, "%s:loss_value_5g: %d\r\n", __func__, + txpwr_loss->loss_value_5g); + max_pwr = get_powerlimit_by_chnum(settx_param.chan, r_idx, settx_param.bw); + if (settx_param.chan >= 36) { + if (txpwr_loss->loss_enable_5g == 1) + max_pwr -= txpwr_loss->loss_value_5g; + } else { + if (txpwr_loss->loss_enable_2g4 == 1) + max_pwr -= txpwr_loss->loss_value_2g4; + } + if (!set_p || (lvl_pwr == 255)) { + settx_param.max_pwr = max_pwr; + AICWFDBG(LOGINFO, "max_pwr:%d\n", settx_param.max_pwr); + } else + AICWFDBG(LOGINFO, "the specified power is input without power limit\n"); +#endif + + if (set_p && (lvl_pwr != 255)) + rwnx_send_rftest_req(rwnx_hw, RDWR_PWRLVL, 4, buf, &cfm); + + rwnx_send_rftest_req(rwnx_hw, SET_TX, sizeof(cmd_rf_settx_t), (u8_l *)&settx_param, NULL); + return 0; +} + +static int aic_priv_cmd_set_txstop (struct rwnx_hw *rwnx_hw, int argc, char *argv[], char *command) +{ + rwnx_send_rftest_req(rwnx_hw, SET_TXSTOP, 0, NULL, NULL); + return 0; +} + +static int aic_priv_cmd_set_rx (struct rwnx_hw *rwnx_hw, int argc, char *argv[], char *command) +{ + cmd_rf_rx_t setrx_param; + + if (argc < 3) { + return -EINVAL; + } + setrx_param.chan = command_strtoul(argv[1], NULL, 10); + setrx_param.bw = command_strtoul(argv[2], NULL, 10); 
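    /*
     * Editorial note, not part of the original driver source: every rftest
     * handler in this file follows the same pattern -- parse argv[] into a
     * small parameter struct, then pass it to rwnx_send_rftest_req() with a
     * sub-command id and an optional confirm buffer. A minimal, hedged
     * sketch of a caller built on the handle_private_cmd() entry point
     * defined later in this file ("ndev" is assumed to be the wlan netdev):
     *
     *   char cmd[32];
     *
     *   // "set_rx <chan> <bw>", e.g. channel 6, bw code 0
     *   snprintf(cmd, sizeof(cmd), "set_rx 6 0");
     *   handle_private_cmd(ndev, cmd, strlen(cmd));
     *
     *   // fetch fcsok/total counters; 8 result bytes are copied into cmd
     *   snprintf(cmd, sizeof(cmd), "get_rx_result");
     *   handle_private_cmd(ndev, cmd, strlen(cmd));
     */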
+ rwnx_send_rftest_req(rwnx_hw, SET_RX, sizeof(cmd_rf_rx_t), (u8_l *)&setrx_param, NULL); + return 0; +} + +static int aic_priv_cmd_get_rx_result (struct rwnx_hw *rwnx_hw, int argc, char *argv[], char *command) +{ + rwnx_send_rftest_req(rwnx_hw, GET_RX_RESULT, 0, NULL, &cfm); + memcpy(command, &cfm.rftest_result[0], 8); + return 8; +} + +static int aic_priv_cmd_set_rxstop (struct rwnx_hw *rwnx_hw, int argc, char *argv[], char *command) +{ + rwnx_send_rftest_req(rwnx_hw, SET_RXSTOP, 0, NULL, NULL); + return 0; +} + +static int aic_priv_cmd_set_tx_tone (struct rwnx_hw *rwnx_hw, int argc, char *argv[], char *command) +{ + u8_l func = 0; + u8_l buf[2]; + s8_l freq_ = 0; + + AICWFDBG(LOGINFO, "%s argc:%d\n", argv[0], argc); + if ((argc == 2) || (argc == 3)) { + AICWFDBG(LOGINFO, "argv 1:%s\n",argv[1]); + func = (u8_l)command_strtoul(argv[1], NULL, 16); + if (argc == 3) { + AICWFDBG(LOGINFO, "argv 2:%s\n",argv[2]); + freq_ = (u8_l)command_strtoul(argv[2], NULL, 10); + } else { + freq_ = 0; + }; + buf[0] = func; + buf[1] = (u8_l)freq_; + rwnx_send_rftest_req(rwnx_hw, SET_TXTONE, argc - 1, buf, NULL); + } else { + return -EINVAL; + } + return 0; +} + +static int aic_priv_cmd_set_rx_meter (struct rwnx_hw *rwnx_hw, int argc, char *argv[], char *command) +{ + s8_l freq = 0; + + freq = (int)command_strtoul(argv[1], NULL, 10); + rwnx_send_rftest_req(rwnx_hw, SET_RX_METER, sizeof(freq), (u8_l *)&freq, NULL); + return 0; +} + +static int aic_priv_cmd_set_set_power (struct rwnx_hw *rwnx_hw, int argc, char *argv[], char *command) +{ + u8_l ana_pwr; + u8_l dig_pwr; + u8_l pwr; +#ifdef AICWF_SDIO_SUPPORT + struct aic_sdio_dev *dev = g_rwnx_plat->sdiodev; +#endif +#ifdef AICWF_USB_SUPPORT + struct aic_usb_dev *dev = g_rwnx_plat->usbdev; +#endif + + if (dev->chipid == PRODUCT_ID_AIC8801) { + ana_pwr = command_strtoul(argv[1], NULL, 16); + dig_pwr = command_strtoul(argv[2], NULL, 16); + pwr = (ana_pwr << 4 | dig_pwr); + if (ana_pwr > 0xf || dig_pwr > 0xf) + return -EINVAL; + } else { + ana_pwr = command_strtoul(argv[1], NULL, 10); + pwr = ana_pwr; + if (ana_pwr > 0x1e) + return -EINVAL; + } + AICWFDBG(LOGINFO, "pwr =%x\r\n", pwr); + rwnx_send_rftest_req(rwnx_hw, SET_POWER, sizeof(pwr), (u8_l *)&pwr, NULL); + return 0; +} + +static int aic_priv_cmd_set_xtal_cap (struct rwnx_hw *rwnx_hw, int argc, char *argv[], char *command) +{ + u8_l xtal_cap; + + if (argc < 2) + return -EINVAL; + + xtal_cap = command_strtoul(argv[1], NULL, 10); + AICWFDBG(LOGINFO, "xtal_cap =%x\r\n", xtal_cap); + rwnx_send_rftest_req(rwnx_hw, SET_XTAL_CAP, sizeof(xtal_cap), (u8_l *)&xtal_cap, &cfm); + memcpy(command, &cfm.rftest_result[0], 4); + return 4; +} + +static int aic_priv_cmd_set_xtal_cap_fine (struct rwnx_hw *rwnx_hw, int argc, char *argv[], char *command) +{ + u8_l xtal_cap_fine; + + if (argc < 2) + return -EINVAL; + + xtal_cap_fine = command_strtoul(argv[1], NULL, 10); + AICWFDBG(LOGINFO, "xtal_cap_fine =%x\r\n", xtal_cap_fine); + rwnx_send_rftest_req(rwnx_hw, SET_XTAL_CAP_FINE, sizeof(xtal_cap_fine), (u8_l *)&xtal_cap_fine, &cfm); + memcpy(command, &cfm.rftest_result[0], 4); + return 4; +} + +static int aic_priv_cmd_get_efuse_block (struct rwnx_hw *rwnx_hw, int argc, char *argv[], char *command) +{ + cmd_rf_getefuse_t getefuse_param; + + if (argc < 2) + return -EINVAL; + + getefuse_param.block = command_strtoul(argv[1], NULL, 10); + rwnx_send_rftest_req(rwnx_hw, GET_EFUSE_BLOCK, sizeof(cmd_rf_getefuse_t), (u8_l *)&getefuse_param, &cfm); + AICWFDBG(LOGINFO, "get val=%x\r\n", cfm.rftest_result[0]); + memcpy(command, 
&cfm.rftest_result[0], 4); + return 4; +} + +static int aic_priv_cmd_set_freq_cal (struct rwnx_hw *rwnx_hw, int argc, char *argv[], char *command) +{ + cmd_rf_setfreq_t cmd_setfreq; + + if (argc < 2) + return -EINVAL; + + cmd_setfreq.val = command_strtoul(argv[1], NULL, 16); + AICWFDBG(LOGINFO, "param:%x\r\n", cmd_setfreq.val); + rwnx_send_rftest_req(rwnx_hw, SET_FREQ_CAL, sizeof(cmd_rf_setfreq_t), (u8_l *)&cmd_setfreq, &cfm); + memcpy(command, &cfm.rftest_result[0], 4); + return 4; +} + +static int aic_priv_cmd_set_freq_cal_fine (struct rwnx_hw *rwnx_hw, int argc, char *argv[], char *command) +{ + cmd_rf_setfreq_t cmd_setfreq; + + if (argc < 2) + return -EINVAL; + + cmd_setfreq.val = command_strtoul(argv[1], NULL, 16); + AICWFDBG(LOGINFO, "param:%x\r\n", cmd_setfreq.val); + rwnx_send_rftest_req(rwnx_hw, SET_FREQ_CAL_FINE, sizeof(cmd_rf_setfreq_t), (u8_l *)&cmd_setfreq, &cfm); + memcpy(command, &cfm.rftest_result[0], 4); + return 4; +} + +static int aic_priv_cmd_get_freq_cal (struct rwnx_hw *rwnx_hw, int argc, char *argv[], char *command) +{ + u32_l val; +#ifdef AICWF_SDIO_SUPPORT + struct aic_sdio_dev *dev = g_rwnx_plat->sdiodev; +#endif +#ifdef AICWF_USB_SUPPORT + struct aic_usb_dev *dev = g_rwnx_plat->usbdev; +#endif + + rwnx_send_rftest_req(rwnx_hw, GET_FREQ_CAL, 0, NULL, &cfm); + memcpy(command, &cfm.rftest_result[0], 4); + val = cfm.rftest_result[0]; + if (dev->chipid != PRODUCT_ID_AIC8801) { + AICWFDBG(LOGINFO, "cap=0x%x (remain:%x), cap_fine=%x (remain:%x)\n", + val & 0xff, (val >> 8) & 0xff, (val >> 16) & 0xff, (val >> 24) & 0xff); + } else { + AICWFDBG(LOGINFO, "cap=0x%x, cap_fine=0x%x\n", val & 0xff, (val >> 8) & 0xff); + } + return 4; +} + +static int aic_priv_cmd_set_mac_addr (struct rwnx_hw *rwnx_hw, int argc, char *argv[], char *command) +{ + u8_l mac_addr[6]; + + if (argc < 7) + return -EINVAL; + + mac_addr[5] = command_strtoul(argv[1], NULL, 16); + mac_addr[4] = command_strtoul(argv[2], NULL, 16); + mac_addr[3] = command_strtoul(argv[3], NULL, 16); + mac_addr[2] = command_strtoul(argv[4], NULL, 16); + mac_addr[1] = command_strtoul(argv[5], NULL, 16); + mac_addr[0] = command_strtoul(argv[6], NULL, 16); + AICWFDBG(LOGINFO, "set macaddr:%x,%x,%x,%x,%x,%x\n", mac_addr[5], mac_addr[4], mac_addr[3], mac_addr[2], mac_addr[1], mac_addr[0]); + rwnx_send_rftest_req(rwnx_hw, SET_MAC_ADDR, sizeof(mac_addr), (u8_l *)&mac_addr, NULL); + return 0; +} + +static int aic_priv_cmd_get_mac_addr (struct rwnx_hw *rwnx_hw, int argc, char *argv[], char *command) +{ + u32_l addr0, addr1; +#ifdef AICWF_SDIO_SUPPORT + struct aic_sdio_dev *dev = g_rwnx_plat->sdiodev; +#endif +#ifdef AICWF_USB_SUPPORT + struct aic_usb_dev *dev = g_rwnx_plat->usbdev; +#endif + + rwnx_send_rftest_req(rwnx_hw, GET_MAC_ADDR, 0, NULL, &cfm); + memcpy(command, &cfm.rftest_result[0], 8); + addr0 = cfm.rftest_result[0]; + if ((dev->chipid == PRODUCT_ID_AIC8800DC) || (dev->chipid == PRODUCT_ID_AIC8800DW)) { + int rem_cnt = (cfm.rftest_result[1] >> 16) & 0x00FF; + addr1 = cfm.rftest_result[1] & 0x0000FFFF; + AICWFDBG(LOGINFO, "0x%x,0x%x (remain:%x)\n", addr0, addr1, rem_cnt); + } else { + addr1 = cfm.rftest_result[1]; + AICWFDBG(LOGINFO, "0x%x,0x%x\n", addr0, addr1); + } + return 8; +} + +static int aic_priv_cmd_set_bt_mac_addr (struct rwnx_hw *rwnx_hw, int argc, char *argv[], char *command) +{ + u8_l mac_addr[6]; + + if (argc < 7) + return -EINVAL; + + mac_addr[5] = command_strtoul(argv[1], NULL, 16); + mac_addr[4] = command_strtoul(argv[2], NULL, 16); + mac_addr[3] = command_strtoul(argv[3], NULL, 16); + mac_addr[2] = 
command_strtoul(argv[4], NULL, 16); + mac_addr[1] = command_strtoul(argv[5], NULL, 16); + mac_addr[0] = command_strtoul(argv[6], NULL, 16); + AICWFDBG(LOGINFO, "set bt macaddr:%x,%x,%x,%x,%x,%x\n", mac_addr[5], mac_addr[4], mac_addr[3], mac_addr[2], mac_addr[1], mac_addr[0]); + rwnx_send_rftest_req(rwnx_hw, SET_BT_MAC_ADDR, sizeof(mac_addr), (u8_l *)&mac_addr, NULL); + return 0; +} + +static int aic_priv_cmd_get_bt_mac_addr (struct rwnx_hw *rwnx_hw, int argc, char *argv[], char *command) +{ + u32_l addr0, addr1; +#ifdef AICWF_SDIO_SUPPORT + struct aic_sdio_dev *dev = g_rwnx_plat->sdiodev; +#endif +#ifdef AICWF_USB_SUPPORT + struct aic_usb_dev *dev = g_rwnx_plat->usbdev; +#endif + + rwnx_send_rftest_req(rwnx_hw, GET_BT_MAC_ADDR, 0, NULL, &cfm); + memcpy(command, &cfm.rftest_result[0], 8); + addr0 = cfm.rftest_result[0]; + if ((dev->chipid == PRODUCT_ID_AIC8800DC) || (dev->chipid == PRODUCT_ID_AIC8800DW)) { + int rem_cnt = (cfm.rftest_result[1] >> 16) & 0x00FF; + addr1 = cfm.rftest_result[1] & 0x0000FFFF; + AICWFDBG(LOGINFO, "0x%x,0x%x (remain:%x)\n", addr0, addr1, rem_cnt); + } else { + addr1 = cfm.rftest_result[1]; + AICWFDBG(LOGINFO, "0x%x,0x%x\n", addr0, addr1); + } + return 8; +} + +static int aic_priv_cmd_set_vendor_info (struct rwnx_hw *rwnx_hw, int argc, char *argv[], char *command) +{ + u8_l vendor_info; +#ifdef AICWF_SDIO_SUPPORT + struct aic_sdio_dev *dev = g_rwnx_plat->sdiodev; +#endif +#ifdef AICWF_USB_SUPPORT + struct aic_usb_dev *dev = g_rwnx_plat->usbdev; +#endif + + vendor_info = command_strtoul(argv[1], NULL, 16); + AICWFDBG(LOGINFO, "set vendor info:%x\n", vendor_info); + rwnx_send_rftest_req(rwnx_hw, SET_VENDOR_INFO, 1, &vendor_info, &cfm); + if ((dev->chipid == PRODUCT_ID_AIC8800DC) || (dev->chipid == PRODUCT_ID_AIC8800DW)) { + memcpy(command, &cfm.rftest_result[0], 2); + return 2; + } else { + memcpy(command, &cfm.rftest_result[0], 1); + return 1; + } + AICWFDBG(LOGINFO, "0x%x\n", cfm.rftest_result[0]); +} + +static int aic_priv_cmd_get_vendor_info (struct rwnx_hw *rwnx_hw, int argc, char *argv[], char *command) +{ +#ifdef AICWF_SDIO_SUPPORT + struct aic_sdio_dev *dev = g_rwnx_plat->sdiodev; +#endif +#ifdef AICWF_USB_SUPPORT + struct aic_usb_dev *dev = g_rwnx_plat->usbdev; +#endif + + rwnx_send_rftest_req(rwnx_hw, GET_VENDOR_INFO, 0, NULL, &cfm); + if ((dev->chipid == PRODUCT_ID_AIC8800DC) || (dev->chipid == PRODUCT_ID_AIC8800DW)) { + memcpy(command, &cfm.rftest_result[0], 2); + return 2; + } else { + memcpy(command, &cfm.rftest_result[0], 1); + return 1; + } + AICWFDBG(LOGINFO, "0x%x\n", cfm.rftest_result[0]); +} + +static int aic_priv_cmd_rdwr_pwrmm (struct rwnx_hw *rwnx_hw, int argc, char *argv[], char *command) +{ + if (argc <= 1) { // read cur + rwnx_send_rftest_req(rwnx_hw, RDWR_PWRMM, 0, NULL, &cfm); + } else { // write + u8_l pwrmm = (u8_l)command_strtoul(argv[1], NULL, 16); + pwrmm = (pwrmm) ? 
1 : 0; + AICWFDBG(LOGINFO, "set pwrmm = %x\r\n", pwrmm); + rwnx_send_rftest_req(rwnx_hw, RDWR_PWRMM, sizeof(pwrmm), (u8_l *)&pwrmm, &cfm); + } + memcpy(command, &cfm.rftest_result[0], 4); + return 4; +} + +static int aic_priv_cmd_rdwr_pwridx (struct rwnx_hw *rwnx_hw, int argc, char *argv[], char *command) +{ + u8_l func = 0; +#ifdef AICWF_SDIO_SUPPORT + struct aic_sdio_dev *dev = g_rwnx_plat->sdiodev; +#endif +#ifdef AICWF_USB_SUPPORT + struct aic_usb_dev *dev = g_rwnx_plat->usbdev; +#endif + + if (dev->chipid != PRODUCT_ID_AIC8801){ + AICWFDBG(LOGERROR, "unsupported cmd\n"); + return -EINVAL; + } + if (argc > 1) { + func = (u8_l)command_strtoul(argv[1], NULL, 16); + } + if (func == 0) { // read cur + rwnx_send_rftest_req(rwnx_hw, RDWR_PWRIDX, 0, NULL, &cfm); + } else if (func <= 2) { // write 2.4g/5g pwr idx + if (argc > 3) { + u8_l type = (u8_l)command_strtoul(argv[2], NULL, 16); + u8_l pwridx = (u8_l)command_strtoul(argv[3], NULL, 10); + u8_l buf[3] = {func, type, pwridx}; + AICWFDBG(LOGINFO, "set pwridx:[%x][%x]=%x\r\n", func, type, pwridx); + rwnx_send_rftest_req(rwnx_hw, RDWR_PWRIDX, sizeof(buf), buf, &cfm); + } else { + return -EINVAL; + } + } else { + return -EINVAL; + } + memcpy(command, &cfm.rftest_result[0], 9); + return 9; +} + +static int aic_priv_cmd_rdwr_pwrlvl (struct rwnx_hw *rwnx_hw, int argc, char *argv[], char *command) +{ + u8_l func = 0; +#ifdef AICWF_SDIO_SUPPORT + struct aic_sdio_dev *dev = g_rwnx_plat->sdiodev; +#endif +#ifdef AICWF_USB_SUPPORT + struct aic_usb_dev *dev = g_rwnx_plat->usbdev; +#endif + + if (dev->chipid == PRODUCT_ID_AIC8801){ + AICWFDBG(LOGERROR, "unsupported cmd\n"); + return -EINVAL; + } + if (argc > 1) { + func = (u8_l)command_strtoul(argv[1], NULL, 16); + } + if (func == 0) { // read cur + rwnx_send_rftest_req(rwnx_hw, RDWR_PWRLVL, 0, NULL, &cfm); + } else if (func <= 2) { // write 2.4g/5g pwr lvl + if (argc > 4) { + u8_l grp = (u8_l)command_strtoul(argv[2], NULL, 16); + u8_l idx, size; + u8_l buf[14] = {func, grp,}; + if (argc > 12) { // set all grp + AICWFDBG(LOGINFO, "set pwrlvl %s:\n" + " [%x] =", (func == 1) ? "2.4g" : "5g", grp); + if (grp == 1) { // TXPWR_LVL_GRP_11N_11AC + size = 10; + } else { + size = 12; + } + for (idx = 0; idx < size; idx++) { + s8_l pwrlvl = (s8_l)command_strtoul(argv[3 + idx], NULL, 10); + buf[2 + idx] = (u8_l)pwrlvl; + if (idx && !(idx & 0x3)) { + AICWFDBG(LOGINFO, " "); + } + AICWFDBG(LOGINFO, " %2d", pwrlvl); + } + AICWFDBG(LOGINFO, "\n"); + size += 2; + } else { // set grp[idx] + u8_l idx = (u8_l)command_strtoul(argv[3], NULL, 10); + s8_l pwrlvl = (s8_l)command_strtoul(argv[4], NULL, 10); + buf[2] = idx; + buf[3] = (u8_l)pwrlvl; + size = 4; + AICWFDBG(LOGINFO, "set pwrlvl %s:\n" + " [%x][%d] = %d\n", (func == 1) ? 
"2.4g" : "5g", grp, idx, pwrlvl); + } + rwnx_send_rftest_req(rwnx_hw, RDWR_PWRLVL, size, buf, &cfm); + } else { + AICWFDBG(LOGERROR, "wrong args\n"); + return -EINVAL; + } + } else { + AICWFDBG(LOGERROR, "wrong func: %x\n", func); + return -EINVAL; + } + if((dev->chipid == PRODUCT_ID_AIC8800D81) || (dev->chipid == PRODUCT_ID_AIC8800D81X2) || (dev->chipid == PRODUCT_ID_AIC8800D89X2)){ + memcpy(command, &cfm.rftest_result[0], 6 * 12); + return (6 * 12); + } else { + memcpy(command, &cfm.rftest_result[0], 3 * 12); + return (3 * 12); + } +} + +static int aic_priv_cmd_rdwr_pwrofst (struct rwnx_hw *rwnx_hw, int argc, char *argv[], char *command) +{ + u8_l func = 0; + int res_len = 0; +#ifdef AICWF_SDIO_SUPPORT + struct aic_sdio_dev *dev = g_rwnx_plat->sdiodev; +#endif +#ifdef AICWF_USB_SUPPORT + struct aic_usb_dev *dev = g_rwnx_plat->usbdev; +#endif + + if (argc > 1) { + func = (u8_l)command_strtoul(argv[1], NULL, 16); + } + if (func == 0) { // read cur + rwnx_send_rftest_req(rwnx_hw, RDWR_PWROFST, 0, NULL, &cfm); + } else if (func <= 4) { // write 2.4g/5g pwr ofst and ant0/1 + if ((argc > 4) && ((dev->chipid == PRODUCT_ID_AIC8800D81) || (dev->chipid == PRODUCT_ID_AIC8800D81X2) || (dev->chipid == PRODUCT_ID_AIC8800D89X2))) { + u8_l type = (u8_l)command_strtoul(argv[2], NULL, 16); + u8_l chgrp = (u8_l)command_strtoul(argv[3], NULL, 16); + s8_l pwrofst = (u8_l)command_strtoul(argv[4], NULL, 10); + u8_l buf[4] = {func, type, chgrp, (u8_l)pwrofst}; + AICWFDBG(LOGINFO, "set pwrofst_%s:[%x][%x]=%d\r\n", (func == 1) ? "2.4g" : "5g", type, chgrp, pwrofst); + rwnx_send_rftest_req(rwnx_hw, RDWR_PWROFST, sizeof(buf), buf, &cfm); + } else if ((argc > 3) && ((dev->chipid == PRODUCT_ID_AIC8801) || (dev->chipid == PRODUCT_ID_AIC8800DW) || (dev->chipid == PRODUCT_ID_AIC8800DC))) { + u8_l chgrp = (u8_l)command_strtoul(argv[2], NULL, 16); + s8_l pwrofst = (u8_l)command_strtoul(argv[3], NULL, 10); + u8_l buf[3] = {func, chgrp, (u8_l)pwrofst}; + AICWFDBG(LOGINFO, "set pwrofst_%s:[%x]=%d\r\n", (func == 1) ? 
"2.4g" : "5g", chgrp, pwrofst); + rwnx_send_rftest_req(rwnx_hw, RDWR_PWROFST, sizeof(buf), buf, &cfm); + } else { + return -EINVAL; + } + } else { + AICWFDBG(LOGERROR, "wrong func: %x\n", func); + return -EINVAL; + } + if ((dev->chipid == PRODUCT_ID_AIC8800DC) || (dev->chipid == PRODUCT_ID_AIC8800DW)) { // 3 = 3 (2.4g) + res_len = 3; + } else if (dev->chipid == PRODUCT_ID_AIC8800D81) { // 3 * 2 (2.4g) + 3 * 6 (5g) + res_len = 3 * 3 + 3 * 6; + } else if ((dev->chipid == PRODUCT_ID_AIC8800D81X2) || (dev->chipid == PRODUCT_ID_AIC8800D89X2)) { // ant0/1 + res_len = ( 3 * 3 + 3 * 6 ) * 2; + } else { + res_len = 3 + 4; + } + memcpy(command, &cfm.rftest_result[0], res_len); + return res_len; +} + +static int aic_priv_cmd_rdwr_pwrofstfine (struct rwnx_hw *rwnx_hw, int argc, char *argv[], char *command) +{ + u8_l func = 0; + + if (argc > 1) { + func = (u8_l)command_strtoul(argv[1], NULL, 16); + } + if (func == 0) { // read cur + rwnx_send_rftest_req(rwnx_hw, RDWR_PWROFSTFINE, 0, NULL, &cfm); + } else if (func <= 2) { // write 2.4g/5g pwr ofst + if (argc > 3) { + u8_l chgrp = (u8_l)command_strtoul(argv[2], NULL, 16); + s8_l pwrofst = (u8_l)command_strtoul(argv[3], NULL, 10); + u8_l buf[3] = {func, chgrp, (u8_l)pwrofst}; + AICWFDBG(LOGINFO, "set pwrofstfine:[%x][%x]=%d\r\n", func, chgrp, pwrofst); + rwnx_send_rftest_req(rwnx_hw, RDWR_PWROFSTFINE, sizeof(buf), buf, &cfm); + } else { + AICWFDBG(LOGERROR, "wrong args\n"); + return -EINVAL; + + } + } else { + AICWFDBG(LOGERROR, "wrong func: %x\n", func); + return -EINVAL; + + } + memcpy(command, &cfm.rftest_result[0], 7); + return 7; +} + +static int aic_priv_cmd_rdwr_drvibit (struct rwnx_hw *rwnx_hw, int argc, char *argv[], char *command) +{ + u8_l func = 0; + + if (argc > 1) { + func = (u8_l)command_strtoul(argv[1], NULL, 16); + } + if (func == 0) { // read cur + rwnx_send_rftest_req(rwnx_hw, RDWR_DRVIBIT, 0, NULL, &cfm); + } else if (func == 1) { // write 2.4g pa drv_ibit + if (argc > 2) { + u8_l ibit = (u8_l)command_strtoul(argv[2], NULL, 16); + u8_l buf[2] = {func, ibit}; + AICWFDBG(LOGINFO, "set drvibit:[%x]=%x\r\n", func, ibit); + rwnx_send_rftest_req(rwnx_hw, RDWR_DRVIBIT, sizeof(buf), buf, &cfm); + } else { + AICWFDBG(LOGERROR, "wrong args\n"); + return -EINVAL; + } + } else { + AICWFDBG(LOGERROR, "wrong func: %x\n", func); + return -EINVAL; + } + memcpy(command, &cfm.rftest_result[0], 16); + return 16; +} + +static int aic_priv_cmd_rdwr_efuse_pwrofst (struct rwnx_hw *rwnx_hw, int argc, char *argv[], char *command) +{ + u8_l func = 0; + int res_len = 0; +#ifdef AICWF_SDIO_SUPPORT + struct aic_sdio_dev *dev = g_rwnx_plat->sdiodev; +#endif +#ifdef AICWF_USB_SUPPORT + struct aic_usb_dev *dev = g_rwnx_plat->usbdev; +#endif + + if (argc > 1) { + func = (u8_l)command_strtoul(argv[1], NULL, 16); + } + if (func == 0) { // read cur + rwnx_send_rftest_req(rwnx_hw, RDWR_EFUSE_PWROFST, 0, NULL, &cfm); + } else if (func <= 4) { // write 2.4g/5g pwr ofst and ant0/1 + if ((argc > 4) && ((dev->chipid == PRODUCT_ID_AIC8800D81) || (dev->chipid == PRODUCT_ID_AIC8800D81X2) || (dev->chipid == PRODUCT_ID_AIC8800D89X2))) { + u8_l type = (u8_l)command_strtoul(argv[2], NULL, 16); + u8_l chgrp = (u8_l)command_strtoul(argv[3], NULL, 16); + s8_l pwrofst = (u8_l)command_strtoul(argv[4], NULL, 10); + u8_l buf[4] = {func, type, chgrp, (u8_l)pwrofst}; + AICWFDBG(LOGINFO, "set efuse pwrofst_%s:[%x][%x]=%d\r\n", (func == 1) ? 
"2.4g" : "5g", type, chgrp, pwrofst); + rwnx_send_rftest_req(rwnx_hw, RDWR_EFUSE_PWROFST, sizeof(buf), buf, &cfm); + } else if ((argc > 3) && ((dev->chipid == PRODUCT_ID_AIC8801) || (dev->chipid == PRODUCT_ID_AIC8800DW) || (dev->chipid == PRODUCT_ID_AIC8800DC))) { + u8_l chgrp = (u8_l)command_strtoul(argv[2], NULL, 16); + s8_l pwrofst = (u8_l)command_strtoul(argv[3], NULL, 10); + u8_l buf[3] = {func, chgrp, (u8_l)pwrofst}; + AICWFDBG(LOGINFO, "set efuse pwrofst_%s:[%x]=%d\r\n", (func == 1) ? "2.4g" : "5g", chgrp, pwrofst); + rwnx_send_rftest_req(rwnx_hw, RDWR_EFUSE_PWROFST, sizeof(buf), buf, &cfm); + } else { + AICWFDBG(LOGERROR, "wrong args\n"); + return -EINVAL; + } + } else { + AICWFDBG(LOGERROR, "wrong func: %x\n", func); + return -EINVAL; + } + if ((dev->chipid == PRODUCT_ID_AIC8800DC) || (dev->chipid == PRODUCT_ID_AIC8800DW)) { // 6 = 3 (2.4g) * 2 + res_len = 3 * 2; + } else if (dev->chipid == PRODUCT_ID_AIC8800D81) { // 3 * 2 (2.4g) + 3 * 6 (5g) + res_len = (3 * 3 + 3 * 6) * 2; + } else if((dev->chipid == PRODUCT_ID_AIC8800D81X2) || (dev->chipid == PRODUCT_ID_AIC8800D89X2)) { // 3 * 2 (2.4g) *2 + 3 * 6 (5g) *2 + res_len = (3 * 3 + 3 * 6) * 2 * 2; + } else { // 7 = 3(2.4g) + 4(5g) + res_len = 3 + 4; + } + memcpy(command, &cfm.rftest_result[0], res_len); + return res_len; +} + +static int aic_priv_cmd_rdwr_efuse_pwrofstfine (struct rwnx_hw *rwnx_hw, int argc, char *argv[], char *command) +{ + u8_l func = 0; + + if (argc > 1) { + func = (u8_l)command_strtoul(argv[1], NULL, 16); + } + if (func == 0) { // read cur + rwnx_send_rftest_req(rwnx_hw, RDWR_EFUSE_PWROFSTFINE, 0, NULL, &cfm); + } else if (func <= 2) { // write 2.4g/5g pwr ofst + if (argc > 3) { + u8_l chgrp = (u8_l)command_strtoul(argv[2], NULL, 16); + s8_l pwrofst = (u8_l)command_strtoul(argv[3], NULL, 10); + u8_l buf[3] = {func, chgrp, (u8_l)pwrofst}; + AICWFDBG(LOGINFO, "set pwrofstfine:[%x][%x]=%d\r\n", func, chgrp, pwrofst); + rwnx_send_rftest_req(rwnx_hw, RDWR_EFUSE_PWROFSTFINE, sizeof(buf), buf, &cfm); + } else { + AICWFDBG(LOGERROR, "wrong args\n"); + return -EINVAL; + } + } else { + AICWFDBG(LOGERROR, "wrong func: %x\n", func); + return -EINVAL; + } + memcpy(command, &cfm.rftest_result[0], 7); + return 7; +} + +static int aic_priv_cmd_rdwr_efuse_drvibit (struct rwnx_hw *rwnx_hw, int argc, char *argv[], char *command) +{ + u8_l func = 0; + + if (argc > 1) { + func = (u8_l)command_strtoul(argv[1], NULL, 16); + } + if (func == 0) { // read cur + rwnx_send_rftest_req(rwnx_hw, RDWR_EFUSE_DRVIBIT, 0, NULL, &cfm); + } else if (func == 1) { // write 2.4g pa drv_ibit + if (argc > 2) { + u8_l ibit = (u8_l)command_strtoul(argv[2], NULL, 16); + u8_l buf[2] = {func, ibit}; + AICWFDBG(LOGINFO, "set efuse drvibit:[%x]=%x\r\n", func, ibit); + rwnx_send_rftest_req(rwnx_hw, RDWR_EFUSE_DRVIBIT, sizeof(buf), buf, &cfm); + } else { + AICWFDBG(LOGERROR, "wrong args\n"); + return -EINVAL; + } + } else { + AICWFDBG(LOGERROR, "wrong func: %x\n", func); + return -EINVAL; + } + memcpy(command, &cfm.rftest_result[0], 4); + return 4; +} + +static int aic_priv_cmd_rdwr_efuse_usrdata (struct rwnx_hw *rwnx_hw, int argc, char *argv[], char *command) +{ + cmd_ef_usrdata_t cmd_ef_usrdata; + + if (argc <= 1) { // read all + cmd_ef_usrdata.func = 0; + cmd_ef_usrdata.cnt = 3; + } else if (argc >= 2) { // read/write + cmd_ef_usrdata.func = (u8_l)command_strtoul(argv[1], NULL, 10); + cmd_ef_usrdata.cnt = (u8_l)command_strtoul(argv[2], NULL, 10); + if (cmd_ef_usrdata.func == 1) { + int idx; + for (idx = 0; idx < cmd_ef_usrdata.cnt; idx++) { + 
cmd_ef_usrdata.usrdata[idx] = (u32_l)command_strtoul(argv[3 + idx], NULL, 16); + } + } + } else { + AICWFDBG(LOGERROR, "wrong argc: %x\n", argc); + return -EINVAL; + } + rwnx_send_rftest_req(rwnx_hw, RDWR_EFUSE_USRDATA, sizeof(cmd_ef_usrdata), (u8_l *)&cmd_ef_usrdata, &cfm); + memcpy(command, &cfm.rftest_result[0], 12); + return 12; +} + +static int aic_priv_cmd_rdwr_efuse_sdiocfg (struct rwnx_hw *rwnx_hw, int argc, char *argv[], char *command) +{ + u8_l func = 0; + + if (argc > 1) { + func = (u8_l)command_strtoul(argv[1], NULL, 16); + } + if (func == 0) { // read cur + rwnx_send_rftest_req(rwnx_hw, RDWR_EFUSE_SDIOCFG, 0, NULL, &cfm); + } else if (func == 1) { // write sdiocfg + if (argc > 2) { + u8_l ibit = (u8_l)command_strtoul(argv[2], NULL, 16); + u8_l buf[2] = {func, ibit}; + AICWFDBG(LOGINFO, "set efuse sdiocfg:[%x]=%x\r\n", func, ibit); + rwnx_send_rftest_req(rwnx_hw, RDWR_EFUSE_SDIOCFG, sizeof(buf), buf, &cfm); + } else { + AICWFDBG(LOGERROR, "wrong args\n"); + return -EINVAL; + } + } else { + AICWFDBG(LOGERROR, "wrong func: %x\n", func); + return -EINVAL; + } + memcpy(command, &cfm.rftest_result[0], 4); + return 4; +} + +static int aic_priv_cmd_rdwr_efuse_usbvidpid (struct rwnx_hw *rwnx_hw, int argc, char *argv[], char *command) +{ + u8_l func = 0; + AICWFDBG(LOGINFO, "read/write usb vid/pid into efuse\n"); + if (argc > 1) { + func = (u8_l)command_strtoul(argv[1], NULL, 16); + } + if (func == 0) { // read cur + rwnx_send_rftest_req(rwnx_hw, RDWR_EFUSE_USBVIDPID, 0, NULL, &cfm); + } else if (func == 1) { // write USB vid+pid + if (argc > 2) { + u32_l usb_id = (u32_l)command_strtoul(argv[2], NULL, 16); + u8_l buf[5] = {func, (u8_l)usb_id, (u8_l)(usb_id >> 8), (u8_l)(usb_id >> 16), (u8_l)(usb_id >> 24)}; + AICWFDBG(LOGINFO, "set efuse usb vid/pid:[%x]=%x\r\n", func, usb_id); + rwnx_send_rftest_req(rwnx_hw, RDWR_EFUSE_USBVIDPID, sizeof(buf), buf, &cfm); + } else { + AICWFDBG(LOGERROR, "wrong args\n"); + return -EINVAL; + } + } else { + AICWFDBG(LOGERROR, "wrong func: %x\n", func); + return -EINVAL; + } + memcpy(command, &cfm.rftest_result[0], 4); + return 4; +} + +static int aic_priv_cmd_rdwr_efuse_he_off (struct rwnx_hw *rwnx_hw, int argc, char *argv[], char *command) +{ + u8_l func = 0; + + func = command_strtoul(argv[1], NULL, 10); + AICWFDBG(LOGINFO, "set he off: %d\n", func); + if(func == 1 || func == 0) { + rwnx_send_rftest_req(rwnx_hw, RDWR_EFUSE_HE_OFF, sizeof(func), (u8_l *)&func, &cfm); + AICWFDBG(LOGINFO, "he_off cfm: %d\n", cfm.rftest_result[0]); + memcpy(command, &cfm.rftest_result[0], 4); + return 4; + } + return 0; +} + +static int aic_priv_cmd_set_cal_xtal (struct rwnx_hw *rwnx_hw, int argc, char *argv[], char *command) +{ + rwnx_send_rftest_req(rwnx_hw, SET_CAL_XTAL, 0, NULL, NULL); + return 0; +} + +static int aic_priv_cmd_get_cal_xtal_res (struct rwnx_hw *rwnx_hw, int argc, char *argv[], char *command) +{ + rwnx_send_rftest_req(rwnx_hw, GET_CAL_XTAL_RES, 0, NULL, &cfm); + memcpy(command, &cfm.rftest_result[0], 4); + AICWFDBG(LOGINFO, "cap=0x%x, cap_fine=0x%x\n", cfm.rftest_result[0] & 0x0000ffff, (cfm.rftest_result[0] >> 16) & 0x0000ffff); + return 4; +} + +static int aic_priv_cmd_set_cob_cal (struct rwnx_hw *rwnx_hw, int argc, char *argv[], char *command) +{ + cmd_rf_setcobcal_t setcob_cal; + + if (argc < 3) + return -EINVAL; + setcob_cal.dutid = command_strtoul(argv[1], NULL, 10); + setcob_cal.chip_num = command_strtoul(argv[2], NULL, 10); + setcob_cal.dis_xtal = command_strtoul(argv[3], NULL, 10); + rwnx_send_rftest_req(rwnx_hw, SET_COB_CAL, 
sizeof(cmd_rf_setcobcal_t), (u8_l *)&setcob_cal, NULL); + return 0; +} + +static int aic_priv_cmd_get_cob_cal_res (struct rwnx_hw *rwnx_hw, int argc, char *argv[], char *command) +{ + u8_l state; + cob_result_ptr_t *cob_result_ptr; + + rwnx_send_rftest_req(rwnx_hw, GET_COB_CAL_RES, 0, NULL, &cfm); + state = (cfm.rftest_result[0] >> 16) & 0x000000ff; + if (!state){ + AICWFDBG(LOGINFO, "cap= 0x%x, cap_fine= 0x%x, freq_ofst= %d Hz\n", + cfm.rftest_result[0] & 0x000000ff, (cfm.rftest_result[0] >> 8) & 0x000000ff, cfm.rftest_result[1]); + cob_result_ptr = (cob_result_ptr_t *) & (cfm.rftest_result[2]); + AICWFDBG(LOGINFO, "golden_rcv_dut= %d , tx_rssi= %d dBm, snr = %d dB\ndut_rcv_godlden= %d , rx_rssi= %d dBm", + cob_result_ptr->golden_rcv_dut_num, cob_result_ptr->rssi_static, cob_result_ptr->snr_static, + cob_result_ptr->dut_rcv_golden_num, cob_result_ptr->dut_rssi_static); + memcpy(command, &cfm.rftest_result, 16); + return 16; + } else { + AICWFDBG(LOGERROR, "cob not idle\n"); + return -EINVAL; + } +} + +static int aic_priv_cmd_do_cob_test (struct rwnx_hw *rwnx_hw, int argc, char *argv[], char *command) +{ + u8_l state; + cmd_rf_setcobcal_t setcob_cal; + cob_result_ptr_t *cob_result_ptr; + + setcob_cal.dutid = 1; + setcob_cal.chip_num = 1; + setcob_cal.dis_xtal = 0; + if (argc > 1) + setcob_cal.dis_xtal = command_strtoul(argv[1], NULL, 10); + rwnx_send_rftest_req(rwnx_hw, SET_COB_CAL, sizeof(cmd_rf_setcobcal_t), (u8_l *)&setcob_cal, NULL); + msleep(2000); + rwnx_send_rftest_req(rwnx_hw, GET_COB_CAL_RES, 0, NULL, &cfm); + state = (cfm.rftest_result[0] >> 16) & 0x000000ff; + if (!state){ + AICWFDBG(LOGINFO, "cap= 0x%x, cap_fine= 0x%x, freq_ofst= %d Hz\n", + cfm.rftest_result[0] & 0x000000ff, (cfm.rftest_result[0] >> 8) & 0x000000ff, cfm.rftest_result[1]); + cob_result_ptr = (cob_result_ptr_t *) & (cfm.rftest_result[2]); + AICWFDBG(LOGINFO, "golden_rcv_dut= %d , tx_rssi= %d dBm, snr = %d dB\ndut_rcv_godlden= %d , rx_rssi= %d dBm", + cob_result_ptr->golden_rcv_dut_num, cob_result_ptr->rssi_static, cob_result_ptr->snr_static, + cob_result_ptr->dut_rcv_golden_num, cob_result_ptr->dut_rssi_static); + memcpy(command, &cfm.rftest_result, 16); + return 16; + } else { + AICWFDBG(LOGERROR, "cob not idle\n"); + return -EINVAL; + } +} + +static int aic_priv_cmd_set_papr (struct rwnx_hw *rwnx_hw, int argc, char *argv[], char *command) +{ + u8_l func = 0; + if (argc > 1) { + func = command_strtoul(argv[1], NULL, 10); + AICWFDBG(LOGINFO, "papr %d\r\n", func); + rwnx_send_rftest_req(rwnx_hw, SET_PAPR, sizeof(func), (u8_l *)&func, NULL); + } else { + return -EINVAL; + } + return 0; +} + +static int aic_priv_cmd_set_notch (struct rwnx_hw *rwnx_hw, int argc, char *argv[], char *command) +{ + u8_l func = 0; + if (argc > 1) { + func = command_strtoul(argv[1], NULL, 10); + AICWFDBG(LOGINFO, "notch %d\r\n", func); + rwnx_send_rftest_req(rwnx_hw, SET_NOTCH, sizeof(func), (u8_l *)&func, NULL); + } else { + return -EINVAL; + } + return 0; +} + +static int aic_priv_cmd_set_srrc (struct rwnx_hw *rwnx_hw, int argc, char *argv[], char *command) +{ + u8_l func = 0; + if (argc > 1) { + func = command_strtoul(argv[1], NULL, 10); + AICWFDBG(LOGINFO, "srrc %d\r\n", func); + rwnx_send_rftest_req(rwnx_hw, SET_SRRC, sizeof(func), (u8_l *)&func, NULL); + } else { + return -EINVAL; + } + return 0; +} + +static int aic_priv_cmd_set_fss (struct rwnx_hw *rwnx_hw, int argc, char *argv[], char *command) +{ + u8_l func = 0; + if (argc > 1) { + func = command_strtoul(argv[1], NULL, 10); + AICWFDBG(LOGINFO, "fss %d\r\n", func); + 
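        /*
         * Editorial note, not part of the original driver source: the
         * single-argument RF-test switches (set_papr, set_notch, set_srrc,
         * set_fss, set_ant) all parse one decimal value from argv[1] and
         * forward it as a one-byte payload. Hedged usage sketch via the
         * private-command strings registered in the table further below:
         *
         *   "set_srrc 1"   -- enable sideband suppression for SRRC
         *   "set_fss 1"    -- enable treatment of spurious emissions/burrs
         *   "set_ant 2"    -- antenna mode: 0/ant0, 1/ant1, 2/both
         */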
rwnx_send_rftest_req(rwnx_hw, SET_FSS, sizeof(func), (u8_l *)&func, NULL); + } else { + return -EINVAL; + } + return 0; +} + +static int aic_priv_cmd_set_usb_off (struct rwnx_hw *rwnx_hw, int argc, char *argv[], char *command) +{ + rwnx_send_rftest_req(rwnx_hw, SET_USB_OFF, 0, NULL, NULL); + return 0; +} + +static int aic_priv_cmd_set_pll_test (struct rwnx_hw *rwnx_hw, int argc, char *argv[], char *command) +{ + u8_l func = 0, tx_pwr = 0xc; + s8_l freq = 0; + + if (argc > 1) { + func = command_strtoul(argv[1], NULL, 16); + } + if (argc > 3) { + freq = (s8_l)command_strtoul(argv[2], NULL, 10); + tx_pwr = command_strtoul(argv[3], NULL, 16); + } + if (func <= 1) { + u8_l buf[3] = {func, (u8_l)freq, tx_pwr}; + AICWFDBG(LOGINFO, "set pll_test %d: freq=%d, tx_pwr=0x%x\n", func, freq, tx_pwr); + rwnx_send_rftest_req(rwnx_hw, SET_PLL_TEST, sizeof(buf), buf, &cfm); + } else { + AICWFDBG(LOGERROR, "wrong func: %x\n", func); + return -EINVAL; + } + return 0; +} + +static int aic_priv_cmd_set_ant_mode (struct rwnx_hw *rwnx_hw, int argc, char *argv[], char *command) +{ + u8_l func = 0; + if (argc > 1) { + func = command_strtoul(argv[1], NULL, 10); + AICWFDBG(LOGINFO, "ant %d\r\n", func); + rwnx_send_rftest_req(rwnx_hw, SET_ANT_MODE, sizeof(func), (u8_l *)&func, NULL); + } else { + return -EINVAL; + } + return 0; +} + +static int aic_priv_cmd_rdwr_bt_efuse_pwrofst (struct rwnx_hw *rwnx_hw, int argc, char *argv[], char *command) +{ + u8_l func = 0; + if (argc == 1) { + func = 0; + } else if (argc == 2) { + func = 1; + } + if (func == 0) { // read cur + rwnx_send_rftest_req(rwnx_hw, RDWR_BT_EFUSE_PWROFST, 0, NULL, &cfm); + } else if (func == 1) { // write bt tx pwrofst + int8_t bt_txpwrofst = command_strtoul(argv[1], NULL, 10); + AICWFDBG(LOGINFO, "set bt efuse pwrofst %d\r\n",bt_txpwrofst); + if (bt_txpwrofst < -7 || bt_txpwrofst > 7) { + AICWFDBG(LOGERROR, "wrong params %d, pwrofst limit -7 ~ 7\n", bt_txpwrofst); + return -EINVAL; + } else { + rwnx_send_rftest_req(rwnx_hw, RDWR_BT_EFUSE_PWROFST, sizeof(bt_txpwrofst), &bt_txpwrofst, &cfm); + } + } else { + AICWFDBG(LOGERROR, "wrong func: %x\n", func); + return -EINVAL; + } + memcpy(command, &cfm.rftest_result[0], 2); + return 2; +} + +static int aic_priv_cmd_exec_flash_oper(struct rwnx_hw *rwnx_hw, int argc, char *argv[], char *command) +{ + u8_l func = 0; + if (argc > 1) { + func = (s8_l)command_strtoul(argv[1], NULL, 10); + AICWFDBG(LOGINFO, "exec_flash_oper: %d\n", func); + rwnx_send_rftest_req(rwnx_hw, EXEC_FLASH_OPER, sizeof(func), &func, &cfm); + AICWFDBG(LOGINFO, "flash oper %u %u \n",cfm.rftest_result[0],cfm.rftest_result[1]); + } else { + AICWFDBG(LOGERROR, "wrong args\n"); + return -EINVAL; + } + + return 0; +} + +static int aic_priv_cmd_country_set(struct rwnx_hw *rwnx_hw, int argc, + char *argv[], char *command) +{ + int ret = 0; + struct ieee80211_regdomain *regdomain; + + if (argc < 2) { + AICWFDBG(LOGINFO, "%s param err\n", __func__); + return -1; + } + + AICWFDBG(LOGINFO, "cmd country_set: %s\n", argv[1]); + + regdomain = getRegdomainFromRwnxDB(rwnx_hw->wiphy, argv[1]); + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 0, 0) + ret = regulatory_set_wiphy_regd( + rwnx_hw->wiphy, regdomain); +#else + ret = wiphy_apply_custom_regulatory( + rwnx_hw->wiphy, regdomain); +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 0, 0) */ + +#ifdef CONFIG_RADAR_OR_IR_DETECT + rwnx_radar_set_domain(&rwnx_hw->radar, regdomain->dfs_region); +#endif + +#ifdef CONFIG_POWER_LIMIT + if (!testmode){ + rwnx_send_me_chan_config_req(rwnx_hw, argv[1]); + } +#endif + + 
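    /*
     * Editorial note, not part of the original driver source: country_set
     * takes a two-letter alpha2 code as argv[1]; it is resolved through
     * getRegdomainFromRwnxDB() and applied with regulatory_set_wiphy_regd()
     * on kernels >= 4.0 (wiphy_apply_custom_regulatory() otherwise). With
     * CONFIG_POWER_LIMIT set and test mode off, the per-country channel
     * configuration is also pushed to firmware just above. Hedged usage:
     *
     *   "country_set US"   -- apply the US regulatory rule set
     */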
return ret; +} + +static int aic_priv_cmd_get_noise(struct rwnx_hw *rwnx_hw, int argc, char *argv[], char *command) +{ + rwnx_send_rftest_req(rwnx_hw, GET_NOISE, 0, NULL, &cfm); + + AICWFDBG(LOGINFO, "noise: %d,%d\n", (char)cfm.rftest_result[0], (char)cfm.rftest_result[1]); + command[0] = (char)cfm.rftest_result[0]; + command[1] = (char)cfm.rftest_result[1]; + + return 2; +} + +static int aic_priv_cmd_rdwr_pwradd2x (struct rwnx_hw *rwnx_hw, int argc, char *argv[], char *command) +{ + u8_l func = 0; + int8_t pwradd2x_in = 0; +#ifdef AICWF_SDIO_SUPPORT + struct aic_sdio_dev *dev = g_rwnx_plat->sdiodev; +#endif +#ifdef AICWF_USB_SUPPORT + struct aic_usb_dev *dev = g_rwnx_plat->usbdev; +#endif + + if (dev->chipid != PRODUCT_ID_AIC8800D81) { + AICWFDBG(LOGERROR, "RDWR_PWRADD2X, only D40/80 support\n"); + return -EINVAL; + } + + if (argc > 1) { + func = (u8_l)command_strtoul(argv[1], NULL, 10); + } + if ((func > 0) && (argc > 2)) { + pwradd2x_in = (int8_t)command_strtoul(argv[2], NULL, 10); + } + if (func == 0) { // read cur + rwnx_send_rftest_req(rwnx_hw, RDWR_PWRADD2X, 0, NULL, &cfm); + } else if ((func == 1) || (func == 2)) { // write pwradd2x + AICWFDBG(LOGINFO, "set pwradd2x_%s %d\r\n", (func == 1) ? "2g4" : "5g", pwradd2x_in); + if (pwradd2x_in < -15 || pwradd2x_in > 15) { + AICWFDBG(LOGERROR, "wrong params %d, pwradd2x: -15 ~ 15\n", pwradd2x_in); + return -EINVAL; + } else { + u8_l buf[2] = {func, (u8_l)pwradd2x_in}; + rwnx_send_rftest_req(rwnx_hw, RDWR_PWRADD2X, sizeof(buf), buf, &cfm); + } + } else { + AICWFDBG(LOGERROR, "wrong func: %x\n", func); + return -EINVAL; + } + memcpy(command, &cfm.rftest_result[0], 2); + return 2; +} + +static int aic_priv_cmd_rdwr_efuse_pwradd2x (struct rwnx_hw *rwnx_hw, int argc, char *argv[], char *command) +{ + u8_l func = 0; + int8_t pwradd2x_in = 0; +#ifdef AICWF_SDIO_SUPPORT + struct aic_sdio_dev *dev = g_rwnx_plat->sdiodev; +#endif +#ifdef AICWF_USB_SUPPORT + struct aic_usb_dev *dev = g_rwnx_plat->usbdev; +#endif + + if (dev->chipid != PRODUCT_ID_AIC8800D81) { + AICWFDBG(LOGERROR, "RDWR_EFUSE_PWRADD2X, only D40/80 support\n"); + return -EINVAL; + } + + if (argc > 1) { + func = (u8_l)command_strtoul(argv[1], NULL, 10); + } + if ((func > 0) && (argc > 2)) { + pwradd2x_in = (int8_t)command_strtoul(argv[2], NULL, 10); + } + if (func == 0) { // read cur + rwnx_send_rftest_req(rwnx_hw, RDWR_EFUSE_PWRADD2X, 0, NULL, &cfm); + } else if ((func == 1) || (func == 2)) { // write pwradd2x + AICWFDBG(LOGINFO, "set efuse pwradd2x_%s %d\r\n", (func == 1) ? 
"2g4" : "5g", pwradd2x_in); + if (pwradd2x_in < -15 || pwradd2x_in > 15) { + AICWFDBG(LOGERROR, "wrong params %d, pwradd2x: -15 ~ 15\n", pwradd2x_in); + return -EINVAL; + } else { + u8_l buf[2] = {func, (u8_l)pwradd2x_in}; + rwnx_send_rftest_req(rwnx_hw, RDWR_EFUSE_PWRADD2X, sizeof(buf), buf, &cfm); + } + } else { + AICWFDBG(LOGERROR, "wrong func: %x\n", func); + return -EINVAL; + } + memcpy(command, &cfm.rftest_result[0], 3); + return 3; +} + + + +static int aic_priv_cmd_check_flash(struct rwnx_hw *rwnx_hw, int argc, char *argv[], char *command) +{ + cmd_check_flash_t cmd_check_flash; + cmd_check_flash.flash_read_addr = FLASH_BIN_ADDR_8800M80; + cmd_check_flash.flash_read_size = get_flash_bin_size(); + rwnx_send_rftest_req(rwnx_hw, CHECK_FLASH, sizeof(cmd_check_flash), (u8_l *)&cmd_check_flash, &cfm); + cfm.rftest_result[1] = get_flash_bin_crc(); + + AICWFDBG(LOGINFO, "flash_crc: %x %x\n", cfm.rftest_result[0], cfm.rftest_result[1]); + memcpy(command, &cfm.rftest_result[0], 8); + + return 8; +} + +static int aic_priv_cmd_help (struct rwnx_hw *rwnx_hw, int argc, char *argv[], char *command) +{ + print_help(argc > 0 ? argv[0] : NULL); + return 0; +} + +struct aic_priv_cmd { + const char *cmd; + int (*handler)(struct rwnx_hw *rwnx_hw, int argc, char *argv[], char *command); + const char *usage; +}; + +static const struct aic_priv_cmd aic_priv_commands[] = { + { "set_tx", aic_priv_cmd_set_tx, + " " }, + { "set_txstop", aic_priv_cmd_set_txstop, + "= stop tx " }, + { "set_rx", aic_priv_cmd_set_rx, + " " }, + { "get_rx_result", aic_priv_cmd_get_rx_result, + "= display rx fcsok/total pkt num" }, + { "set_rxstop", aic_priv_cmd_set_rxstop, + "= stop rx " }, + { "set_txtone", aic_priv_cmd_set_tx_tone, + " val = 0/off" }, + { "set_rx_meter", aic_priv_cmd_set_rx_meter, + "= set rx meter " }, + { "set_power", aic_priv_cmd_set_set_power, + " " }, + { "set_xtal_cap", aic_priv_cmd_set_xtal_cap, + " [0 ~ 31]" }, + { "set_xtal_cap_fine", aic_priv_cmd_set_xtal_cap_fine, + " [0 ~ 63]" }, + { "get_efuse_block", aic_priv_cmd_get_efuse_block, + "" }, + { "set_freq_cal", aic_priv_cmd_set_freq_cal, + "" }, + { "set_freq_cal_fine", aic_priv_cmd_set_freq_cal_fine, + "" }, + { "get_freq_cal", aic_priv_cmd_get_freq_cal, + "= display cap & cap fine" }, + { "set_mac_addr", aic_priv_cmd_set_mac_addr, + "= write WiFi MAC into efuse or flash is limited to a maximum of two times" }, + { "get_mac_addr", aic_priv_cmd_get_mac_addr, + "= display WiFi MAC stored in efuse or flash" }, + { "set_bt_mac_addr", aic_priv_cmd_set_bt_mac_addr, + "= write BT MAC into efuse or flash is limited to a maximum of two times" }, + { "get_bt_mac_addr", aic_priv_cmd_get_bt_mac_addr, + "= display BT MAC stored in efuse or flash" }, + { "set_vendor_info", aic_priv_cmd_set_vendor_info, + "= write vendor info into efuse or flash is allowed only once" }, + { "get_vendor_info", aic_priv_cmd_get_vendor_info, + "= display vendor info stored in efuse or flash" }, + { "rdwr_pwrmm", aic_priv_cmd_rdwr_pwrmm, + " = 0/rdwr_pwrlvl, 1/set_power = read/write txpwr manul mode" }, + { "rdwr_pwridx", aic_priv_cmd_rdwr_pwridx, + " " }, + { "rdwr_pwrlvl", aic_priv_cmd_rdwr_pwrlvl, + " " }, + { "rdwr_pwrofst", aic_priv_cmd_rdwr_pwrofst, + " " }, + { "rdwr_pwrofstfine", aic_priv_cmd_rdwr_pwrofstfine, + " " }, + { "rdwr_drvibit", aic_priv_cmd_rdwr_drvibit, + " read/write 8800D pa drvibit" }, + { "set_cal_xtal", aic_priv_cmd_set_cal_xtal, + "= set cal xtal" }, + { "get_cal_xtal_res", aic_priv_cmd_get_cal_xtal_res, + "= get cal xtal result cap & cap_fine" }, + { 
"set_cob_cal", aic_priv_cmd_set_cob_cal, + " = dut cob test" }, + { "get_cob_cal_res", aic_priv_cmd_get_cob_cal_res, + "= get cob cal result" }, + { "do_cob_test", aic_priv_cmd_do_cob_test, + " = 0/xtal, 1/dis_xtal, 2/only_xtal" }, + { "rdwr_efuse_pwrofst", aic_priv_cmd_rdwr_efuse_pwrofst, + " limited to a maximum of two times" }, + { "rdwr_efuse_pwrofstfine", aic_priv_cmd_rdwr_efuse_pwrofstfine, + " limited to a maximum of two times" }, + { "rdwr_efuse_drvibit", aic_priv_cmd_rdwr_efuse_drvibit, + " = read/write 8800D efuse pa drvibitis allowed only once" }, + { "rdwr_efuse_usrdata", aic_priv_cmd_rdwr_efuse_usrdata, + " = read/write efuse usrdata" }, + { "rdwr_efuse_sdiocfg", aic_priv_cmd_rdwr_efuse_sdiocfg, + " = read/write sdiocfg_bit into efuse" }, + { "rdwr_efuse_usbvidpid", aic_priv_cmd_rdwr_efuse_usbvidpid, + " = read/write usb vid/pid into efuse" }, + { "rdwr_efuse_he_off", aic_priv_cmd_rdwr_efuse_he_off, + " = read/write he_off into efuse" }, + { "set_papr", aic_priv_cmd_set_papr, + " = configure papr filter to optimize sideband suppression" }, + { "set_notch", aic_priv_cmd_set_notch, + " = configure filter to optimize sideband suppression" }, + { "set_srrc", aic_priv_cmd_set_srrc, + " = disable/enable sideband suppression for SRRC" }, + { "set_fss", aic_priv_cmd_set_fss, + " = disable/enable treatment of spurious emissions and burrs" }, + { "set_usb_off", aic_priv_cmd_set_usb_off, + "= off usb configure before usb disconnect" }, + { "set_pll_test", aic_priv_cmd_set_pll_test, + " = use pll test to measure saturation power" }, + { "set_ant", aic_priv_cmd_set_ant_mode, + " = 0/ant0, 1/ant1, 2/both" }, + { "rdwr_bt_efuse_pwrofst", aic_priv_cmd_rdwr_bt_efuse_pwrofst, + " = read/write bt tx power offset into efuse" }, + { "exec_flash_oper",aic_priv_cmd_exec_flash_oper, + " = 0 check, 1 rec, 2 prot 3 rd_wcr0 4 er_wcr0 "}, + {"country_set", aic_priv_cmd_country_set, ""}, + {"get_noise", aic_priv_cmd_get_noise, "get noise"}, + {"rdwr_pwradd2x", aic_priv_cmd_rdwr_pwradd2x, + "a value is added for both 2.4G and 5G to achieve overall power adjustment of the band"}, + {"rdwr_efuse_pwradd2x", aic_priv_cmd_rdwr_efuse_pwradd2x, + "a value is added for both 2.4G and 5G to achieve overall power adjustment of the band, write to efuse"}, + {"check_flash", aic_priv_cmd_check_flash, + "check bin crc in flash" }, + +//Reserve for new aic_priv_cmd. + { "help", aic_priv_cmd_help, + "= show usage help" }, + { NULL, NULL, NULL }, + +}; + + +/* + * Prints command usage, lines are padded with the specified string. 
+ */ +static void print_help(const char *cmd) +{ + int n; + printk("commands:\n"); + for (n = 0; aic_priv_commands[n].cmd; n++) { + if (cmd != NULL) + printk("%s %s\n", aic_priv_commands[n].cmd, aic_priv_commands[n].usage); + } +} + +int handle_private_cmd(struct net_device *net, char *command, u32 cmd_len) +{ + const struct aic_priv_cmd *cmd, *match = NULL; + int count; + int bytes_written = 0; + char *argv[CMD_MAXARGS + 1]; + int argc; + struct rwnx_vif *vif = container_of(net->ieee80211_ptr, struct rwnx_vif, wdev); + struct rwnx_hw *p_rwnx_hw = vif->rwnx_hw; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + +#if 0 + if(!testmode) { + AICWFDBG(LOGERROR, "not in testmode\n"); + return -1; + } +#endif + + argc = parse_line(command, argv); + if (argc == 0) { + return -1; + } + + count = 0; + cmd = aic_priv_commands; + while (cmd->cmd) { + if (strncasecmp(cmd->cmd, argv[0], strlen(argv[0])) == 0 && + strncasecmp(cmd->cmd, argv[0], strlen(cmd->cmd)) == 0) + { + match = cmd; + if (strcasecmp(cmd->cmd, argv[0]) == 0) { + /* we have an exact match */ + count = 1; + break; + } + count++; + } + cmd++; + } + + if (count > 1) { + AICWFDBG(LOGINFO, "Ambiguous command '%s'; possible commands:", argv[0]); + cmd = aic_priv_commands; + while (cmd->cmd) { + if (strncasecmp(cmd->cmd, argv[0], + strlen(argv[0])) == 0) { + AICWFDBG(LOGINFO, " %s", cmd->cmd); + } + cmd++; + } + AICWFDBG(LOGINFO, "\n"); + } else if (count == 0) { + AICWFDBG(LOGERROR, "Unknown command '%s'\n", argv[0]); + } else { + AICWFDBG(LOGINFO, "match %s", match->cmd); + bytes_written = match->handler(p_rwnx_hw, argc, &argv[0], command); + } + + if (bytes_written < 0) + AICWFDBG(LOGERROR, "wrong param\n"); + + return bytes_written; +} + +#define RWNX_COUNTRY_CODE_LEN 2 +#define CMD_SET_COUNTRY "COUNTRY" +#define CMD_SET_VENDOR_EX_IE "SET_VENDOR_EX_IE" +#define CMD_SET_AP_WPS_P2P_IE "SET_AP_WPS_P2P_IE" +#define CMD_SET_TESTMODE "SET_TESTMODE" +#define CMD_SET_BLE_WAKE "SET_BLE_WAKE" +#define CMD_SET_MON_FREQ "SET_MON_FREQ" +#define CMD_GET_CS_INFO "GET_CS_INFO" + + +struct ieee80211_regdomain *getRegdomainFromRwnxDB(struct wiphy *wiphy, char *alpha2); +struct ieee80211_regdomain *getRegdomainFromRwnxDBIndex(struct wiphy *wiphy, int index); +extern int reg_regdb_size; + +#ifdef CONFIG_SET_VENDOR_EXTENSION_IE +extern u8_l vendor_extension_data[256]; +extern u8_l vendor_extension_len; + +void set_vendor_extension_ie(char *command){ + + char databyte[3]={0x00, 0x00, 0x00}; + int skip = strlen(CMD_SET_VENDOR_EX_IE) + 1; + int command_index = skip; + int data_index = 0; + + memset(vendor_extension_data, 0, 256); + vendor_extension_len = 0; + memcpy(databyte, command + command_index, 2); + vendor_extension_len = command_strtoul(databyte, NULL, 16); + AICWFDBG(LOGINFO, "%s len:%d \r\n", __func__, vendor_extension_len); + + //parser command and save data in vendor_extension_data + for(data_index = 0;data_index < vendor_extension_len; data_index++){ + command_index = command_index + 3; + memcpy(databyte, command + command_index, 2); + vendor_extension_data[data_index] = command_strtoul(databyte, NULL, 16); + } + +} +#endif//CONFIG_SET_VENDOR_EXTENSION_IE +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 13, 0)) +int rwnx_cfg80211_set_monitor_channel_(struct wiphy *wiphy, struct net_device *dev, + struct cfg80211_chan_def *chandef); +#else +int rwnx_cfg80211_set_monitor_channel_(struct wiphy *wiphy, + struct cfg80211_chan_def *chandef); +#endif +int rwnx_atoi2(char *value, int c_len); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 13, 0)) +void set_mon_chan(struct 
rwnx_vif *vif, struct net_device *dev, char *parameter) +#else +void set_mon_chan(struct rwnx_vif *vif, char *parameter) +#endif +{ + struct cfg80211_chan_def *chandef = NULL; + int freq = 0; + + + chandef = (struct cfg80211_chan_def *)vmalloc(sizeof(struct cfg80211_chan_def)); + memset(chandef, 0, sizeof(struct cfg80211_chan_def)); + chandef->chan = (struct ieee80211_channel *)vmalloc(sizeof(struct ieee80211_channel)); + memset(chandef->chan, 0, sizeof(struct ieee80211_channel)); + + freq = rwnx_atoi2(parameter, 4); + + if(freq <= 2484){ + chandef->chan->band = NL80211_BAND_2GHZ; + }else{ + chandef->chan->band = NL80211_BAND_5GHZ; + } + chandef->chan->center_freq = freq; + chandef->width = NL80211_CHAN_WIDTH_20; + chandef->center_freq1 = chandef->chan->center_freq; + chandef->center_freq2 = 0; + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 13, 0)) + rwnx_cfg80211_set_monitor_channel_(vif->rwnx_hw->wiphy, dev, chandef); +#else + rwnx_cfg80211_set_monitor_channel_(vif->rwnx_hw->wiphy, chandef); +#endif + + vfree(chandef->chan); + vfree(chandef); + +} + +int get_cs_info(struct rwnx_vif *vif, u8 *mac_addr, u8 *val) +{ + struct rwnx_sta *sta = NULL; + u8 phymode = 0; + u32 tx_phyrate = 0, rx_phyrate = 0; + struct aicwf_cs_info cs_info; + struct station_info sinfo; + struct rwnx_sta_stats *stats; + struct rx_vector_2 *rx_vect2; + struct rwnx_chanctx *ctxt; + + if (RWNX_VIF_TYPE(vif) == NL80211_IFTYPE_MONITOR) + return -EINVAL; + else if ((RWNX_VIF_TYPE(vif) == NL80211_IFTYPE_STATION) || + (RWNX_VIF_TYPE(vif) == NL80211_IFTYPE_P2P_CLIENT)) { + AICWFDBG(LOGINFO, "%s: sta mode\n", __func__); + if (vif->sta.ap) + sta = vif->sta.ap; + } else { + struct rwnx_sta *sta_iter; + AICWFDBG(LOGINFO, "%s: ap mode, mac=%pM\n", __func__, mac_addr); + spin_lock_bh(&vif->rwnx_hw->cb_lock); + list_for_each_entry(sta_iter, &vif->ap.sta_list, list) { + if (sta_iter->valid && ether_addr_equal(sta_iter->mac_addr, mac_addr)) { + sta = sta_iter; + break; + } + } + spin_unlock_bh(&vif->rwnx_hw->cb_lock); + } + + memset(&cs_info, 0, sizeof(struct aicwf_cs_info)); + memcpy(cs_info.countrycode, country_code, 4); + + if((sta == NULL) && (RWNX_VIF_TYPE(vif) == NL80211_IFTYPE_AP)) { + sta = &vif->rwnx_hw->sta_table[vif->ap.bcmc_index]; + ctxt = &vif->rwnx_hw->chanctx_table[vif->ch_index]; + sta->center_freq = ctxt->chan_def.chan->center_freq; + sta->width = ctxt->chan_def.width; + } + + if(sta) { + stats = &sta->stats; + rx_vect2 = &stats->last_rx.rx_vect2; + + AICWFDBG(LOGINFO, "fill: staidx=%d\n", sta->sta_idx); + rwnx_fill_station_info(sta, vif, &sinfo, &phymode, &tx_phyrate, &rx_phyrate); + + cs_info.rssi = sinfo.signal; + //cs_info.bandwidth = sta->width; + if(sta->width < NL80211_CHAN_WIDTH_40) + cs_info.bandwidth = 0; + else if(sta->width < NL80211_CHAN_WIDTH_80) + cs_info.bandwidth = 1; + else + cs_info.bandwidth = 2; + cs_info.freq = sta->center_freq; + + cs_info.phymode = phymode; // 0:b 1:g 2:a 3:n 4:ac 5:ax + //snr (int8_t)rx_vect2->evm1, (int8_t)rx_vect2->evm2 + cs_info.snr = (int8_t)(rx_vect2->evm1) + (int8_t)(rx_vect2->evm2) / 2; + cs_info.noise = cs_info.rssi - cs_info.snr; //rssi - snr + + //chanutil TBD + cs_info.chan_time_ms = stats->last_chan_time; + cs_info.chan_time_busy_ms = stats->last_chan_busy_time; + cs_info.tx_ack_succ_stat = stats->tx_ack_succ_stat; + cs_info.tx_ack_fail_stat = stats->tx_ack_fail_stat; + cs_info.chan_tx_busy_time = stats->last_chan_tx_busy_time; + + cs_info.txpwr = cs_info.freq >5000? 
userconfig_info.txpwr_lvl_v4.pwrlvl_11a_5g[0] : userconfig_info.txpwr_lvl_v4.pwrlvl_11ax_2g4[0]; + if(sta->sta_idx < NX_REMOTE_STA_MAX) { + cs_info.rxnss = sinfo.rxrate.nss; + cs_info.rxmcs = sinfo.rxrate.mcs; + cs_info.txnss = sinfo.txrate.nss; + cs_info.txmcs = sinfo.txrate.mcs; + } + + cs_info.tx_phyrate = tx_phyrate; + cs_info.rx_phyrate = rx_phyrate; + + memcpy(val, &cs_info, sizeof(struct aicwf_cs_info)); + + AICWFDBG(LOGINFO, "phymode=%d. bw=%d, rssi=%d, tx_phyrate=%d, rx_phyrate=%d\n", cs_info.phymode, cs_info.bandwidth, cs_info.rssi, + tx_phyrate, rx_phyrate); + + return sizeof(struct aicwf_cs_info); + } + + return 0; +} + +int android_priv_cmd(struct net_device *net, struct ifreq *ifr, int cmd) +{ +#define PRIVATE_COMMAND_MAX_LEN 8192 +#define PRIVATE_COMMAND_DEF_LEN 4096 + + struct rwnx_vif *vif = netdev_priv(net); + int ret = 0; + char *command = NULL; + int bytes_written = 0; + android_wifi_priv_cmd priv_cmd; + int buf_size = 0; + int skip = 0; + char *country = NULL; + struct ieee80211_regdomain *regdomain; + //int index = 0; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + ///todo: add our lock + //net_os_wake_lock(net); + + +/* if (!capable(CAP_NET_ADMIN)) { + ret = -EPERM; + goto exit; + }*/ + if (!ifr->ifr_data) { + ret = -EINVAL; + goto exit; + } + +#ifdef CONFIG_COMPAT +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0)) + if (in_compat_syscall()) +#else + if (is_compat_task()) +#endif + { + compat_android_wifi_priv_cmd compat_priv_cmd; + if (copy_from_user(&compat_priv_cmd, ifr->ifr_data, sizeof(compat_android_wifi_priv_cmd))) { + ret = -EFAULT; + goto exit; + } + priv_cmd.buf = compat_ptr(compat_priv_cmd.buf); + priv_cmd.used_len = compat_priv_cmd.used_len; + priv_cmd.total_len = compat_priv_cmd.total_len; + } else +#endif /* CONFIG_COMPAT */ + { + if (copy_from_user(&priv_cmd, ifr->ifr_data, sizeof(android_wifi_priv_cmd))) { + ret = -EFAULT; + goto exit; + } + } + if ((priv_cmd.total_len > PRIVATE_COMMAND_MAX_LEN) || (priv_cmd.total_len < 0)) { + AICWFDBG(LOGERROR, "%s: buf length invalid:%d\n", __FUNCTION__, priv_cmd.total_len); + ret = -EINVAL; + goto exit; + } + + buf_size = max(priv_cmd.total_len, PRIVATE_COMMAND_DEF_LEN); + command = kmalloc((buf_size + 1), GFP_KERNEL); + + if (!command) + { + AICWFDBG(LOGERROR, "%s: failed to allocate memory\n", __FUNCTION__); + ret = -ENOMEM; + goto exit; + } + if (copy_from_user(command, priv_cmd.buf, priv_cmd.total_len)) { + ret = -EFAULT; + goto exit; + } + command[priv_cmd.total_len] = '\0'; + + /* outputs */ + AICWFDBG(LOGINFO, "%s: Android private cmd \"%s\" on %s\n", __FUNCTION__, command, ifr->ifr_name); + AICWFDBG(LOGINFO, "cmd = %d\n", cmd); + AICWFDBG(LOGINFO, "buf_size=%d\n", buf_size); + + +#if 1//Handle Android command + if(!strncasecmp(command, CMD_SET_COUNTRY, strlen(CMD_SET_COUNTRY)) && + strncasecmp(command, "country_set", strlen("country_set"))) { + skip = strlen(CMD_SET_COUNTRY) + 1; + country = command + skip; + if (!country || strlen(country) < RWNX_COUNTRY_CODE_LEN) { + AICWFDBG(LOGERROR, "%s: invalid country code\n", __func__); + ret = -EINVAL; + goto exit; + } +#if 0 + for(index = 0; index < reg_regdb_size; index++){ + regdomain = getRegdomainFromRwnxDBIndex(vif->rwnx_hw->wiphy, index); + if((ret = regulatory_set_wiphy_regd(vif->rwnx_hw->wiphy, regdomain))){ + AICWFDBG(LOGERROR, "regulatory_set_wiphy_regd fail \r\n"); + }else{ + AICWFDBG(LOGINFO, "regulatory_set_wiphy_regd ok \r\n"); + } + } +#endif + AICWFDBG(LOGINFO, "%s country code:%c%c\n", __func__, toupper(country[0]), toupper(country[1])); + regdomain = 
getRegdomainFromRwnxDB(vif->rwnx_hw->wiphy, country); +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 0, 0) + if((ret = regulatory_set_wiphy_regd(vif->rwnx_hw->wiphy, regdomain))){ + AICWFDBG(LOGERROR, "regulatory_set_wiphy_regd fail \r\n"); + } +#else + wiphy_apply_custom_regulatory(vif->rwnx_hw->wiphy, regdomain); +#endif + +#ifdef CONFIG_RADAR_OR_IR_DETECT + rwnx_radar_set_domain(&vif->rwnx_hw->radar, regdomain->dfs_region); +#endif + +#ifdef CONFIG_POWER_LIMIT + if (!testmode){ + rwnx_send_me_chan_config_req(vif->rwnx_hw, country); + } +#endif + ret = 0; + goto exit; + } +#ifdef CONFIG_SET_VENDOR_EXTENSION_IE + else if(!strncasecmp(command, CMD_SET_VENDOR_EX_IE, strlen(CMD_SET_VENDOR_EX_IE))){ + set_vendor_extension_ie(command); + ret = 0; + goto exit; + } +#endif//CONFIG_SET_VENDOR_EXTENSION_IE + else if(!strncasecmp(command, CMD_SET_AP_WPS_P2P_IE, strlen(CMD_SET_AP_WPS_P2P_IE))){ + ret = 0; + goto exit; + }else if(!strncasecmp(command, CMD_SET_TESTMODE, strlen(CMD_SET_TESTMODE))){ + if(g_rwnx_plat && g_rwnx_plat->usbdev->rwnx_hw){ + if (g_rwnx_plat->usbdev->chipid == PRODUCT_ID_AIC8800DW || + (g_rwnx_plat->usbdev->chipid == PRODUCT_ID_AIC8800DC) || + (g_rwnx_plat->usbdev->chipid == PRODUCT_ID_AIC8800D81) || + (g_rwnx_plat->usbdev->chipid == PRODUCT_ID_AIC8800D81X2) || + (g_rwnx_plat->usbdev->chipid == PRODUCT_ID_AIC8800D89X2)){ + set_testmode(!testmode); + rwnx_send_reboot(g_rwnx_plat->usbdev->rwnx_hw); + } + } + ret = 0; + goto exit; + }else if(!strncasecmp(command, CMD_SET_BLE_WAKE, strlen(CMD_SET_BLE_WAKE))){ + if(g_rwnx_plat && g_rwnx_plat->usbdev->rwnx_hw){ + set_testmode(5); + rwnx_send_reboot(g_rwnx_plat->usbdev->rwnx_hw); + } + ret = 0; + goto exit; + } +#ifdef CONFIG_RWNX_MON_DATA + else if(!strncasecmp(command, CMD_SET_MON_FREQ, strlen(CMD_SET_MON_FREQ))){ + char *set_parameter; + skip = strlen(CMD_SET_MON_FREQ) + 1; + set_parameter = command + skip; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 13, 0)) + set_mon_chan(vif, net, set_parameter); +#else + set_mon_chan(vif, set_parameter); +#endif + ret = 0; + goto exit; + } +#endif + else if(!strncasecmp(command, CMD_GET_CS_INFO, strlen(CMD_GET_CS_INFO))) { + u8 mac_addr[6]; + u8 val[128]; + int i; + u8 *offset; + + AICWFDBG(LOGDEBUG, "CMD_GET_CS_INFO,len=%d\n", priv_cmd.total_len); + offset = command + (strlen(CMD_GET_CS_INFO) + 1); + for(i=0; i<6; i++) { + mac_addr[i] = command_strtoul(offset, NULL, 16); + offset += 3; + } + bytes_written = get_cs_info(vif, mac_addr, val); + AICWFDBG(LOGDEBUG, "bytewritten=%d, addr=%pM\n", bytes_written, mac_addr); + + + memcpy(command, val, bytes_written); + + goto exit; + } + +#endif//Handle Android command + + bytes_written = handle_private_cmd(net, command, priv_cmd.total_len); + +exit: + if (bytes_written >= 0) { + if ((bytes_written == 0) && (priv_cmd.total_len > 0)) { + command[0] = '\0'; + } + if (bytes_written >= priv_cmd.total_len) { + AICWFDBG(LOGINFO, "%s: err. 
bytes_written:%d >= buf_size:%d \n", + __FUNCTION__, bytes_written, buf_size); + goto exit; + } + bytes_written++; + priv_cmd.used_len = bytes_written; + if (copy_to_user(priv_cmd.buf, command, bytes_written)) { + AICWFDBG(LOGINFO, "%s: failed to copy data to user buffer\n", __FUNCTION__); + ret = -EFAULT; + } + } + else { + /* Propagate the error */ + ret = bytes_written; + } + + ///todo: add our unlock + //net_os_wake_unlock(net); + kfree(command); + return ret; +} diff --git a/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/aic_priv_cmd.h b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/aic_priv_cmd.h new file mode 100644 index 0000000..50c9783 --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/aic_priv_cmd.h @@ -0,0 +1,64 @@ +/** + ****************************************************************************** + * + * @file private_cmd.h + * + * Copyright (C) Aicsemi 2018-2024 + * + ****************************************************************************** + */ + +#ifndef _AIC_PRIV_CMD_H_ +#define _AIC_PRIV_CMD_H_ + +#include "rwnx_defs.h" + +typedef struct _android_wifi_priv_cmd { + char *buf; + int used_len; + int total_len; +} android_wifi_priv_cmd; + +struct aicwf_cs_info { + u8_l phymode; + u8_l bandwidth; + u16_l freq; + + s8_l rssi; + s8_l snr; + s8_l noise; + u8_l txpwr; + + //chanutil + u16_l chan_time_ms; + u16_l chan_time_busy_ms; + + char countrycode[4]; + u8_l rxnss; + u8_l rxmcs; + u8_l txnss; + u8_l txmcs; + + u32_l tx_phyrate; + u32_l rx_phyrate; + + u32_l tx_ack_succ_stat; + u32_l tx_ack_fail_stat; + + u16_l chan_tx_busy_time; +}; + +#ifdef CONFIG_COMPAT +typedef struct _compat_android_wifi_priv_cmd { + compat_caddr_t buf; + int used_len; + int total_len; +} compat_android_wifi_priv_cmd; +#endif /* CONFIG_COMPAT */ + +int android_priv_cmd(struct net_device *net, struct ifreq *ifr, int cmd); +int get_cs_info(struct rwnx_vif *vif, u8 *mac_addr, u8 *val); + + +#endif /* _AIC_PRIV_CMD_H_ */ + diff --git a/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/aic_vendor.c b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/aic_vendor.c new file mode 100644 index 0000000..5394c71 --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/aic_vendor.c @@ -0,0 +1,1073 @@ +#include "aic_vendor.h" +#include "rwnx_defs.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include "rwnx_version_gen.h" + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0) + +static struct wifi_ring_buffer_status ring_buffer[] = { + { + .name = "aicwf_ring_buffer0", + .flags = 0, + .ring_id = 0, + .verbose_level = 0, + .written_bytes = 0, + .read_bytes = 0, + .written_records = 0, + }, +}; + +static struct wlan_driver_wake_reason_cnt_t wake_reason_cnt = { + .total_cmd_event_wake = 10, +}; + +enum apf_attributes { + APF_ATTRIBUTE_VERSION, + APF_ATTRIBUTE_MAX_LEN, + APF_ATTRIBUTE_PROGRAM, + APF_ATTRIBUTE_PROGRAM_LEN, + APF_ATTRIBUTE_MAX +}; + +#endif + +int aic_dev_start_mkeep_alive(struct rwnx_hw *rwnx_hw, struct rwnx_vif *rwnx_vif, + u8 mkeep_alive_id, u8 *ip_pkt, u16 ip_pkt_len, u8 *src_mac, u8 *dst_mac, u32 period_msec) +{ + u8 *data, *pos; + + data = kzalloc(ip_pkt_len + 14, GFP_KERNEL); + if (!data) + return -ENOMEM; + + pos = data; + memcpy(pos, dst_mac, 6); + pos += 6; + memcpy(pos, src_mac, 6); + pos += 6; + /* Mapping Ethernet type (ETHERTYPE_IP: 0x0800) */ + *(pos++) = 0x08; + *(pos++) = 0x00; + + /* Mapping IP pkt */ + memcpy(pos, ip_pkt, ip_pkt_len); + pos += ip_pkt_len; + + //add send 802.3 pkt(raw data) + kfree(data); + + return 0; +} + +int 
aic_dev_stop_mkeep_alive(struct rwnx_hw *rwnx_hw, struct rwnx_vif *rwnx_vif, u8 mkeep_alive_id) +{ + int res = -1; + + /* + * The mkeep_alive packet is for STA interface only; if the bss is configured as AP, + * dongle shall reject a mkeep_alive request. + */ + if (rwnx_vif->wdev.iftype != NL80211_IFTYPE_STATION) + return res; + + printk("%s execution\n", __func__); + + //add send stop keep alive + res = 0; + return res; +} + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0) +static int aicwf_vendor_start_mkeep_alive(struct wiphy *wiphy, struct wireless_dev *wdev, + const void *data, int len) +{ + /* max size of IP packet for keep alive */ + const int MKEEP_ALIVE_IP_PKT_MAX = 256; + + int ret = 0, rem, type; + u8 mkeep_alive_id = 0; + u8 *ip_pkt = NULL; + u16 ip_pkt_len = 0; + u8 src_mac[6]; + u8 dst_mac[6]; + u32 period_msec = 0; + const struct nlattr *iter; + struct rwnx_hw *rwnx_hw = wiphy_priv(wiphy); + struct rwnx_vif *rwnx_vif = container_of(wdev, struct rwnx_vif, wdev); + gfp_t kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL; + printk("%s\n", __func__); + + nla_for_each_attr(iter, data, len, rem) { + type = nla_type(iter); + switch (type) { + case MKEEP_ALIVE_ATTRIBUTE_ID: + mkeep_alive_id = nla_get_u8(iter); + break; + case MKEEP_ALIVE_ATTRIBUTE_IP_PKT_LEN: + ip_pkt_len = nla_get_u16(iter); + if (ip_pkt_len > MKEEP_ALIVE_IP_PKT_MAX) { + ret = -EINVAL; + goto exit; + } + break; + case MKEEP_ALIVE_ATTRIBUTE_IP_PKT: + if (!ip_pkt_len) { + ret = -EINVAL; + printk("ip packet length is 0\n"); + goto exit; + } + ip_pkt = (u8 *)kzalloc(ip_pkt_len, kflags); + if (ip_pkt == NULL) { + ret = -ENOMEM; + printk("Failed to allocate mem for ip packet\n"); + goto exit; + } + memcpy(ip_pkt, (u8 *)nla_data(iter), ip_pkt_len); + break; + case MKEEP_ALIVE_ATTRIBUTE_SRC_MAC_ADDR: + memcpy(src_mac, nla_data(iter), 6); + break; + case MKEEP_ALIVE_ATTRIBUTE_DST_MAC_ADDR: + memcpy(dst_mac, nla_data(iter), 6); + break; + case MKEEP_ALIVE_ATTRIBUTE_PERIOD_MSEC: + period_msec = nla_get_u32(iter); + break; + default: + pr_err("%s(%d), Unknown type: %d\n", __func__, __LINE__, type); + ret = -EINVAL; + goto exit; + } + } + + if (ip_pkt == NULL) { + ret = -EINVAL; + printk("ip packet is NULL\n"); + goto exit; + } + + ret = aic_dev_start_mkeep_alive(rwnx_hw, rwnx_vif, mkeep_alive_id, ip_pkt, ip_pkt_len, src_mac, + dst_mac, period_msec); + if (ret < 0) { + printk("start_mkeep_alive is failed ret: %d\n", ret); + } + +exit: + if (ip_pkt) { + kfree(ip_pkt); + } + + return ret; +} + +static int aicwf_vendor_stop_mkeep_alive(struct wiphy *wiphy, struct wireless_dev *wdev, + const void *data, int len) +{ + int ret = 0, rem, type; + u8 mkeep_alive_id = 0; + const struct nlattr *iter; + struct rwnx_hw *rwnx_hw = wiphy_priv(wiphy); + struct rwnx_vif *rwnx_vif = container_of(wdev, struct rwnx_vif, wdev); + + printk("%s\n", __func__); + nla_for_each_attr(iter, data, len, rem) { + type = nla_type(iter); + switch (type) { + case MKEEP_ALIVE_ATTRIBUTE_ID: + mkeep_alive_id = nla_get_u8(iter); + break; + default: + pr_err("%s(%d), Unknown type: %d\n", __func__, __LINE__, type); + ret = -EINVAL; + break; + } + } + + ret = aic_dev_stop_mkeep_alive(rwnx_hw, rwnx_vif, mkeep_alive_id); + if (ret < 0) { + printk("stop_mkeep_alive is failed ret: %d\n", ret); + } + + return ret; +} + +static int aicwf_vendor_get_ver(struct wiphy *wiphy, struct wireless_dev *wdev, + const void *data, int len) +{ + int ret = 0, rem, type; + const struct nlattr *iter; + int payload = 0; + char version[128]; + int attr = -1; + struct sk_buff *reply; + + 
nla_for_each_attr(iter, data, len, rem) { + type = nla_type(iter); + switch (type) { + case LOGGER_ATTRIBUTE_DRIVER_VER: + memcpy(version, RWNX_VERS_BANNER, sizeof(RWNX_VERS_BANNER)); + payload = strlen(version); + attr = LOGGER_ATTRIBUTE_DRIVER_VER; + break; + case LOGGER_ATTRIBUTE_FW_VER: + memcpy(version, wiphy->fw_version, sizeof(wiphy->fw_version)); + payload = strlen(version); + attr = LOGGER_ATTRIBUTE_FW_VER; + break; + default: + AICWFDBG(LOGERROR, "%s(%d), Unknown type: %d\n", __func__, __LINE__, type); + return -EINVAL; + } + } + + if (attr < 0) + return -EINVAL; + + reply = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, payload); + + if (!reply) + return -ENOMEM; + + if (nla_put(reply, attr, + payload, version)) { + wiphy_err(wiphy, "%s put version error\n", __func__); + goto out_put_fail; + } + + ret = cfg80211_vendor_cmd_reply(reply); + if (ret) + wiphy_err(wiphy, "%s reply cmd error\n", __func__); + return ret; + +out_put_fail: + kfree_skb(reply); + return -EMSGSIZE; +} + +static int aicwf_vendor_subcmd_get_channel_list(struct wiphy *wiphy, struct wireless_dev *wdev, + const void *data, int len) +{ + int ret = 0, rem, type; + const struct nlattr *iter; + struct sk_buff *reply; + int num_channels = 0; + int *channel_list = NULL; + int payload; + int i = 0; + struct rwnx_hw *rwnx_hw = wiphy_priv(wiphy); + struct ieee80211_supported_band *rwnx_band_2GHz = rwnx_hw->wiphy->bands[NL80211_BAND_2GHZ]; + struct ieee80211_supported_band *rwnx_band_5GHz = rwnx_hw->wiphy->bands[NL80211_BAND_5GHZ]; + + num_channels += rwnx_band_2GHz->n_channels; + num_channels += (rwnx_hw->band_5g_support) ? rwnx_band_5GHz->n_channels : 0; + + channel_list = (int *)kzalloc(sizeof(int) * num_channels, GFP_KERNEL); + if (!channel_list) + return -ENOMEM; + + for (i = 0; i < rwnx_band_2GHz->n_channels; i++) + channel_list[i] = rwnx_band_2GHz->channels[i].center_freq; + + for (; rwnx_hw->band_5g_support && i < num_channels; i++) + channel_list[i] = rwnx_band_5GHz->channels[i].center_freq; + + payload = sizeof(num_channels) + sizeof(int) * num_channels + 4; + + nla_for_each_attr(iter, data, len, rem) { + type = nla_type(iter); + switch (type) { + case GSCAN_ATTRIBUTE_BAND: + reply = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, payload); + + if (!reply) + return -ENOMEM; + + if (nla_put_u32(reply, GSCAN_ATTRIBUTE_NUM_CHANNELS, num_channels)) + goto out_put_fail; + + if (nla_put(reply, GSCAN_ATTRIBUTE_CHANNEL_LIST, sizeof(int) * num_channels, channel_list)) + goto out_put_fail; + + ret = cfg80211_vendor_cmd_reply(reply); + if (ret) + wiphy_err(wiphy, "%s reply cmd error\n", __func__); + break; + default: + pr_err("%s(%d), Unknown type: %d\n", __func__, __LINE__, type); + return -EINVAL; + } + } + + kfree(channel_list); + return ret; + +out_put_fail: + kfree(channel_list); + kfree_skb(reply); + return -EMSGSIZE; +} + +struct ieee80211_regdomain *getRegdomainFromRwnxDB(struct wiphy *wiphy, char *alpha2); + +static int aicwf_vendor_subcmd_set_country_code(struct wiphy *wiphy, struct wireless_dev *wdev, + const void *data, int len) +{ + int ret = 0, rem, type; + const struct nlattr *iter; + struct ieee80211_regdomain *regdomain; + + nla_for_each_attr(iter, data, len, rem) { + type = nla_type(iter); + switch (type) { + case ANDR_WIFI_ATTRIBUTE_COUNTRY: + printk("%s(%d), ANDR_WIFI_ATTRIBUTE_COUNTRY: %s\n", __func__, __LINE__, (char *)nla_data(iter)); + regdomain = getRegdomainFromRwnxDB(wiphy, (char *)nla_data(iter)); +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 0, 0) + if((ret = regulatory_set_wiphy_regd(wiphy, 
regdomain))){ + AICWFDBG(LOGERROR, "regulatory_set_wiphy_regd fail \r\n"); + } +#else + wiphy_apply_custom_regulatory(wiphy, regdomain); +#endif + break; + default: + pr_err("%s(%d), Unknown type: %d\n", __func__, __LINE__, type); + return -EINVAL; + } + } + + /* TODO + * Add handle in the future! + */ + + return ret; +} + +static int aicwf_vendor_logger_trigger_memory_dump(struct wiphy *wiphy, struct wireless_dev *wdev, + const void *data, int len) +{ + /* TODO + * Add handle in the future! + */ + return 0; +} + +static int aicwf_vendor_subcmd_get_feature_set(struct wiphy *wiphy, struct wireless_dev *wdev, + const void *data, int len) +{ + int ret; + struct sk_buff *reply; + uint32_t feature = 0, payload; + struct rwnx_hw *rwnx_hw = wiphy_priv(wiphy); + + payload = sizeof(feature); + reply = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, payload); + + if (!reply) + return -ENOMEM; + + /* TODO + * Add handle in the future! + */ + /*bit 1:Basic infrastructure mode*/ + if (wiphy->interface_modes & BIT(NL80211_IFTYPE_STATION)) + feature |= WIFI_FEATURE_INFRA; + + /*bit 2:Support for 5 GHz Band*/ + if (rwnx_hw->band_5g_support) + feature |= WIFI_FEATURE_INFRA_5G; + + /*bit3:HOTSPOT is a supplicant feature, enable it by default*/ + feature |= WIFI_FEATURE_HOTSPOT; + + /*bit 4:P2P*/ + if ((wiphy->interface_modes & BIT(NL80211_IFTYPE_P2P_CLIENT)) && + (wiphy->interface_modes & BIT(NL80211_IFTYPE_P2P_GO))) + feature |= WIFI_FEATURE_P2P; + + /*bit 5:soft AP feature supported*/ + if (wiphy->interface_modes & BIT(NL80211_IFTYPE_AP)) + feature |= WIFI_FEATURE_SOFT_AP; + + /*bit 18:WiFi Logger*/ + feature |= WIFI_FEATURE_LOGGER; + + /*bit 21:WiFi mkeep_alive*/ + feature |= WIFI_FEATURE_MKEEP_ALIVE; + + if (nla_put_u32(reply, ANDR_WIFI_ATTRIBUTE_NUM_FEATURE_SET, feature)) { + wiphy_err(wiphy, "%s put u32 error\n", __func__); + goto out_put_fail; + } + + ret = cfg80211_vendor_cmd_reply(reply); + if (ret) + wiphy_err(wiphy, "%s reply cmd error\n", __func__); + + return ret; + +out_put_fail: + kfree_skb(reply); + return -EMSGSIZE; +} + +static int aicwf_vendor_logger_get_feature(struct wiphy *wiphy, struct wireless_dev *wdev, + const void *data, int len) +{ + int ret; + struct sk_buff *reply; + uint32_t feature = 0, payload; + + payload = sizeof(feature); + reply = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, payload); + + if (!reply) + return -ENOMEM; + + feature |= WIFI_LOGGER_MEMORY_DUMP_SUPPORTED; + feature |= WIFI_LOGGER_CONNECT_EVENT_SUPPORTED; + + /*vts will test wake reason state function*/ + feature |= WIFI_LOGGER_WAKE_LOCK_SUPPORTED; + + if (nla_put_u32(reply, ANDR_WIFI_ATTRIBUTE_NUM_FEATURE_SET, feature)) { + wiphy_err(wiphy, "put skb u32 failed\n"); + goto out_put_fail; + } + + ret = cfg80211_vendor_cmd_reply(reply); + if (ret) + wiphy_err(wiphy, "reply cmd error\n"); + + return ret; + +out_put_fail: + kfree_skb(reply); + return -EMSGSIZE; +} + +static int aicwf_vendor_logger_get_ring_status(struct wiphy *wiphy, struct wireless_dev *wdev, + const void *data, int len) +{ + int ret; + struct sk_buff *reply; + uint32_t payload; + uint32_t ring_buffer_nums = sizeof(ring_buffer) / sizeof(ring_buffer[0]); + + payload = sizeof(ring_buffer_nums) + sizeof(ring_buffer); + reply = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, payload); + + if (!reply) + return -ENOMEM; + + if (nla_put_u32(reply, LOGGER_ATTRIBUTE_RING_NUM, ring_buffer_nums)) { + wiphy_err(wiphy, "put skb u32 failed\n"); + goto out_put_fail; + } + + if (nla_put(reply, LOGGER_ATTRIBUTE_RING_STATUS, sizeof(ring_buffer), ring_buffer)) { + 
wiphy_err(wiphy, "put skb failed\n"); + goto out_put_fail; + } + + ret = cfg80211_vendor_cmd_reply(reply); + if (ret) + wiphy_err(wiphy, "reply cmd error\n"); + + return ret; + +out_put_fail: + kfree_skb(reply); + return -EMSGSIZE; +} + +static int aicwf_vendor_logger_start_logging(struct wiphy *wiphy, struct wireless_dev *wdev, + const void *data, int len) +{ + int ret = 0, rem, type, intval, size, i; + const struct nlattr *iter; + struct wifi_ring_buffer_status rb; + + nla_for_each_attr(iter, data, len, rem) { + type = nla_type(iter); + switch (type) { + case LOGGER_ATTRIBUTE_LOG_LEVEL: + rb.verbose_level = nla_get_u32(iter); + break; + case LOGGER_ATTRIBUTE_RING_FLAGS: + rb.flags = nla_get_u32(iter); + break; + case LOGGER_ATTRIBUTE_LOG_TIME_INTVAL: + intval = nla_get_u32(iter); + break; + case LOGGER_ATTRIBUTE_LOG_MIN_DATA_SIZE: + size = nla_get_u32(iter); + break; + case LOGGER_ATTRIBUTE_RING_NAME: + strcpy(rb.name, nla_data(iter)); + break; + default: + AICWFDBG(LOGERROR, "%s(%d), Unknown type: %d\n", __func__, __LINE__, type); + return -EINVAL; + } + } + + ret = -EINVAL; + for (i = 0; i < sizeof(ring_buffer) / sizeof(ring_buffer[0]); i++) { + if (strcmp(rb.name, ring_buffer[i].name) == 0) { + ret = 0; + break; + } + } + + /* TODO + * Add handle in the future + */ + + return ret; +} + +static int aicwf_vendor_logger_get_ring_data(struct wiphy *wiphy, struct wireless_dev *wdev, + const void *data, int len) +{ + int ret = 0, rem, type, i; + const struct nlattr *iter; + struct wifi_ring_buffer_status rb; + + nla_for_each_attr(iter, data, len, rem) { + type = nla_type(iter); + switch (type) { + case LOGGER_ATTRIBUTE_RING_NAME: + strcpy(rb.name, nla_data(iter)); + break; + default: + pr_err("%s(%d), Unknown type: %d\n", __func__, __LINE__, type); + return -EINVAL; + } + } + + ret = -EINVAL; + for (i = 0; i < sizeof(ring_buffer) / sizeof(ring_buffer[0]); i++) { + if (strcmp(rb.name, ring_buffer[i].name) == 0) { + ret = 0; + break; + } + } + + /* TODO + * Add handle in the future + */ + + return ret; +} + +static int aicwf_vendor_logger_get_wake_reason_stats(struct wiphy *wiphy, struct wireless_dev *wdev, + const void *data, int len) +{ + int ret; + struct sk_buff *reply; + uint32_t payload; + + payload = sizeof(wake_reason_cnt.total_cmd_event_wake); + reply = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, payload); + + if (!reply) + return -ENOMEM; + + /* TODO + * Add handle in the future + */ + if (nla_put_u32(reply, WAKE_STAT_ATTRIBUTE_TOTAL_CMD_EVENT, wake_reason_cnt.total_cmd_event_wake)) + goto out_put_fail; + + ret = cfg80211_vendor_cmd_reply(reply); + if (ret) + wiphy_err(wiphy, "reply cmd error\n"); + + return ret; + +out_put_fail: + kfree_skb(reply); + return -EMSGSIZE; +} + +static int aicwf_vendor_logger_get_tx_pkt_fates(struct wiphy *wiphy, struct wireless_dev *wdev, + const void *data, int len) +{ +#if 0 + int ret = 0, rem, type; + const struct nlattr *iter; + int fate_num; + int fate_data; + + AICWFDBG(LOGDEBUG, "%s Enter\r\n", __func__); + nla_for_each_attr(iter, data, len, rem) { + type = nla_type(iter); + switch (type) { + case LOGGER_ATTRIBUTE_PKT_FATE_NUM: + fate_num = nla_get_u32(iter); + break; + case LOGGER_ATTRIBUTE_PKT_FATE_DATA: + fate_data = nla_get_u64(iter); + break; + default: + AICWFDBG(LOGERROR, "%s(%d), Unknown type: %d\n", __func__, __LINE__, type); + return -EINVAL; + } + } +#endif + /* TODO + * Add handle in the future + */ + return 0; +} + +static int aicwf_vendor_logger_get_rx_pkt_fates(struct wiphy *wiphy, struct wireless_dev *wdev, + const void *data, int 
len) +{ +#if 0 + int ret = 0, rem, type; + const struct nlattr *iter; + int fate_num; + int fate_data; + + AICWFDBG(LOGDEBUG, "%s Enter\r\n", __func__); + nla_for_each_attr(iter, data, len, rem) { + type = nla_type(iter); + switch (type) { + case LOGGER_ATTRIBUTE_PKT_FATE_NUM: + fate_num = nla_get_u32(iter); + break; + case LOGGER_ATTRIBUTE_PKT_FATE_DATA: + fate_data = nla_get_u64(iter); + break; + default: + AICWFDBG(LOGERROR, "%s(%d), Unknown type: %d\n", __func__, __LINE__, type); + return -EINVAL; + } + } +#endif + /* TODO + * Add handle in the future + */ + return 0; +} + +static int aicwf_vendor_logger_start_pkt_fate_monitoring(struct wiphy *wiphy, struct wireless_dev *wdev, + const void *data, int len) +{ + /* TODO + * Add handle in the future + */ + return 0; +} + +static int aicwf_vendor_apf_subcmd_get_capabilities(struct wiphy *wiphy, struct wireless_dev *wdev, + const void *data, int len) +{ + int ver, max_len; + int ret = 0; + struct sk_buff *skb = NULL; + + /* APF version */ + ver = 4; + /* APF memory size limit */ + max_len = 2048; + + + skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, max_len); + + ret = nla_put_u32(skb, APF_ATTRIBUTE_VERSION, ver); + if (ret < 0) { + AICWFDBG(LOGERROR, "Failed to put APF_ATTRIBUTE_VERSION, ret:%d\n", ret); + goto fail; + } + ret = nla_put_u32(skb, APF_ATTRIBUTE_MAX_LEN, max_len); + if (ret < 0) { + AICWFDBG(LOGERROR, "Failed to put APF_ATTRIBUTE_MAX_LEN, ret:%d\n", ret); + goto fail; + } + + ret = cfg80211_vendor_cmd_reply(skb); + if (unlikely(ret)) { + AICWFDBG(LOGERROR, "vendor command reply failed, ret=%d\n", ret); + } + +fail: + /* TODO + * Add handle in the future + */ + return ret; +} + +static int aicwf_vendor_sub_cmd_set_mac(struct wiphy *wiphy, struct wireless_dev *wdev, + const void *data, int len) +{ + int ret = 0, rem, type; + const struct nlattr *iter; + u8 mac[ETH_ALEN]; + + nla_for_each_attr(iter, data, len, rem) { + type = nla_type(iter); + switch (type) { + case WIFI_VENDOR_ATTR_DRIVER_MAC_ADDR: + memcpy(mac, nla_data(iter), ETH_ALEN); + printk("%s, %02X:%02X:%02X:%02X:%02X:%02X\n", __func__, + mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]); + break; + default: + pr_err("%s(%d), Unknown type: %d\n", __func__, __LINE__, type); + return -EINVAL; + } + } + + /* TODO + * Add handle in the future + */ + + return ret; +} +#endif + +static const struct nla_policy +aicwf_cfg80211_mkeep_alive_policy[MKEEP_ALIVE_ATTRIBUTE_MAX+1] = { + [0] = {.type = NLA_UNSPEC }, + [MKEEP_ALIVE_ATTRIBUTE_ID] = { .type = NLA_U8 }, + [MKEEP_ALIVE_ATTRIBUTE_IP_PKT] = { .type = NLA_MSECS }, + [MKEEP_ALIVE_ATTRIBUTE_IP_PKT_LEN] = { .type = NLA_U16 }, + [MKEEP_ALIVE_ATTRIBUTE_SRC_MAC_ADDR] = { .type = NLA_MSECS, + .len = ETH_ALEN }, + [MKEEP_ALIVE_ATTRIBUTE_DST_MAC_ADDR] = { .type = NLA_MSECS, + .len = ETH_ALEN }, + [MKEEP_ALIVE_ATTRIBUTE_PERIOD_MSEC] = { .type = NLA_U32 }, +}; + +static const struct nla_policy +aicwf_cfg80211_logger_policy[LOGGER_ATTRIBUTE_MAX + 1] = { + [0] = {.type = NLA_UNSPEC }, + [LOGGER_ATTRIBUTE_DRIVER_VER] = { .type = NLA_BINARY }, + [LOGGER_ATTRIBUTE_FW_VER] = { .type = NLA_BINARY }, + [LOGGER_ATTRIBUTE_LOG_LEVEL] = { .type = NLA_U32 }, + [LOGGER_ATTRIBUTE_RING_FLAGS] = { .type = NLA_U32 }, + [LOGGER_ATTRIBUTE_LOG_TIME_INTVAL] = { .type = NLA_U32 }, + [LOGGER_ATTRIBUTE_LOG_MIN_DATA_SIZE] = { .type = NLA_U32 }, + [LOGGER_ATTRIBUTE_RING_NAME] = { .type = NLA_STRING }, +}; + +static const struct nla_policy +aicwf_cfg80211_subcmd_policy[GSCAN_ATTRIBUTE_MAX + 1] = { + [0] = {.type = NLA_UNSPEC }, + [GSCAN_ATTRIBUTE_BAND] = { .type = 
NLA_U32 }, +}; + +static const struct nla_policy +aicwf_cfg80211_andr_wifi_policy[ANDR_WIFI_ATTRIBUTE_MAX + 1] = { + [0] = {.type = NLA_UNSPEC }, + [ANDR_WIFI_ATTRIBUTE_COUNTRY] = { .type = NLA_STRING }, +}; + +static const struct nla_policy +aicwf_cfg80211_subcmd_set_mac_policy[WIFI_VENDOR_ATTR_DRIVER_MAX + 1] = { + [0] = {.type = NLA_UNSPEC }, + [WIFI_VENDOR_ATTR_DRIVER_MAC_ADDR] = { .type = NLA_MSECS, .len = ETH_ALEN }, +}; + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0) +static int aicwf_dump_interface(struct wiphy *wiphy, + struct wireless_dev *wdev, struct sk_buff *skb, + const void *data, int data_len, + unsigned long *storage) +{ + return 0; +} +#endif + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0) + +const struct wiphy_vendor_command aicwf_vendor_cmd[] = { + { + { + .vendor_id = GOOGLE_OUI, + .subcmd = WIFI_OFFLOAD_SUBCMD_START_MKEEP_ALIVE + }, + .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV, + .doit = aicwf_vendor_start_mkeep_alive, +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0) + .dumpit = aicwf_dump_interface, +#endif +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0) + .policy = aicwf_cfg80211_mkeep_alive_policy, + .maxattr = MKEEP_ALIVE_ATTRIBUTE_MAX +#endif + }, + { + { + .vendor_id = GOOGLE_OUI, + .subcmd = WIFI_OFFLOAD_SUBCMD_STOP_MKEEP_ALIVE + }, + .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV, + .doit = aicwf_vendor_stop_mkeep_alive, +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0) + .dumpit = aicwf_dump_interface, +#endif +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0) + .policy = aicwf_cfg80211_mkeep_alive_policy, + .maxattr = MKEEP_ALIVE_ATTRIBUTE_MAX +#endif + }, + { + { + .vendor_id = GOOGLE_OUI, + .subcmd = LOGGER_GET_VER + }, + .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV, + .doit = aicwf_vendor_get_ver, +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0) + .dumpit = aicwf_dump_interface, +#endif +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0) + .policy = aicwf_cfg80211_logger_policy, + .maxattr = LOGGER_ATTRIBUTE_MAX +#endif + }, + { + { + .vendor_id = GOOGLE_OUI, + .subcmd = GSCAN_SUBCMD_GET_CHANNEL_LIST + }, + .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV, + .doit = aicwf_vendor_subcmd_get_channel_list, +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0) + .dumpit = aicwf_dump_interface, +#endif +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0) + .policy = aicwf_cfg80211_subcmd_policy, + .maxattr = GSCAN_ATTRIBUTE_MAX +#endif + }, + { + { + .vendor_id = GOOGLE_OUI, + .subcmd = WIFI_SUBCMD_SET_COUNTRY_CODE + }, + .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV, + .doit = aicwf_vendor_subcmd_set_country_code, +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0) + .dumpit = aicwf_dump_interface, +#endif +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0) + .policy = aicwf_cfg80211_andr_wifi_policy, + .maxattr = ANDR_WIFI_ATTRIBUTE_MAX +#endif + }, + { + { + .vendor_id = GOOGLE_OUI, + .subcmd = LOGGER_TRIGGER_MEM_DUMP + }, + .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV, + .doit = aicwf_vendor_logger_trigger_memory_dump, +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0) + .dumpit = aicwf_dump_interface, +#endif +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0) + .policy = VENDOR_CMD_RAW_DATA, +#endif + }, + { + { + .vendor_id = GOOGLE_OUI, + .subcmd = WIFI_SUBCMD_GET_FEATURE_SET + }, + .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV, + .doit = aicwf_vendor_subcmd_get_feature_set, +#if 
LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0) + .dumpit = aicwf_dump_interface, +#endif +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0) + .policy = VENDOR_CMD_RAW_DATA, +#endif + }, + { + { + .vendor_id = GOOGLE_OUI, + .subcmd = LOGGER_GET_FEATURE + }, + .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV, + .doit = aicwf_vendor_logger_get_feature, +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0) + .dumpit = aicwf_dump_interface, +#endif +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0) + .policy = VENDOR_CMD_RAW_DATA, +#endif + }, + { + { + .vendor_id = GOOGLE_OUI, + .subcmd = LOGGER_GET_RING_STATUS + }, + .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV, + .doit = aicwf_vendor_logger_get_ring_status, +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0) + .dumpit = aicwf_dump_interface, +#endif +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0) + .policy = VENDOR_CMD_RAW_DATA, +#endif + }, + { + { + .vendor_id = GOOGLE_OUI, + .subcmd = LOGGER_START_LOGGING + }, + .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV, + .doit = aicwf_vendor_logger_start_logging, +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0) + .dumpit = aicwf_dump_interface, +#endif +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0) + .policy = aicwf_cfg80211_logger_policy, + .maxattr = LOGGER_ATTRIBUTE_MAX +#endif + }, + { + { + .vendor_id = GOOGLE_OUI, + .subcmd = LOGGER_GET_RING_DATA + }, + .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV, + .doit = aicwf_vendor_logger_get_ring_data, +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0) + .dumpit = aicwf_dump_interface, +#endif +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0) + .policy = aicwf_cfg80211_logger_policy, + .maxattr = LOGGER_ATTRIBUTE_MAX +#endif + }, + { + { + .vendor_id = GOOGLE_OUI, + .subcmd = LOGGER_GET_WAKE_REASON_STATS + }, + .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV, + .doit = aicwf_vendor_logger_get_wake_reason_stats, +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0) + .dumpit = aicwf_dump_interface, +#endif +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0) + .policy = VENDOR_CMD_RAW_DATA, +#endif + }, + { + { + .vendor_id = GOOGLE_OUI, + .subcmd = APF_SUBCMD_GET_CAPABILITIES + }, + .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV, + .doit = aicwf_vendor_apf_subcmd_get_capabilities, +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0) + .dumpit = aicwf_dump_interface, +#endif +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0) + .policy = VENDOR_CMD_RAW_DATA, +#endif + }, + { + { + .vendor_id = GOOGLE_OUI, + .subcmd = LOGGER_START_PKT_FATE_MONITORING + }, + .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV, + .doit = aicwf_vendor_logger_start_pkt_fate_monitoring, +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0) + .dumpit = aicwf_dump_interface, +#endif +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0) + .policy = VENDOR_CMD_RAW_DATA, +#endif + }, + { + { + .vendor_id = GOOGLE_OUI, + .subcmd = LOGGER_GET_TX_PKT_FATES + }, + .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV, + .doit = aicwf_vendor_logger_get_tx_pkt_fates, +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0) + .dumpit = aicwf_dump_interface, +#endif +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0) + .policy = VENDOR_CMD_RAW_DATA, +#endif + }, + { + { + .vendor_id = GOOGLE_OUI, + .subcmd = LOGGER_GET_RX_PKT_FATES + }, + .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV, + .doit = aicwf_vendor_logger_get_rx_pkt_fates, +#if 
LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0) + .dumpit = aicwf_dump_interface, +#endif +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0) + .policy = VENDOR_CMD_RAW_DATA, +#endif + }, + { + { + .vendor_id = GOOGLE_OUI, + .subcmd = VENDOR_NL80211_SUBCMD_SET_MAC + }, + .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_RUNNING, + .doit = aicwf_vendor_sub_cmd_set_mac, +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0) + .dumpit = aicwf_dump_interface, +#endif +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0) + .policy = aicwf_cfg80211_subcmd_set_mac_policy, + .maxattr = WIFI_VENDOR_ATTR_DRIVER_MAX, +#endif + }, + { + { + .vendor_id = BRCM_OUI, + .subcmd = VENDOR_NL80211_SUBCMD_SET_MAC + }, + .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_RUNNING, + .doit = aicwf_vendor_sub_cmd_set_mac, +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0) + .dumpit = aicwf_dump_interface, +#endif +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0) + .policy = aicwf_cfg80211_subcmd_set_mac_policy, + .maxattr = WIFI_VENDOR_ATTR_DRIVER_MAX, +#endif + }, +}; +#endif + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0) +static const struct nl80211_vendor_cmd_info aicwf_vendor_events[] = { +}; +#endif + +int aicwf_vendor_init(struct wiphy *wiphy) +{ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0) + wiphy->vendor_commands = aicwf_vendor_cmd; + wiphy->n_vendor_commands = ARRAY_SIZE(aicwf_vendor_cmd); + wiphy->vendor_events = aicwf_vendor_events; + wiphy->n_vendor_events = ARRAY_SIZE(aicwf_vendor_events); +#endif + return 0; +} diff --git a/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/aic_vendor.h b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/aic_vendor.h new file mode 100644 index 0000000..0e1e3e0 --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/aic_vendor.h @@ -0,0 +1,346 @@ +#ifndef _AIC_VENDOR_H +#define _AIC_VENDOR_H + +#include + +#define GOOGLE_OUI 0x001A11 +#define BRCM_OUI 0x001018 + +typedef enum { + START_MKEEP_ALIVE, + STOP_MKEEP_ALIVE, +} GetCmdType; + +typedef enum { + /* don't use 0 as a valid subcommand */ + VENDOR_NL80211_SUBCMD_UNSPECIFIED, + + /* define all vendor startup commands between 0x0 and 0x0FFF */ + VENDOR_NL80211_SUBCMD_RANGE_START = 0x0001, + VENDOR_NL80211_SUBCMD_RANGE_END = 0x0FFF, + + /* define all GScan related commands between 0x1000 and 0x10FF */ + ANDROID_NL80211_SUBCMD_GSCAN_RANGE_START = 0x1000, + ANDROID_NL80211_SUBCMD_GSCAN_RANGE_END = 0x10FF, + + /* define all NearbyDiscovery related commands between 0x1100 and 0x11FF */ + ANDROID_NL80211_SUBCMD_NBD_RANGE_START = 0x1100, + ANDROID_NL80211_SUBCMD_NBD_RANGE_END = 0x11FF, + + /* define all RTT related commands between 0x1100 and 0x11FF */ + ANDROID_NL80211_SUBCMD_RTT_RANGE_START = 0x1100, + ANDROID_NL80211_SUBCMD_RTT_RANGE_END = 0x11FF, + + ANDROID_NL80211_SUBCMD_LSTATS_RANGE_START = 0x1200, + ANDROID_NL80211_SUBCMD_LSTATS_RANGE_END = 0x12FF, + + /* define all Logger related commands between 0x1400 and 0x14FF */ + ANDROID_NL80211_SUBCMD_DEBUG_RANGE_START = 0x1400, + ANDROID_NL80211_SUBCMD_DEBUG_RANGE_END = 0x14FF, + + /* define all wifi offload related commands between 0x1600 and 0x16FF */ + ANDROID_NL80211_SUBCMD_WIFI_OFFLOAD_RANGE_START = 0x1600, + ANDROID_NL80211_SUBCMD_WIFI_OFFLOAD_RANGE_END = 0x16FF, + + /* define all NAN related commands between 0x1700 and 0x17FF */ + ANDROID_NL80211_SUBCMD_NAN_RANGE_START = 0x1700, + ANDROID_NL80211_SUBCMD_NAN_RANGE_END = 0x17FF, + + /* define all Android Packet Filter related commands between 0x1800 and 0x18FF */ + 
ANDROID_NL80211_SUBCMD_PKT_FILTER_RANGE_START = 0x1800, + ANDROID_NL80211_SUBCMD_PKT_FILTER_RANGE_END = 0x18FF, + + /* This is reserved for future usage */ + +} ANDROID_VENDOR_SUB_COMMAND; + +typedef enum { + WIFI_OFFLOAD_SUBCMD_START_MKEEP_ALIVE = ANDROID_NL80211_SUBCMD_WIFI_OFFLOAD_RANGE_START, + WIFI_OFFLOAD_SUBCMD_STOP_MKEEP_ALIVE, +} WIFI_OFFLOAD_SUB_COMMAND; + + +enum mkeep_alive_attributes { + MKEEP_ALIVE_ATTRIBUTE_ID = 0x1, + MKEEP_ALIVE_ATTRIBUTE_IP_PKT, + MKEEP_ALIVE_ATTRIBUTE_IP_PKT_LEN, + MKEEP_ALIVE_ATTRIBUTE_SRC_MAC_ADDR, + MKEEP_ALIVE_ATTRIBUTE_DST_MAC_ADDR, + MKEEP_ALIVE_ATTRIBUTE_PERIOD_MSEC, + MKEEP_ALIVE_ATTRIBUTE_AFTER_LAST, + MKEEP_ALIVE_ATTRIBUTE_MAX = MKEEP_ALIVE_ATTRIBUTE_AFTER_LAST - 1 +}; + +enum debug_sub_command { + LOGGER_START_LOGGING = ANDROID_NL80211_SUBCMD_DEBUG_RANGE_START, + LOGGER_TRIGGER_MEM_DUMP, + LOGGER_GET_MEM_DUMP, + LOGGER_GET_VER, + LOGGER_GET_RING_STATUS, + LOGGER_GET_RING_DATA, + LOGGER_GET_FEATURE, + LOGGER_RESET_LOGGING, + LOGGER_TRIGGER_DRIVER_MEM_DUMP, + LOGGER_GET_DRIVER_MEM_DUMP, + LOGGER_START_PKT_FATE_MONITORING, + LOGGER_GET_TX_PKT_FATES, + LOGGER_GET_RX_PKT_FATES, + LOGGER_GET_WAKE_REASON_STATS, + LOGGER_DEBUG_GET_DUMP, + LOGGER_FILE_DUMP_DONE_IND, + LOGGER_SET_HAL_START, + LOGGER_HAL_STOP, + LOGGER_SET_HAL_PID, +}; + +enum logger_attributes { + LOGGER_ATTRIBUTE_INVALID = 0, + LOGGER_ATTRIBUTE_DRIVER_VER, + LOGGER_ATTRIBUTE_FW_VER, + LOGGER_ATTRIBUTE_RING_ID, + LOGGER_ATTRIBUTE_RING_NAME, + LOGGER_ATTRIBUTE_RING_FLAGS, + LOGGER_ATTRIBUTE_LOG_LEVEL, + LOGGER_ATTRIBUTE_LOG_TIME_INTVAL, + LOGGER_ATTRIBUTE_LOG_MIN_DATA_SIZE, + LOGGER_ATTRIBUTE_FW_DUMP_LEN, + LOGGER_ATTRIBUTE_FW_DUMP_DATA, + // LOGGER_ATTRIBUTE_FW_ERR_CODE, + LOGGER_ATTRIBUTE_RING_DATA, + LOGGER_ATTRIBUTE_RING_STATUS, + LOGGER_ATTRIBUTE_RING_NUM, + LOGGER_ATTRIBUTE_DRIVER_DUMP_LEN, + LOGGER_ATTRIBUTE_DRIVER_DUMP_DATA, + LOGGER_ATTRIBUTE_PKT_FATE_NUM, + LOGGER_ATTRIBUTE_PKT_FATE_DATA, + LOGGER_ATTRIBUTE_AFTER_LAST, + LOGGER_ATTRIBUTE_MAX = LOGGER_ATTRIBUTE_AFTER_LAST - 1, +}; + +enum wifi_sub_command { + GSCAN_SUBCMD_GET_CAPABILITIES = ANDROID_NL80211_SUBCMD_GSCAN_RANGE_START, + GSCAN_SUBCMD_SET_CONFIG, /* 0x1001 */ + GSCAN_SUBCMD_SET_SCAN_CONFIG, /* 0x1002 */ + GSCAN_SUBCMD_ENABLE_GSCAN, /* 0x1003 */ + GSCAN_SUBCMD_GET_SCAN_RESULTS, /* 0x1004 */ + GSCAN_SUBCMD_SCAN_RESULTS, /* 0x1005 */ + GSCAN_SUBCMD_SET_HOTLIST, /* 0x1006 */ + GSCAN_SUBCMD_SET_SIGNIFICANT_CHANGE_CONFIG, /* 0x1007 */ + GSCAN_SUBCMD_ENABLE_FULL_SCAN_RESULTS, /* 0x1008 */ + GSCAN_SUBCMD_GET_CHANNEL_LIST, /* 0x1009 */ + WIFI_SUBCMD_GET_FEATURE_SET, /* 0x100A */ + WIFI_SUBCMD_GET_FEATURE_SET_MATRIX, /* 0x100B */ + WIFI_SUBCMD_SET_PNO_RANDOM_MAC_OUI, /* 0x100C */ + WIFI_SUBCMD_NODFS_SET, /* 0x100D */ + WIFI_SUBCMD_SET_COUNTRY_CODE, /* 0x100E */ + /* Add more sub commands here */ + GSCAN_SUBCMD_SET_EPNO_SSID, /* 0x100F */ + WIFI_SUBCMD_SET_SSID_WHITE_LIST, /* 0x1010 */ + WIFI_SUBCMD_SET_ROAM_PARAMS, /* 0x1011 */ + WIFI_SUBCMD_ENABLE_LAZY_ROAM, /* 0x1012 */ + WIFI_SUBCMD_SET_BSSID_PREF, /* 0x1013 */ + WIFI_SUBCMD_SET_BSSID_BLACKLIST, /* 0x1014 */ + GSCAN_SUBCMD_ANQPO_CONFIG, /* 0x1015 */ + WIFI_SUBCMD_SET_RSSI_MONITOR, /* 0x1016 */ + WIFI_SUBCMD_CONFIG_ND_OFFLOAD, /* 0x1017 */ + /* Add more sub commands here */ + GSCAN_SUBCMD_MAX, + APF_SUBCMD_GET_CAPABILITIES = ANDROID_NL80211_SUBCMD_PKT_FILTER_RANGE_START, + APF_SUBCMD_SET_FILTER, +}; + +enum gscan_attributes { + GSCAN_ATTRIBUTE_NUM_BUCKETS = 10, + GSCAN_ATTRIBUTE_BASE_PERIOD, + GSCAN_ATTRIBUTE_BUCKETS_BAND, + GSCAN_ATTRIBUTE_BUCKET_ID, + 
GSCAN_ATTRIBUTE_BUCKET_PERIOD, + GSCAN_ATTRIBUTE_BUCKET_NUM_CHANNELS, + GSCAN_ATTRIBUTE_BUCKET_CHANNELS, + GSCAN_ATTRIBUTE_NUM_AP_PER_SCAN, + GSCAN_ATTRIBUTE_REPORT_THRESHOLD, + GSCAN_ATTRIBUTE_NUM_SCANS_TO_CACHE, + GSCAN_ATTRIBUTE_BAND = GSCAN_ATTRIBUTE_BUCKETS_BAND, + + GSCAN_ATTRIBUTE_ENABLE_FEATURE = 20, + GSCAN_ATTRIBUTE_SCAN_RESULTS_COMPLETE, /* indicates no more results */ + GSCAN_ATTRIBUTE_FLUSH_FEATURE, /* Flush all the configs */ + GSCAN_ENABLE_FULL_SCAN_RESULTS, + GSCAN_ATTRIBUTE_REPORT_EVENTS, + + /* remaining reserved for additional attributes */ + GSCAN_ATTRIBUTE_NUM_OF_RESULTS = 30, + GSCAN_ATTRIBUTE_FLUSH_RESULTS, + GSCAN_ATTRIBUTE_SCAN_RESULTS, /* flat array of wifi_scan_result */ + GSCAN_ATTRIBUTE_SCAN_ID, /* indicates scan number */ + GSCAN_ATTRIBUTE_SCAN_FLAGS, /* indicates if scan was aborted */ + GSCAN_ATTRIBUTE_AP_FLAGS, /* flags on significant change event */ + GSCAN_ATTRIBUTE_NUM_CHANNELS, + GSCAN_ATTRIBUTE_CHANNEL_LIST, + GSCAN_ATTRIBUTE_CH_BUCKET_BITMASK, + + GSCAN_ATTRIBUTE_AFTER_LAST, + GSCAN_ATTRIBUTE_MAX = GSCAN_ATTRIBUTE_AFTER_LAST - 1, +}; + +enum andr_wifi_attributes { + ANDR_WIFI_ATTRIBUTE_NUM_FEATURE_SET, + ANDR_WIFI_ATTRIBUTE_FEATURE_SET, + ANDR_WIFI_ATTRIBUTE_PNO_RANDOM_MAC_OUI, + ANDR_WIFI_ATTRIBUTE_NODFS_SET, + ANDR_WIFI_ATTRIBUTE_COUNTRY, + ANDR_WIFI_ATTRIBUTE_ND_OFFLOAD_VALUE, + // Add more attribute here + ANDR_WIFI_ATTRIBUTE_AFTER_LAST, + ANDR_WIFI_ATTRIBUTE_MAX = ANDR_WIFI_ATTRIBUTE_AFTER_LAST - 1, +}; + +enum wifi_support_feature { + /* Feature enums */ + WIFI_FEATURE_INFRA = 0x0001, /* Basic infrastructure mode */ + WIFI_FEATURE_INFRA_5G = 0x0002, /* Support for 5, GHz Band */ + WIFI_FEATURE_HOTSPOT = 0x0004, /* Support for GAS/ANQP */ + WIFI_FEATURE_P2P = 0x0008, /* Wifi-Direct */ + WIFI_FEATURE_SOFT_AP = 0x0010, /* Soft AP */ + WIFI_FEATURE_GSCAN = 0x0020, /* Google-Scan APIs */ + WIFI_FEATURE_NAN = 0x0040, /* Neighbor Awareness Networking */ + WIFI_FEATURE_D2D_RTT = 0x0080, /* Device-to-device RTT */ + WIFI_FEATURE_D2AP_RTT = 0x0100, /* Device-to-AP RTT */ + WIFI_FEATURE_BATCH_SCAN = 0x0200, /* Batched Scan (legacy) */ + WIFI_FEATURE_PNO = 0x0400, /* Preferred network offload */ + WIFI_FEATURE_ADDITIONAL_STA = 0x0800, /* Support for two STAs */ + WIFI_FEATURE_TDLS = 0x1000, /* Tunnel directed link setup */ + WIFI_FEATURE_TDLS_OFFCHANNEL = 0x2000, /* Support for TDLS off channel */ + WIFI_FEATURE_EPR = 0x4000, /* Enhanced power reporting */ + WIFI_FEATURE_AP_STA = 0x8000, /* Support for AP STA Concurrency */ + WIFI_FEATURE_LINK_LAYER_STATS = 0x10000, /* Support for Linkstats */ + WIFI_FEATURE_LOGGER = 0x20000, /* WiFi Logger */ + WIFI_FEATURE_HAL_EPNO = 0x40000, /* WiFi PNO enhanced */ + WIFI_FEATURE_RSSI_MONITOR = 0x80000, /* RSSI Monitor */ + WIFI_FEATURE_MKEEP_ALIVE = 0x100000, /* WiFi mkeep_alive */ + WIFI_FEATURE_CONFIG_NDO = 0x200000, /* ND offload configure */ + WIFI_FEATURE_TX_TRANSMIT_POWER = 0x400000, /* Capture Tx transmit power levels */ + WIFI_FEATURE_CONTROL_ROAMING = 0x800000, /* Enable/Disable firmware roaming */ + WIFI_FEATURE_IE_WHITELIST = 0x1000000, /* Support Probe IE white listing */ + WIFI_FEATURE_SCAN_RAND = 0x2000000, /* Support MAC & Probe Sequence Number randomization */ + WIFI_FEATURE_INVALID = 0xFFFFFFFF, /* Invalid Feature */ +}; + +enum wifi_logger_feature { + WIFI_LOGGER_MEMORY_DUMP_SUPPORTED = (1 << (0)), // Memory dump of FW + WIFI_LOGGER_PER_PACKET_TX_RX_STATUS_SUPPORTED = (1 << (1)), // PKT status + WIFI_LOGGER_CONNECT_EVENT_SUPPORTED = (1 << (2)), // Connectivity event + WIFI_LOGGER_POWER_EVENT_SUPPORTED 
= (1 << (3)), // POWER of Driver + WIFI_LOGGER_WAKE_LOCK_SUPPORTED = (1 << (4)), // WAKE LOCK of Driver + WIFI_LOGGER_VERBOSE_SUPPORTED = (1 << (5)), // verbose log of FW + WIFI_LOGGER_WATCHDOG_TIMER_SUPPORTED = (1 << (6)), // monitor the health of FW + WIFI_LOGGER_DRIVER_DUMP_SUPPORTED = (1 << (7)), // dumps driver state + WIFI_LOGGER_PACKET_FATE_SUPPORTED = (1 << (8)), // tracks connection packets' fate +}; + +enum wake_stats_attributes { + WAKE_STAT_ATTRIBUTE_TOTAL_CMD_EVENT, + WAKE_STAT_ATTRIBUTE_CMD_EVENT_WAKE, + WAKE_STAT_ATTRIBUTE_CMD_EVENT_COUNT, + WAKE_STAT_ATTRIBUTE_CMD_COUNT_USED, + WAKE_STAT_ATTRIBUTE_TOTAL_DRIVER_FW, + WAKE_STAT_ATTRIBUTE_DRIVER_FW_WAKE, + WAKE_STAT_ATTRIBUTE_DRIVER_FW_COUNT, + WAKE_STAT_ATTRIBUTE_DRIVER_FW_COUNT_USED, + WAKE_STAT_ATTRIBUTE_TOTAL_RX_DATA_WAKE, + WAKE_STAT_ATTRIBUTE_RX_UNICAST_COUNT, + WAKE_STAT_ATTRIBUTE_RX_MULTICAST_COUNT, + WAKE_STAT_ATTRIBUTE_RX_BROADCAST_COUNT, + WAKE_STAT_ATTRIBUTE_RX_ICMP_PKT, + WAKE_STAT_ATTRIBUTE_RX_ICMP6_PKT, + WAKE_STAT_ATTRIBUTE_RX_ICMP6_RA, + WAKE_STAT_ATTRIBUTE_RX_ICMP6_NA, + WAKE_STAT_ATTRIBUTE_RX_ICMP6_NS, + WAKE_STAT_ATTRIBUTE_IPV4_RX_MULTICAST_ADD_CNT, + WAKE_STAT_ATTRIBUTE_IPV6_RX_MULTICAST_ADD_CNT, + WAKE_STAT_ATTRIBUTE_OTHER__RX_MULTICAST_ADD_CNT, + WAKE_STAT_ATTRIBUTE_RX_MULTICAST_PKT_INFO, + WAKE_STAT_ATTRIBUTE_AFTER_LAST, + WAKE_STAT_ATTRIBUTE_MAX = WAKE_STAT_ATTRIBUTE_AFTER_LAST - 1, +}; + +enum vendor_nl80211_subcmd { + /* copied from wpa_supplicant brcm definations */ + VENDOR_NL80211_SUBCMD_UNSPEC = 0, + VENDOR_NL80211_SUBCMD_SET_PMK = 4, + VENDOR_NL80211_SUBCMD_SET_MAC = 6, + VENDOR_NL80211_SCMD_ACS = 9, + VENDOR_NL80211_SCMD_MAX = 10, +}; + +enum nl80211_vendor_subcmd_attributes { + WIFI_VENDOR_ATTR_DRIVER_CMD = 0, + WIFI_VENDOR_ATTR_DRIVER_KEY_PMK = 1, + WIFI_VENDOR_ATTR_DRIVER_MAC_ADDR = 3, + WIFI_VENDOR_ATTR_DRIVER_AFTER_LAST = 5, + WIFI_VENDOR_ATTR_DRIVER_MAX = + WIFI_VENDOR_ATTR_DRIVER_AFTER_LAST - 1, +}; + +typedef int wifi_ring_buffer_id; + +struct wifi_ring_buffer_status { + u8 name[32]; + u32 flags; + wifi_ring_buffer_id ring_id; + u32 ring_buffer_byte_size; + u32 verbose_level; + u32 written_bytes; + u32 read_bytes; + u32 written_records; +}; + +struct rx_data_cnt_details_t { + int rx_unicast_cnt; /*Total rx unicast packet which woke up host */ + int rx_multicast_cnt; /*Total rx multicast packet which woke up host */ + int rx_broadcast_cnt; /*Total rx broadcast packet which woke up host */ +}; + +struct rx_wake_pkt_type_classification_t { + int icmp_pkt; /*wake icmp packet count */ + int icmp6_pkt; /*wake icmp6 packet count */ + int icmp6_ra; /*wake icmp6 RA packet count */ + int icmp6_na; /*wake icmp6 NA packet count */ + int icmp6_ns; /*wake icmp6 NS packet count */ + //ToDo: Any more interesting classification to add? 
+}; + +struct rx_multicast_cnt_t{ + int ipv4_rx_multicast_addr_cnt; /*Rx wake packet was ipv4 multicast */ + int ipv6_rx_multicast_addr_cnt; /*Rx wake packet was ipv6 multicast */ + int other_rx_multicast_addr_cnt;/*Rx wake packet was non-ipv4 and non-ipv6*/ +}; + +struct wlan_driver_wake_reason_cnt_t { + int total_cmd_event_wake; /* Total count of cmd event wakes */ + int *cmd_event_wake_cnt; /* Individual wake count array, each index a reason */ + int cmd_event_wake_cnt_sz; /* Max number of cmd event wake reasons */ + int cmd_event_wake_cnt_used; /* Number of cmd event wake reasons specific to the driver */ + + int total_driver_fw_local_wake; /* Total count of drive/fw wakes, for local reasons */ + int *driver_fw_local_wake_cnt; /* Individual wake count array, each index a reason */ + int driver_fw_local_wake_cnt_sz; /* Max number of local driver/fw wake reasons */ + int driver_fw_local_wake_cnt_used; /* Number of local driver/fw wake reasons specific to the driver */ + + int total_rx_data_wake; /* total data rx packets, that woke up host */ + struct rx_data_cnt_details_t rx_wake_details; + struct rx_wake_pkt_type_classification_t rx_wake_pkt_classification_info; + struct rx_multicast_cnt_t rx_multicast_wake_pkt_info; +}; + +typedef struct wl_mkeep_alive_pkt { + u16 version; /* Version for mkeep_alive */ + u16 length; /* length of fixed parameters in the structure */ + u32 period_msec; /* high bit on means immediate send */ + u16 len_bytes; + u8 keep_alive_id; /* 0 - 3 for N = 4 */ + u8 data[1]; +} wl_mkeep_alive_pkt_t; + +#endif /* _AIC_VENDOR_H */ + diff --git a/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/aicwf_compat_8800d80.c b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/aicwf_compat_8800d80.c new file mode 100644 index 0000000..3a9ca06 --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/aicwf_compat_8800d80.c @@ -0,0 +1,96 @@ +#include "rwnx_main.h" +#include "rwnx_msg_tx.h" +#include "reg_access.h" + +#define FW_USERCONFIG_NAME_8800D80 "aic_userconfig_8800d80.txt" +#define FW_POWERLIMIT_NAME_8800D80 "aic_powerlimit_8800d80.txt" + +extern char aic_fw_path[200]; + +int rwnx_request_firmware_common(struct rwnx_hw *rwnx_hw, + u32** buffer, const char *filename); +void rwnx_plat_userconfig_parsing(char *buffer, int size); +void rwnx_release_firmware_common(u32** buffer); + + +int aicwf_set_rf_config_8800d80(struct rwnx_hw *rwnx_hw, struct mm_set_rf_calib_cfm *cfm) +{ + int ret = 0; + + if ((ret = rwnx_send_txpwr_lvl_v3_req(rwnx_hw))) { + return -1; + } + if ((ret = rwnx_send_txpwr_lvl_adj_req(rwnx_hw))) { + return -1; + } + if ((ret = rwnx_send_txpwr_ofst2x_req(rwnx_hw))) { + return -1; + } + if ((ret = rwnx_send_rf_calib_req(rwnx_hw, cfm))) { + return -1; + } + return 0 ; +} + + +int rwnx_plat_userconfig_load_8800d80(struct rwnx_hw *rwnx_hw){ + int size; + u32 *dst=NULL; + char *filename = FW_USERCONFIG_NAME_8800D80; + +#ifndef ANDROID_PLATFORM + sprintf(aic_fw_path, "%s/%s", aic_fw_path, "aic8800D80"); +#endif + + AICWFDBG(LOGINFO, "userconfig file path:%s \r\n", filename); + + /* load file */ + size = rwnx_request_firmware_common(rwnx_hw, &dst, filename); + if (size <= 0) { + AICWFDBG(LOGERROR, "wrong size of firmware file\n"); + dst = NULL; + return 0; + } + + /* Copy the file on the Embedded side */ + AICWFDBG(LOGINFO, "### Load file done: %s, size=%d\n", filename, size); + + rwnx_plat_userconfig_parsing((char *)dst, size); + + rwnx_release_firmware_common(&dst); + + AICWFDBG(LOGINFO, "userconfig download complete\n\n"); + return 0; + +} + +#ifdef CONFIG_POWER_LIMIT 
+extern char country_code[]; +int rwnx_plat_powerlimit_load_8800d80(struct rwnx_hw *rwnx_hw) +{ + int size; + u32 *dst=NULL; + char *filename = FW_POWERLIMIT_NAME_8800D80; + + AICWFDBG(LOGINFO, "powerlimit file path:%s \r\n", filename); + + /* load file */ + size = rwnx_request_firmware_common(rwnx_hw, &dst, filename); + if (size <= 0) { + AICWFDBG(LOGERROR, "wrong size of cfg file\n"); + dst = NULL; + return 0; + } + + AICWFDBG(LOGINFO, "### Load file done: %s, size=%d\n", filename, size); + + /* parsing the file */ + rwnx_plat_powerlimit_parsing((char *)dst, size, country_code); + + rwnx_release_firmware_common(&dst); + + AICWFDBG(LOGINFO, "powerlimit download complete\n\n"); + return 0; +} +#endif + diff --git a/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/aicwf_compat_8800d80.h b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/aicwf_compat_8800d80.h new file mode 100644 index 0000000..134fd1a --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/aicwf_compat_8800d80.h @@ -0,0 +1,9 @@ +#include + +int rwnx_plat_userconfig_load_8800d80(struct rwnx_hw *rwnx_hw); +#ifdef CONFIG_POWER_LIMIT +int rwnx_plat_powerlimit_load_8800d80(struct rwnx_hw *rwnx_hw); +#endif +int aicwf_set_rf_config_8800d80(struct rwnx_hw *rwnx_hw, struct mm_set_rf_calib_cfm *cfm); + + diff --git a/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/aicwf_compat_8800d80x2.c b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/aicwf_compat_8800d80x2.c new file mode 100644 index 0000000..2d6c5e0 --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/aicwf_compat_8800d80x2.c @@ -0,0 +1,97 @@ +#include "rwnx_main.h" +#include "rwnx_msg_tx.h" +#include "reg_access.h" + +#define FW_USERCONFIG_NAME_8800D80X2 "aic_userconfig_8800d80x2.txt" +#define FW_POWERLIMIT_NAME_8800D80X2 "aic_powerlimit_8800d80x2.txt" + +extern char aic_fw_path[200]; + +int rwnx_request_firmware_common(struct rwnx_hw *rwnx_hw, + u32** buffer, const char *filename); +void rwnx_plat_userconfig_parsing_8800d80x2(char *buffer, int size); +void rwnx_plat_userconfig_parsing(char *buffer, int size); +void rwnx_release_firmware_common(u32** buffer); + + +int aicwf_set_rf_config_8800d80x2(struct rwnx_hw *rwnx_hw, struct mm_set_rf_calib_cfm *cfm) +{ + int ret = 0; + + if ((ret = rwnx_send_txpwr_lvl_v4_req(rwnx_hw))) { + return -1; + } + if ((ret = rwnx_send_txpwr_lvl_adj_req(rwnx_hw))) { + return -1; + } + if ((ret = rwnx_send_txpwr_ofst2x_v2_req(rwnx_hw))) { + return -1; + } + if ((ret = rwnx_send_rf_calib_req(rwnx_hw, cfm))) { + return -1; + } + return 0 ; +} + + +int rwnx_plat_userconfig_load_8800d80x2(struct rwnx_hw *rwnx_hw){ + int size; + u32 *dst=NULL; + char *filename = FW_USERCONFIG_NAME_8800D80X2; + +#ifndef ANDROID_PLATFORM + sprintf(aic_fw_path, "%s/%s", aic_fw_path, "aic8800D80X2"); +#endif + + AICWFDBG(LOGINFO, "userconfig file path:%s \r\n", filename); + + /* load file */ + size = rwnx_request_firmware_common(rwnx_hw, &dst, filename); + if (size <= 0) { + AICWFDBG(LOGERROR, "wrong size of firmware file\n"); + dst = NULL; + return 0; + } + + /* Copy the file on the Embedded side */ + AICWFDBG(LOGINFO, "### Load file done: %s, size=%d\n", filename, size); + + rwnx_plat_userconfig_parsing_8800d80x2((char *)dst, size); + + rwnx_release_firmware_common(&dst); + + AICWFDBG(LOGINFO, "userconfig download complete\n\n"); + return 0; + +} + +#ifdef CONFIG_POWER_LIMIT +extern char country_code[]; +int rwnx_plat_powerlimit_load_8800d80x2(struct rwnx_hw *rwnx_hw) +{ + int size; + u32 *dst=NULL; + char *filename = FW_POWERLIMIT_NAME_8800D80X2; + + 
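+    /* Same request/parse/release sequence as the 8800d80 power-limit loader
+     * above, but using the 8800d80x2 power-limit file name. */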
AICWFDBG(LOGINFO, "powerlimit file path:%s \r\n", filename); + + /* load file */ + size = rwnx_request_firmware_common(rwnx_hw, &dst, filename); + if (size <= 0) { + AICWFDBG(LOGERROR, "wrong size of cfg file\n"); + dst = NULL; + return 0; + } + + AICWFDBG(LOGINFO, "### Load file done: %s, size=%d\n", filename, size); + + /* parsing the file */ + rwnx_plat_powerlimit_parsing((char *)dst, size, country_code); + + rwnx_release_firmware_common(&dst); + + AICWFDBG(LOGINFO, "powerlimit download complete\n\n"); + return 0; +} +#endif + diff --git a/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/aicwf_compat_8800d80x2.h b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/aicwf_compat_8800d80x2.h new file mode 100644 index 0000000..df6f1dd --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/aicwf_compat_8800d80x2.h @@ -0,0 +1,9 @@ +#include + +int rwnx_plat_userconfig_load_8800d80x2(struct rwnx_hw *rwnx_hw); +#ifdef CONFIG_POWER_LIMIT +int rwnx_plat_powerlimit_load_8800d80x2(struct rwnx_hw *rwnx_hw); +#endif +int aicwf_set_rf_config_8800d80x2(struct rwnx_hw *rwnx_hw, struct mm_set_rf_calib_cfm *cfm); + + diff --git a/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/aicwf_compat_8800dc.c b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/aicwf_compat_8800dc.c new file mode 100644 index 0000000..e3ebbf1 --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/aicwf_compat_8800dc.c @@ -0,0 +1,3685 @@ +#include "rwnx_main.h" +#include "rwnx_msg_tx.h" +#include "reg_access.h" +#include "rwnx_platform.h" +#include "aicwf_compat_8800dc.h" + +#define RWNX_MAC_FW_RF_BASE_NAME_8800DC "lmacfw_rf_8800dc.bin" + +#ifdef CONFIG_FOR_IPCAM +#define RWNX_MAC_PATCH_BASE_NAME_8800DC "fmacfw_patch_8800dc_ipc" +#else +#define RWNX_MAC_PATCH_BASE_NAME_8800DC "fmacfw_patch_8800dc" +#endif +#define RWNX_MAC_PATCH_NAME2_8800DC RWNX_MAC_PATCH_BASE_NAME_8800DC".bin" +#define RWNX_MAC_PATCH_NAME2_8800DC_U02 RWNX_MAC_PATCH_BASE_NAME_8800DC"_u02.bin" +#define RWNX_MAC_PATCH_NAME2_8800DC_H_U02 RWNX_MAC_PATCH_BASE_NAME_8800DC"_h_u02.bin" + +#define RWNX_MAC_CALIB_BASE_NAME_8800DC "fmacfw_calib_8800dc" +#define RWNX_MAC_CALIB_NAME_8800DC_U02 RWNX_MAC_CALIB_BASE_NAME_8800DC"_u02.bin" +#define RWNX_MAC_CALIB_NAME_8800DC_H_U02 RWNX_MAC_CALIB_BASE_NAME_8800DC"_h_u02.bin" + +#ifdef CONFIG_LOAD_BT_PATCH_IN_FDRV +#define FW_8800DC_U02_ADID_ADDR 0x1017d8 +#define FW_RAM_PATCH_BASE_ADDR 0x184000 +#define FW_ADID_BASE_NAME "fw_adid_8800dc_u02.bin" +#define FW_PATCH_TABLE_NAME_U02 "fw_patch_table_8800dc_u02.bin" +#define FW_PATCH_BASE_NAME_U02 "fw_patch_8800dc_u02.bin" +#define FW_PATCH_TABLE_NAME_U02H "fw_patch_table_8800dc_u02h.bin" +#define FW_PATCH_BASE_NAME_U02H "fw_patch_8800dc_u02h.bin" +#define AICBT_PT_TAG "AICBT_PT_TAG" +#endif +#ifdef CONFIG_FOR_IPCAM +#define RWNX_MAC_PATCH_TABLE_NAME_8800DC "fmacfw_patch_tbl_8800dc_ipc" +#else +#define RWNX_MAC_PATCH_TABLE_NAME_8800DC "fmacfw_patch_tbl_8800dc" +#endif +#define RWNX_MAC_PATCH_TABLE_8800DC RWNX_MAC_PATCH_TABLE_NAME_8800DC ".bin" +#define RWNX_MAC_PATCH_TABLE_8800DC_U02 RWNX_MAC_PATCH_TABLE_NAME_8800DC "_u02.bin" +#define RWNX_MAC_PATCH_TABLE_8800DC_H_U02 RWNX_MAC_PATCH_TABLE_NAME_8800DC "_h_u02.bin" + + +#define RWNX_MAC_RF_PATCH_BASE_NAME_8800DC "fmacfw_rf_patch_8800dc" +#define RWNX_MAC_RF_PATCH_NAME_8800DC RWNX_MAC_RF_PATCH_BASE_NAME_8800DC".bin" +#define FW_USERCONFIG_NAME_8800DC "aic_userconfig_8800dc.txt" +#define FW_USERCONFIG_NAME_8800DW "aic_userconfig_8800dw.txt" +#define FW_POWERLIMIT_NAME_8800DC "aic_powerlimit_8800dc.txt" +#define FW_POWERLIMIT_NAME_8800DW 
"aic_powerlimit_8800dw.txt" + +#ifdef CONFIG_LOAD_BT_PATCH_IN_FDRV +enum aicbt_patch_table_type { + AICBT_PT_NULL = 0x00, + AICBT_PT_TRAP, + AICBT_PT_B4, + AICBT_PT_BTMODE, + AICBT_PT_PWRON, + AICBT_PT_AF, + AICBT_PT_VER, + AICBT_PT_MAX, +}; + +struct aicbt_patch_table { + char *name; + uint32_t type; + uint32_t *data; + uint32_t len; + struct aicbt_patch_table *next; +}; + +struct aicbt_info_t { + uint32_t btmode; + uint32_t btport; + uint32_t uart_baud; + uint32_t uart_flowctrl; + uint32_t lpm_enable; + uint32_t txpwr_lvl; +}; + +struct aicbsp_info_t { + int hwinfo; + uint32_t cpmode; +}; + +enum aicbsp_cpmode_type { + AICBSP_CPMODE_WORK, + AICBSP_CPMODE_TEST, +}; + +/* btmode + * used for force bt mode,if not AICBSP_MODE_NULL + * efuse valid and vendor_info will be invalid, even has beed set valid +*/ +enum aicbt_btmode_type { + AICBT_BTMODE_BT_ONLY_SW = 0x0, // bt only mode with switch + AICBT_BTMODE_BT_WIFI_COMBO, // wifi/bt combo mode + AICBT_BTMODE_BT_ONLY, // bt only mode without switch + AICBT_BTMODE_BT_ONLY_TEST, // bt only test mode + AICBT_BTMODE_BT_WIFI_COMBO_TEST, // wifi/bt combo test mode + AICBT_MODE_NULL = 0xFF, // invalid value +}; + +enum aicbt_btport_type { + AICBT_BTPORT_NULL, + AICBT_BTPORT_MB, + AICBT_BTPORT_UART, +}; + +enum aicbt_uart_baud_type { + AICBT_UART_BAUD_115200 = 115200, + AICBT_UART_BAUD_921600 = 921600, + AICBT_UART_BAUD_1_5M = 1500000, + AICBT_UART_BAUD_3_25M = 3250000, +}; + +enum aicbt_uart_flowctrl_type { + AICBT_UART_FLOWCTRL_DISABLE = 0x0, // uart without flow ctrl + AICBT_UART_FLOWCTRL_ENABLE, // uart with flow ctrl +}; + +#define AICBSP_HWINFO_DEFAULT (-1) +#define AICBSP_CPMODE_DEFAULT AICBSP_CPMODE_WORK +#define AICBT_TXPWR_DFT 0x6F2F + + +#define AICBT_BTMODE_DEFAULT AICBT_BTMODE_BT_WIFI_COMBO +#define AICBT_BTPORT_DEFAULT AICBT_BTPORT_MB +#define AICBT_UART_BAUD_DEFAULT AICBT_UART_BAUD_1_5M +#define AICBT_UART_FC_DEFAULT AICBT_UART_FLOWCTRL_ENABLE +#define AICBT_LPM_ENABLE_DEFAULT 0 +#define AICBT_TXPWR_LVL_DEFAULT AICBT_TXPWR_DFT + +struct aicbsp_info_t aicbsp_info = { + .hwinfo = AICBSP_HWINFO_DEFAULT, + .cpmode = AICBSP_CPMODE_DEFAULT, +}; + +static struct aicbt_info_t aicbt_info = { + .btmode = AICBT_BTMODE_DEFAULT, + .btport = AICBT_BTPORT_DEFAULT, + .uart_baud = AICBT_UART_BAUD_DEFAULT, + .uart_flowctrl = AICBT_UART_FC_DEFAULT, + .lpm_enable = AICBT_LPM_ENABLE_DEFAULT, + .txpwr_lvl = AICBT_TXPWR_LVL_DEFAULT, +}; +#endif + +int rwnx_plat_bin_fw_upload_2(struct rwnx_hw *rwnx_hw, u32 fw_addr, + char *filename); +int rwnx_request_firmware_common(struct rwnx_hw *rwnx_hw, + u32** buffer, const char *filename); +void rwnx_plat_userconfig_parsing(char *buffer, int size); +void rwnx_release_firmware_common(u32** buffer); + +typedef u32 (*array2_tbl_t)[2]; +typedef u32 (*array3_tbl_t)[3]; + +u32 syscfg_tbl_masked_8800dc[][3] = { + //#ifdef CONFIG_PMIC_SETTING + #if defined(CONFIG_VRF_DCDC_MODE) + {0x7000216C, (0x3 << 2), (0x1 << 2)}, // pmic_pmu_init + {0x700021BC, (0x3 << 2), (0x1 << 2)}, + {0x70002118, ((0x7 << 4) | (0x1 << 7)), ((0x2 << 4) | (0x1 << 7))}, + {0x70002104, ((0x3F << 0) | (0x1 << 6)), ((0x2 << 0) | (0x1 << 6))}, + {0x7000210C, ((0x3F << 0) | (0x1 << 6)), ((0x2 << 0) | (0x1 << 6))}, + {0x70002170, (0xF << 0), (0x1 << 0)}, + {0x70002190, (0x3F << 0), (24 << 0)}, + {0x700021CC, ((0x7 << 4) | (0x1 << 7)), ((0x0 << 4) | (0x0 << 7))}, + {0x700010A0, (0x1 << 11), (0x1 << 11)}, + {0x70001034, ((0x1 << 20) | (0x7 << 26)), ((0x0 << 20) | (0x2 << 26))}, + {0x70001038, (0x1 << 8), (0x1 << 8)}, + {0x70001094, (0x3 << 2), (0x0 << 2)}, + 
{0x700021D0, ((0x1 << 5) | (0x1 << 6)), ((0x1 << 5) | (0x1 << 6))}, + {0x70001000, ((0x1 << 0) | (0x1 << 20) | (0x1 << 22)), + ((0x1 << 0) | (0x1 << 20) | (0x0 << 22))}, + {0x70001028, (0xf << 2), (0x1 << 2)}, + #else + {0x7000216C, (0x3 << 2), (0x1 << 2)}, // pmic_pmu_init + {0x700021BC, (0x3 << 2), (0x1 << 2)}, + {0x70002118, ((0x7 << 4) | (0x1 << 7)), ((0x2 << 4) | (0x1 << 7))}, + {0x70002104, ((0x3F << 0) | (0x1 << 6)), ((0x2 << 0) | (0x1 << 6))}, + {0x7000210C, ((0x3F << 0) | (0x1 << 6)), ((0x2 << 0) | (0x1 << 6))}, + {0x70002170, (0xF << 0), (0x1 << 0)}, + {0x70002190, (0x3F << 0), (24 << 0)}, + {0x700021CC, ((0x7 << 4) | (0x1 << 7)), ((0x0 << 4) | (0x0 << 7))}, + {0x700010A0, (0x1 << 11), (0x1 << 11)}, + {0x70001034, ((0x1 << 20) | (0x7 << 26)), ((0x0 << 20) | (0x2 << 26))}, + {0x70001038, (0x1 << 8), (0x1 << 8)}, + {0x70001094, (0x3 << 2), (0x0 << 2)}, + {0x700021D0, ((0x1 << 5) | (0x1 << 6)), ((0x1 << 5) | (0x1 << 6))}, + {0x70001000, ((0x1 << 0) | (0x1 << 20) | (0x1 << 22)), + ((0x0 << 0) | (0x1 << 20) | (0x0 << 22))}, + {0x70001028, (0xf << 2), (0x1 << 2)}, + #endif + //#endif /* CONFIG_PMIC_SETTING */ + {0x00000000, 0x00000000, 0x00000000}, // last one +}; + +u32 syscfg_tbl_masked_8800dc_h[][3] = { + {0x7000216C, ((0x3 << 2) | (0x3 << 4)), ((0x2 << 2) | (0x2 << 4))}, // pmic_pmu_init + {0x70002138, (0xFF << 0), (0xFF << 0)}, + {0x7000213C, (0xFF << 0), (0xFF << 0)}, + {0x70002144, (0xFF << 0), (0xFF << 0)}, + {0x700021BC, (0x3 << 2), (0x1 << 2)}, + {0x70002118, ((0x7 << 4) | (0x1 << 7)), ((0x2 << 4) | (0x1 << 7))}, + {0x70002104, ((0x3F << 0) | (0x1 << 6)), ((0x2 << 0) | (0x1 << 6))}, + {0x7000210C, ((0x3F << 0) | (0x1 << 6)), ((0x2 << 0) | (0x1 << 6))}, + {0x70002170, (0xF << 0), (0x1 << 0)}, + {0x70002190, (0x3F << 0), (24 << 0)}, + {0x700021CC, ((0x7 << 4) | (0x1 << 7)), ((0x0 << 4) | (0x0 << 7))}, + {0x700010A0, (0x1 << 11), (0x1 << 11)}, + //{0x70001034, ((0x1 << 20) | (0x7 << 26)), ((0x0 << 20) | (0x2 << 26))}, + {0x70001038, (0x1 << 8), (0x1 << 8)}, + {0x70001094, (0x3 << 2), (0x0 << 2)}, + {0x700021D0, ((0x1 << 5) | (0x1 << 6)), ((0x1 << 5) | (0x1 << 6))}, + #if defined(CONFIG_VRF_DCDC_MODE) + {0x70001000, ((0x1 << 0) | (0x1 << 20) | (0x1 << 22)), + ((0x1 << 0) | (0x1 << 20) | (0x0 << 22))}, + #else + {0x70001000, ((0x1 << 0) | (0x1 << 20) | (0x1 << 22)), + ((0x0 << 0) | (0x1 << 20) | (0x0 << 22))}, + #endif + {0x70001028, (0xf << 2), (0x1 << 2)}, + + {0x00000000, 0x00000000, 0x00000000}, // last one +}; + +u32 syscfg_tbl_masked_8800dc_u01[][3] = { + //#ifdef CONFIG_PMIC_SETTING + {0x70001000, (0x1 << 16), (0x1 << 16)}, // for low temperature + {0x70001028, (0x1 << 6), (0x1 << 6)}, + {0x70001000, (0x1 << 16), (0x0 << 16)}, + //#endif /* CONFIG_PMIC_SETTING */ +}; + + +u32 syscfg_tbl_8800dc[][2] = { + {0x40500010, 0x00000004}, + {0x40500010, 0x00000006},//160m clk +}; + + +u32 patch_tbl_wifisetting[][2] = +{ + #if !defined(CONFIG_FPGA_VERIFICATION) + {0x0090, 0x0013FC00}, //rx_ringbuf_start2 + #endif +#ifdef CONFIG_USB_TX_AGGR + {0x0100, 0x03021714}, //usb fc params(rx msg fc recover, rx msg fc trigger, wifi fc recover, wifi fc trigger) + {0x0120, 0x140A0100}, //usb agg tx params(total cnt, aggr cnt, out en, global out nak) +#endif //CONFIG_USB_TX_AGGR + {0x00b0, 0xAD180100}, +#ifdef CONFIG_PRBREQ_REPORT + {0x0138, 0x00010a00}, //apm probe resp offload en +#endif +}; + +u32 jump_tbl[][2] = +{ + {296, 0x180001}, + {137, 0x180011}, + {303, 0x1810f9}, + {168, 0x18186d}, + {308, 0x181bbd}, + {288, 0x1820c1}, +}; + + +uint32_t ldpc_cfg_ram[] = { +#if 0//def 
CONFIG_FPGA_VERIFICATION + 0x00363638, + 0x1DF8F834, + 0x1DF8F834, + 0x1DF8F834, + 0x1DF8F834, + 0x002F2F31, + 0x1DF8F82C, + 0x1DF8F82C, + 0x1DF8F82C, + 0x1DF8F82C, + 0x00363639, + 0x1AA5F834, + 0x1AA5F834, + 0x1ADEF834, + 0x1ADEF834, + 0x003A3A3E, + 0x1578F436, + 0x1578F436, + 0x1578F436, + 0x15B6F436, + 0x003B3B40, + 0x1DF8F838, + 0x1DF8F838, + 0x1DF8F838, + 0x1DF8F838, + 0x003B3B41, + 0x1DC4F838, + 0x1DC4F838, + 0x1DF8F838, + 0x1DF8F838, + 0x003B3B40, + 0x1781F838, + 0x1781F838, + 0x1781F838, + 0x17C4F838, + 0x003B3B40, + 0x0E81F838, + 0x0E81F838, + 0x0E81F838, + 0x0E82F838, + 0x003F3F43, + 0x1A92F83D, + 0x1A92F83E, + 0x1A92F83D, + 0x1ADDF83D, + 0x00272729, + 0x1DF8F824, + 0x1DF8F824, + 0x1DF8F843, + 0x1DF8F843, + 0x00272729, + 0x1DF8F824, + 0x1DF8F824, + 0x1DF8F842, + 0x1DF8F842, + 0x00262628, + 0x1DF8F823, + 0x1DF8F823, + 0x1DF8F823, + 0x1DF8F823, + 0x00252528, + 0x1DF8F823, + 0x1DF8F823, + 0x1DF8F823, + 0x1DF8F823, + 0x00262628, + 0x1DF8F823, + 0x1DF8F823, + 0x1DF8F823, + 0x1DF8F823, + 0x00242427, + 0x1DF8F821, + 0x1DF8F821, + 0x1DF8F821, + 0x1DF8F821, + 0x00232326, + 0x1DF8F821, + 0x1DF8F820, + 0x1DF8F820, + 0x1DF8F820, + 0x00262628, + 0x1DF8F823, + 0x1DF8F823, + 0x1DF8F823, + 0x1DF8F823, + 0x00242427, + 0x1DF8F821, + 0x1DF8F821, + 0x1DF8F821, + 0x1DF8F821, + 0x001F1F21, + 0x1DF8F81D, + 0x1DF8F81D, + 0x1DF8F81D, + 0x1DF8F81D, + 0x00262643, + 0x1DF8F822, + 0x1DF8F821, + 0x1DF8F821, + 0x1DF8F821, + 0x0018182B, + 0x1DF8F816, + 0x1DBDF815, + 0x1DF8F815, + 0x1DF8F815, + 0x0018182A, + 0x1195F836, + 0x1195F815, + 0x1195F815, + 0x1196F815, + 0x0028282C, + 0x1DF8F824, + 0x1DF8F824, + 0x1DF8F824, + 0x1DF8F824, + 0x0027272C, + 0x1DF8F824, + 0x1DF8F823, + 0x1DF8F823, + 0x1DF8F823, + 0x0082824A, + 0x1ADFF841, + 0x1ADDF822, + 0x1ADEF822, + 0x1ADFF822, + 0x003E3E40, + 0x09D1F81D, + 0x095BF81D, + 0x095BF81D, + 0x095BF81D, + 0x0029292D, + 0x1DF8F825, + 0x1DF8F825, + 0x1DF8F825, + 0x1DF8F825, + 0x0028282C, + 0x1DF8F824, + 0x1DF8F824, + 0x1DF8F824, + 0x1DF8F824, + 0x0029292D, + 0x1DF8F825, + 0x1DF8F825, + 0x1DF8F825, + 0x1DF8F825, + 0x0028282E, + 0x1DF8F825, + 0x1DF8F824, + 0x1DF8F824, + 0x1DF8F824, + 0x0026262C, + 0x1DF8F823, + 0x1DF8F822, + 0x1DF8F822, + 0x1DF8F822, + 0x0028282D, + 0x1DF8F825, + 0x1DF8F824, + 0x1DF8F824, + 0x1DF8F824, + 0x00282852, + 0x1DF8F827, + 0x1DF8F824, + 0x1DF8F824, + 0x1DF8F824, + 0x0029294E, + 0x1DF8F823, + 0x1DF8F822, + 0x1DF8F822, + 0x1DF8F822, + 0x00212143, + 0x1DF8F821, + 0x1DECF81D, + 0x1DF4F81D, + 0x1DF8F81D, + 0x0086864D, + 0x1CF0F844, + 0x1CEDF823, + 0x1CEFF822, + 0x1CF0F822, + 0x0047474D, + 0x1BE8F823, + 0x1BE8F823, + 0x1BE9F822, + 0x1BEAF822, + 0x0018182F, + 0x14B0F83C, + 0x14B0F814, + 0x14B0F814, + 0x14B0F814, + 0x00404040, + 0x0AE1F81E, + 0x0A61F81D, + 0x0A61F81D, + 0x0A61F81D, + 0x002C2C40, + 0x09555526, + 0x09555512, + 0x09555513, + 0x09555512, + 0x00181840, + 0x06333329, + 0x06333314, + 0x06333314, + 0x06333314, + 0x002B2B2F, + 0x1DF8F828, + 0x1DF8F828, + 0x1DF8F828, + 0x1DF8F828, + 0x002B2B32, + 0x1DF8F829, + 0x1DF8F828, + 0x1DF8F828, + 0x1DF8F828, + 0x002A2A2F, + 0x1DF8F827, + 0x1DF8F827, + 0x1DF8F827, + 0x1DF8F827, + 0x002A2A57, + 0x1DF8F82B, + 0x1DF8F827, + 0x1DF8F827, + 0x1DF8F827, + 0x00919152, + 0x1DF8F84B, + 0x1DF8F825, + 0x1DF8F825, + 0x1DF8F825, + 0x004C4C51, + 0x1DF8F826, + 0x1DF8F825, + 0x1DF8F825, + 0x1DF8F825, + 0x00444440, + 0x0CF8F820, + 0x0C6EF81F, + 0x0C6EF81F, + 0x0C6EF81F, + 0x00424240, + 0x0D75753E, + 0x0D75751E, + 0x0D75751E, + 0x0D75751E, + 0x00191940, + 0x0539392E, + 0x05393914, + 0x05393914, + 0x05393914, + 0x002F2F32, + 0x1AA5F82C, + 
0x1AA5F82C, + 0x1ADEF82C, + 0x1ADEF82C, + 0x002F2F40, + 0x0C6EDE2C, + 0x0C6EDE2C, + 0x0C6EDE2C, + 0x0C6EDE2C, + 0x00323240, + 0x053BB62E, + 0x053BB62E, + 0x053BB62E, + 0x053BB62E, + 0x00333339, + 0x1DC4F82F, + 0x1DC4F82F, + 0x1DF8F82F, + 0x1DF8F82F, + 0x00333340, + 0x0E81F82F, + 0x0E81F82F, + 0x0E81F82F, + 0x0E82F82F, + 0x00333340, + 0x063FC42F, + 0x063FC42F, + 0x063FC42F, + 0x063FC42F, + 0x00404040, + 0x063FC42F, + 0x063FC42F, + 0x063FC42F, + 0x063FC42F, + 0x00363640, + 0x0747DD33, + 0x0747DD33, + 0x0747DD33, + 0x0747DD33, + 0x00404040, + 0x0747DD33, + 0x0747DD33, + 0x0747DD33, + 0x0747DD33, + 0x00292940, + 0x07484825, + 0x07484812, + 0x07484812, + 0x07484812, + 0x00404040, + 0x07343428, + 0x07343414, + 0x07343414, + 0x07343414, + 0x00404040, + 0x0538382A, + 0x05383814, + 0x05383814, + 0x05383814, + 0x00404040, + 0x05292914, + 0x05292909, + 0x05292909, + 0x05292909, + 0x000B0B40, + 0x02111108, + 0x0211110E, + 0x02111108, + 0x02111108, + 0x00404040, + 0x063E3E2E, + 0x063E3E15, + 0x063E3E14, + 0x063E3E14, + 0x00404040, + 0x062E2E14, + 0x062E2E09, + 0x062E2E09, + 0x062E2E09, + 0x000B0B40, + 0x02131308, + 0x0213130F, + 0x02131308, + 0x02131308 +#else + 0x00767679, + 0x1DF8F870, + 0x1DF8F870, + 0x1DF8F870, + 0x1DF8F870, + 0x006E6E72, + 0x1DF8F869, + 0x1DF8F869, + 0x1DF8F869, + 0x1DF8F869, + 0x0076767B, + 0x1DF8F870, + 0x1DF8F870, + 0x1DF8F870, + 0x1DF8F870, + 0x007E7E85, + 0x1DF4F876, + 0x1DF4F876, + 0x1DF4F876, + 0x1DF8F876, + 0x0081818A, + 0x1DF8F87B, + 0x1DF8F87B, + 0x1DF8F87B, + 0x1DF8F87B, + 0x0081818D, + 0x1DF8F87B, + 0x1DF8F87B, + 0x1DF8F87B, + 0x1DF8F87B, + 0x0081818A, + 0x1DF8F87B, + 0x1DF8F87C, + 0x1DF8F87B, + 0x1DF8F87B, + 0x007E7E40, + 0x1DF8F87B, + 0x1DF8F87B, + 0x1DF8F87B, + 0x1DF8F87B, + 0x008B8B92, + 0x1DF8F887, + 0x1DF8F889, + 0x1DF8F887, + 0x1DF8F887, + 0x00515155, + 0x1DF8F84C, + 0x1DF8F84C, + 0x1DF8F889, + 0x1DF8F889, + 0x00515154, + 0x1DF8F84C, + 0x1DF8F84C, + 0x1DF8F888, + 0x1DF8F888, + 0x004F4F53, + 0x1DF8F84A, + 0x1DF8F84A, + 0x1DF8F84A, + 0x1DF8F84A, + 0x004F4F53, + 0x1DF8F84A, + 0x1DF8F84A, + 0x1DF8F84A, + 0x1DF8F84A, + 0x004F4F53, + 0x1DF8F84A, + 0x1DF8F84A, + 0x1DF8F84A, + 0x1DF8F84A, + 0x004E4E53, + 0x1DF8F849, + 0x1DF8F848, + 0x1DF8F848, + 0x1DF8F848, + 0x004D4D52, + 0x1DF8F847, + 0x1DF8F847, + 0x1DF8F847, + 0x1DF8F847, + 0x004F4F55, + 0x1DF8F84B, + 0x1DF8F84A, + 0x1DF8F84A, + 0x1DF8F84A, + 0x004E4E53, + 0x1DF8F849, + 0x1DF8F848, + 0x1DF8F848, + 0x1DF8F848, + 0x0049494D, + 0x1DF8F844, + 0x1DF8F844, + 0x1DF8F844, + 0x1DF8F844, + 0x0051518F, + 0x1DF8F849, + 0x1DF8F848, + 0x1DF8F848, + 0x1DF8F848, + 0x00424277, + 0x1DF8F83F, + 0x1DF8F83C, + 0x1DF8F83C, + 0x1DF8F83C, + 0x00424275, + 0x1DF8F89E, + 0x1DF8F83C, + 0x1DF8F83C, + 0x1DF8F83C, + 0x0055555C, + 0x1DF8F84C, + 0x1DF8F84C, + 0x1DF8F84C, + 0x1DF8F84C, + 0x0053535C, + 0x1DF8F84C, + 0x1DF8F84B, + 0x1DF8F84B, + 0x1DF8F84B, + 0x00F8F89E, + 0x1DF8F88C, + 0x1DF8F84A, + 0x1DF8F84A, + 0x1DF8F84A, + 0x00898940, + 0x18F8F846, + 0x18CFF845, + 0x18CFF844, + 0x18CFF844, + 0x0056565F, + 0x1DF8F84F, + 0x1DF8F84F, + 0x1DF8F84F, + 0x1DF8F84F, + 0x0055555E, + 0x1DF8F84E, + 0x1DF8F84E, + 0x1DF8F84E, + 0x1DF8F84E, + 0x0056565F, + 0x1DF8F84F, + 0x1DF8F84F, + 0x1DF8F84F, + 0x1DF8F84F, + 0x00555561, + 0x1DF8F850, + 0x1DF8F84E, + 0x1DF8F84E, + 0x1DF8F84E, + 0x0053535F, + 0x1DF8F84D, + 0x1DF8F84C, + 0x1DF8F84C, + 0x1DF8F84C, + 0x0055555F, + 0x1DF8F84F, + 0x1DF8F84E, + 0x1DF8F84E, + 0x1DF8F84E, + 0x005555AA, + 0x1DF8F854, + 0x1DF8F84E, + 0x1DF8F84E, + 0x1DF8F84E, + 0x005959A6, + 0x1DF8F84D, + 0x1DF8F84C, + 0x1DF8F84C, + 0x1DF8F84C, + 
0x004F4F9B, + 0x1DF8F84E, + 0x1DF8F846, + 0x1DF8F846, + 0x1DF8F846, + 0x00F8F8A5, + 0x1DF8F894, + 0x1DF8F84C, + 0x1DF8F84C, + 0x1DF8F84C, + 0x009898A4, + 0x1DF8F84D, + 0x1DF8F84C, + 0x1DF8F84C, + 0x1DF8F84C, + 0x00464686, + 0x1DF8F8B3, + 0x1DF8F83D, + 0x1DF8F83D, + 0x1DF8F83D, + 0x008E8E40, + 0x1AF8F848, + 0x1ADFF848, + 0x1ADFF846, + 0x1ADFF846, + 0x007F7F40, + 0x18D2D275, + 0x18D2D23A, + 0x18D2D23A, + 0x18D2D239, + 0x00454540, + 0x0F868664, + 0x0F86863E, + 0x0F86863D, + 0x0F86863D, + 0x005C5C64, + 0x1DF8F856, + 0x1DF8F855, + 0x1DF8F855, + 0x1DF8F855, + 0x005B5B68, + 0x1DF8F858, + 0x1DF8F855, + 0x1DF8F855, + 0x1DF8F855, + 0x005A5A64, + 0x1DF8F855, + 0x1DF8F854, + 0x1DF8F854, + 0x1DF8F854, + 0x005A5AB5, + 0x1DF8F85B, + 0x1DF8F855, + 0x1DF8F854, + 0x1DF8F854, + 0x00F8F8B0, + 0x1DF8F8A3, + 0x1DF8F852, + 0x1DF8F852, + 0x1DF8F852, + 0x00A4A4AE, + 0x1DF8F854, + 0x1DF8F852, + 0x1DF8F852, + 0x1DF8F852, + 0x009A9A40, + 0x1DF8F84E, + 0x1DF8F84D, + 0x1DF8F84C, + 0x1DF8F84C, + 0x009C9C40, + 0x1DF8F895, + 0x1DF8F849, + 0x1DF8F84A, + 0x1DF8F84A, + 0x00494940, + 0x1197976F, + 0x11979742, + 0x11979741, + 0x11979741, + 0x006E6E74, + 0x1DF8F869, + 0x1DF8F869, + 0x1DF8F869, + 0x1DF8F869, + 0x006E6E40, + 0x1ADEF869, + 0x1ADEF869, + 0x1ADEF869, + 0x1ADEF869, + 0x00757540, + 0x0D78F86E, + 0x0D78F86E, + 0x0D78F86E, + 0x0D79F86E, + 0x00787885, + 0x1DF8F873, + 0x1DF8F873, + 0x1DF8F873, + 0x1DF8F873, + 0x00787840, + 0x1DF8F873, + 0x1DF8F873, + 0x1DF8F873, + 0x1DF8F873, + 0x00787840, + 0x0E81F873, + 0x0E81F873, + 0x0E81F873, + 0x0E82F873, + 0x00404040, + 0x0E82F873, + 0x0E82F873, + 0x0E82F873, + 0x0E82F873, + 0x00818140, + 0x1092F87E, + 0x1092F87E, + 0x1092F87E, + 0x1092F87E, + 0x00404040, + 0x1092F87E, + 0x1092F87E, + 0x1092F87E, + 0x1092F87E, + 0x00737340, + 0x14B2B26B, + 0x14B2B235, + 0x14B2B235, + 0x14B2B235, + 0x00404040, + 0x0E828260, + 0x0E82823D, + 0x0E82823C, + 0x0E82823C, + 0x00404040, + 0x0F8B8B66, + 0x0F8B8B3F, + 0x0F8B8B3D, + 0x0F8B8B3D, + 0x00404040, + 0x0B68683D, + 0x0B68681E, + 0x0B68681E, + 0x0B68681E, + 0x00222240, + 0x06434318, + 0x06434329, + 0x06434318, + 0x06434318, + 0x00404040, + 0x129D9D72, + 0x129D9D43, + 0x129D9D41, + 0x129D9D41, + 0x00404040, + 0x0D757542, + 0x0D757520, + 0x0D757520, + 0x0D757520, + 0x00232340, + 0x084C4C19, + 0x084C4C2C, + 0x084C4C19, + 0x084C4C19 +#endif +}; + + +uint32_t agc_cfg_ram[] = { + 0x20000000, + 0x0400000E, + 0x3000200E, + 0x5B000000, + 0x0400004B, + 0x3000008E, + 0x32000000, + 0x0400007B, + 0x40000000, + 0xF8000026, + 0x04000011, + 0x4819008E, + 0x9C000020, + 0x08000191, + 0x38008000, + 0x0A000000, + 0x08104411, + 0x38018000, + 0x0C004641, + 0x08D00014, + 0x30000000, + 0x01000000, + 0x04000017, + 0x30000000, + 0x3C000000, + 0x0400001A, + 0x38020000, + 0x40000001, + 0x0800001D, + 0x3808008E, + 0x14000050, + 0x08000020, + 0x4000008E, + 0xA400007B, + 0x00000101, + 0x3000339F, + 0x41000700, + 0x04104420, + 0x90000000, + 0x49000000, + 0xF00E842F, + 0xEC0E842C, + 0xEC0E842C, + 0x04000032, + 0x30000000, + 0x48000101, + 0x04000032, + 0x30000000, + 0x48000202, + 0x04000032, + 0x30000000, + 0x46000000, + 0x04000011, + 0x58010006, + 0x3D040472, + 0xDC204439, + 0x081DD4D2, + 0x480A0006, + 0xDC2044DC, + 0x081DD43C, + 0x38050004, + 0x0EF1F1C3, + 0x342044DC, + 0x30000000, + 0x01000000, + 0x04000042, + 0x30000000, + 0x33000000, + 0x04104445, + 0x38008000, + 0x2200109C, + 0x08104448, + 0x38008000, + 0x23D4509C, + 0x08104417, + 0x9000A000, + 0x32000000, + 0x18000063, + 0x14000060, + 0x1C000051, + 0x10000057, + 0x38028000, + 0x0C000001, + 0x08D04466, + 0x3000200F, + 0x00000000, 
+ 0x00000000, + 0x38030000, + 0x0C002601, + 0x08D0445A, + 0x30000000, + 0x3D020230, + 0x0400005D, + 0x30000000, + 0x3E000100, + 0x04000066, + 0x38028000, + 0x0C001601, + 0x34204466, + 0x38028000, + 0x0C000A01, + 0x34204466, + 0x38008004, + 0xFF000000, + 0x0800007B, + 0x3800802F, + 0x26000000, + 0x0800006C, + 0x380404AF, + 0x1F191010, + 0x0800006F, + 0x20000CAF, + 0x04000071, + 0x60000CAF, + 0x18700079, + 0x14000077, + 0x10000075, + 0x28140CAF, + 0x09B00084, + 0x280A0CAF, + 0x09B00084, + 0x28060CAF, + 0x09B00084, + 0x28048086, + 0x0800007D, + 0x38000086, + 0x22800000, + 0x04000080, + 0x30000000, + 0x0EF1F101, + 0x36004883, + 0x28020000, + 0x08000085, + 0x3802008E, + 0x3D040431, + 0x08000088, + 0x3805008E, + 0x1F241821, + 0x0800008B, + 0x3000008E, + 0xA0163021, + 0x0400008E, + 0x3000008E, + 0x0EF10012, + 0x34000091, + 0x300000CC, + 0x50000000, + 0x04000094, + 0x380095FE, + 0x32010000, + 0x04000097, + 0x50001FFE, + 0x5A010000, + 0x6DC9989B, + 0xFC19D4B9, + 0x30000186, + 0x3D840373, + 0x0400009E, + 0x3000008E, + 0x0A000000, + 0x040000A1, + 0x3000008E, + 0x22C00000, + 0x040000A4, + 0x9000028E, + 0x32010001, + 0x8E4000AA, + 0xC80000B0, + 0x00000000, + 0x00000000, + 0x3000008E, + 0x32010001, + 0x040000CB, + 0x3000008E, + 0x29000000, + 0x94045011, + 0x300019B6, + 0x32010000, + 0x040000B3, + 0x300019B6, + 0x3D040431, + 0x040000B6, + 0x300019B6, + 0x22800000, + 0x04000097, + 0x30000186, + 0x3D840473, + 0x040000BC, + 0x3000008E, + 0x29030000, + 0x040000BF, + 0x9AEE028E, + 0x32010100, + 0x7C0000C5, + 0xCC0000B0, + 0x080000B0, + 0x00000000, + 0x3000008E, + 0x32010100, + 0x040000C8, + 0x3000028E, + 0x29000000, + 0x94045011, + 0x5000038E, + 0x29000000, + 0x94045011, + 0xC0000035, + 0x38010006, + 0x3D040472, + 0x080000D2, + 0x30000004, + 0x0EF1F141, + 0x340000D5, + 0x28040004, + 0x080000D7, + 0x2808000E, + 0x080000D9, + 0x3000018E, + 0x0EF10052, + 0x340000DC, + 0x3000038E, + 0x29000000, + 0x94045011, + 0x38020000, + 0x32000000, + 0x080000E2, + 0x60000000, + 0xD80000E6, + 0xD40000E9, + 0x040000EC, + 0x30000000, + 0x0EF1F121, + 0x360048EF, + 0x30000000, + 0x0C002421, + 0x360048EF, + 0x30000000, + 0x0C000021, + 0x360048EF, + 0x28020000, + 0x0800007B, + 0x50001EFE, + 0x5A010000, + 0x6DC998F5, + 0xFC19D4F8, + 0x3000028E, + 0x32000040, + 0x040000FB, + 0x3AEE028E, + 0x32000080, + 0x040000FB, + 0x30000000, + 0x0EF1F101, + 0x360048FE, + 0x28020000, + 0x08000100, + 0x3802008E, + 0x3D040431, + 0x08000103, + 0x3805008E, + 0x1F241821, + 0x08000106, + 0x3000008E, + 0xA0163021, + 0x04000109, + 0x3000008E, + 0x0EF10012, + 0x3400010C, + 0x300014F6, + 0x32010000, + 0x04000114, + 0x20000000, + 0x04000111, + 0x300000EC, + 0x50000000, + 0x040000F1, + 0x300014F6, + 0x32030000, + 0x04000117, + 0x30001086, + 0x3D840473, + 0x0400011A, + 0x5000108E, + 0x22C00000, + 0x8E47C0CB, + 0xCB30011E, + 0x300019B6, + 0x32040000, + 0x04000121, + 0x300019B6, + 0x3D040431, + 0x04000124, + 0x300019B6, + 0x22800000, + 0x04000111, + 0x00000000, + 0x00000000, + 0x00000000, + 0x30000186, + 0x3D840473, + 0x0400012D, + 0x5000038E, + 0x29000000, + 0x94045011, + 0xC0000131, + 0x380C800E, + 0xFF000000, + 0x08000134, + 0x30000004, + 0x0FF1F103, + 0x34000137, + 0x28020000, + 0x08000139, + 0x3000038E, + 0x29000000, + 0x94045011, + 0x00000000, + 0x00000000, + 0x00000000, + 0x58010006, + 0x3D040472, + 0xDC204543, + 0x081DD4D2, + 0x480A0006, + 0xDC2044DC, + 0x081DD546, + 0x38050004, + 0x0EF1F141, + 0x342044DC, + 0x2802800E, + 0x080000DC, + 0x48000035, + 0x0400014A, + 0x7896638F, + 0x4100000F, + 0x8C00014F, + 0x080450C4, + 0x90104574, + 0x88C8620F, + 
0xC000015A, + 0x90104574, + 0x08104554, + 0x94104557, + 0x3000628F, + 0x29000000, + 0x9404517A, + 0x3000638F, + 0x29000000, + 0x0410457A, + 0x3800E005, + 0x3D010131, + 0x0810455D, + 0xA832600F, + 0x90104574, + 0x08000154, + 0x94104557, + 0xC6104567, + 0xC4185563, + 0x5802E00F, + 0x0FEEEA07, + 0x80000174, + 0x3420456B, + 0x5802E00F, + 0x0EEEEA07, + 0x80000174, + 0x3420456B, + 0x30004000, + 0x33000001, + 0x0400016E, + 0x38034005, + 0x3D030373, + 0x08000171, + 0x30006007, + 0x33000000, + 0x04000174, + 0x3000608F, + 0x29000000, + 0x94045177, + 0x4000608F, + 0xA010457D, + 0x0410457A, + 0x3000608F, + 0x64000101, + 0x04104411, + 0x3000608F, + 0x64000101, + 0x04104580, + 0x3000618F, + 0x42000001, + 0x04000183, + 0x38028000, + 0x32000000, + 0x08104586, + 0x280A618F, + 0x08000188, + 0x480A618F, + 0xBC00018B, + 0x0800018E, + 0x3000618F, + 0x34000001, + 0x04000005, + 0x3000618F, + 0x34000000, + 0x04000008, + 0x3000008F, + 0x0EEAED0F, + 0x36000194, + 0x38038000, + 0x34000000, + 0x08000197, + 0x38028005, + 0x29010002, + 0x0800019A, + 0x3000028F, + 0x2200209C, + 0x0400019D, + 0x3000028F, + 0x23D4509C, + 0x040001A0, + 0x2814028F, + 0x080001A2, + 0x3000028F, + 0x43010201, + 0x040001A5, + 0x3000128F, + 0x32000100, + 0x040001A8, + 0x5AEE138F, + 0x4100000F, + 0x7C0001AC, + 0x080000F9, + 0x592C138F, + 0x29000000, + 0x8C0001B0, + 0x080000F9, + 0x2000138F, + 0x94045011, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000 +}; + +#if !defined(CONFIG_EXT_FEM_8800DCDW) +uint32_t txgain_map[96] = { +#ifdef CONFIG_FPGA_VERIFICATION + 0x20c0c971, + 0x20c0c980, + 0x20c0c992, + 0x20c0c9a6, + 0x20c0c9bf, + 0x20c0caa5, + 0x20c0cabd, + 0x20c0cba0, + 0x20c0cbb6, + 0x20c0cbea, + 0x20c0ccc5, + 0x20c0cdac, + 0x20c0cdd0, + 0x20c0ceb2, + 0x20c0ceff, + 0x20c0cfff, + 0x20c0c922, + 0x20c0c922, + 0x20c0c922, + 0x20c0c922, + 0x20c0c922, + 0x20c0c922, + 0x20c0c922, + 0x20c0c927, + 0x20c0c92c, + 0x20c0c931, + 0x20c0c937, + 0x20c0c93f, + 0x20c0c946, + 0x20c0c94f, + 0x20c0c959, + 0x20c0c964, + 0x20c0cbee, + 0x20c0cce0, + 0x20c0ccff, + 0x20c0cde2, + 0x20c0cdfe, + 0x20c0cede, + 0x20c0cefc, + 0x20c0cfd9, + 0x20c0cff8, + 0x20c0cfff, + 0x20c0cfff, + 0x20c0cfff, + 0x20c0cfff, + 0x20c0cfff, + 0x20c0cfff, + 0x20c0cfff, + 0x20c0c97c, + 0x20c0c97c, + 0x20c0c97c, + 0x20c0c97c, + 0x20c0c97c, + 0x20c0c97c, + 0x20c0c97c, + 0x20c0c98c, + 0x20c0ca79, + 0x20c0ca89, + 0x20c0cb74, + 0x20c0cb84, + 0x20c0cb94, + 0x20c0cba8, + 0x20c0cbbb, + 0x20c0cbd2, + 0x20c0cbee, + 0x20c0cce0, + 0x20c0ccff, + 0x20c0cde2, + 0x20c0cdfe, + 0x20c0cede, + 0x20c0cefc, + 0x20c0cfd9, + 0x20c0cff8, + 
0x20c0cfff, + 0x20c0cfff, + 0x20c0cfff, + 0x20c0cfff, + 0x20c0cfff, + 0x20c0cfff, + 0x20c0cfff, + 0x20c0c97c, + 0x20c0c97c, + 0x20c0c97c, + 0x20c0c97c, + 0x20c0c97c, + 0x20c0c97c, + 0x20c0c97c, + 0x20c0c98c, + 0x20c0ca79, + 0x20c0ca89, + 0x20c0cb74, + 0x20c0cb84, + 0x20c0cb94, + 0x20c0cba8, + 0x20c0cbbb, + 0x20c0cbd2, +#else + //11b + 0x00ffd780, + 0x00ffd872, + 0x00ffd880, + 0x00ffd972, + 0x00ffd980, + 0x00ffda75, + 0x00ffda86, + 0x00ffdb77, + 0x00ffdb86, + 0x00ffdc78, + 0x00ffdc89, + 0x00ffdd79, + 0x00ffdd89, + 0x00ffde83, + 0x00ffdf79, + 0x00ffdf8b, + 0x00ffd072, + 0x00ffd072, + 0x00ffd080, + 0x00ffd172, + 0x00ffd180, + 0x00ffd272, + 0x00ffd280, + 0x00ffd36d, + 0x00ffd379, + 0x00ffd46d, + 0x00ffd479, + 0x00ffd572, + 0x00ffd580, + 0x00ffd672, + 0x00ffd680, + 0x00ffd772, + //high + 0x00ffc87d, + 0x00ffc88b, + 0x00ffc979, + 0x00ffc989, + 0x00ffcc4b, + 0x00ffcc54, + 0x00ffcc5e, + 0x00ffcc69, + 0x00ffcc78, + 0x00ffcc85, + 0x00ffcd70, + 0x00ffcd80, + 0x00ffce70, + 0x00ffce80, + 0x00ffcf7d, + 0x00ffcf90, + 0x00ffc080, + 0x00ffc090, + 0x00ffc180, + 0x00ffc190, + 0x00ffc27b, + 0x00ffc28b, + 0x00ffc37b, + 0x00ffc390, + 0x00ffc485, + 0x00ffc495, + 0x00ffc579, + 0x00ffc589, + 0x00ffc679, + 0x00ffc689, + 0x00ffc780, + 0x00ffc790, + //low + 0x00ffc87d, + 0x00ffc88b, + 0x00ffc979, + 0x00ffc989, + 0x00ffcc4b, + 0x00ffcc54, + 0x00ffcc5e, + 0x00ffcc69, + 0x00ffcc78, + 0x00ffcc85, + 0x00ffcd70, + 0x00ffcd80, + 0x00ffcd90, + 0x00ffcf68, + 0x00ffcf75, + 0x00ffcf83, + 0x00ffc080, + 0x00ffc090, + 0x00ffc180, + 0x00ffc190, + 0x00ffc27b, + 0x00ffc28b, + 0x00ffc37b, + 0x00ffc390, + 0x00ffc485, + 0x00ffc495, + 0x00ffc579, + 0x00ffc589, + 0x00ffc679, + 0x00ffc689, + 0x00ffc780, + 0x00ffc790, +#endif +}; + +const uint32_t txgain_map_h[96] = +{ + //11b + 0xffd888, //11 + 0xffd979, //12 + 0xffd988, //13 + 0xffda79, //14 + 0xffda88, //15 + 0xffdb79, //16 + 0xffdb88, //17 + 0xffdc72, //18 + 0xffdc80, //19 + 0xffdd80, //20 + 0xffde66, //21 + 0xffde72, //22 + 0xffde80, //23 + 0xffdf79, //24 + 0xffdf88, //25 + 0xffdf98, //26 + 0xffd079, //-5 + 0xffd088, //-4 + 0xffd179, //-3 + 0xffd188, //-2 + 0xffd288, //-1 + 0xffd36c, //0 + 0xffd379, //1 + 0xffd388, //2 + 0xffd479, //3 + 0xffd488, //4 + 0xffd579, //5 + 0xffd588, //6 + 0xffd679, //7 + 0xffd688, //8 + 0xffd779, //9 + 0xffd879, //10 + //high + 0xffc879, //8 + 0xffc96b, //9 + 0xffc979, //10 + 0xffcc45, //11 + 0xffcc4d, //12 + 0xffcc56, //13 + 0xffcc60, //14 + 0xffcc6b, //15 + 0xffcc79, //16 + 0xffcd72, //17 + 0xffce60, //18 + 0xffce72, //19 + 0xffcf72, //20 + 0xffcf80, //21 + 0xffcf90, //22 + 0xffcf90, //23 + 0xffc079, //-8 + 0xffc16b, //-7 + 0xffc179, //-6 + 0xffc26b, //-5 + 0xffc279, //-4 + 0xffc36b, //-3 + 0xffc379, //-2 + 0xffc46b, //-1 + 0xffc479, //0 + 0xffc56b, //1 + 0xffc579, //2 + 0xffc66b, //3 + 0xffc679, //4 + 0xffc76b, //5 + 0xffc779, //6 + 0xffc86b, //7 + //low + 0xffc879, //8 + 0xffc96b, //9 + 0xffc979, //10 + 0xffcc45, //11 + 0xffcc4d, //12 + 0xffcc56, //13 + 0xffcc60, //14 + 0xffcc6b, //15 + 0xffcc79, //16 + 0xffcd72, //17 + 0xffcf5b, //18 + 0xffcf66, //19 + 0xffcf72, //20 + 0xffcf80, //21 + 0xffcf90, //22 + 0xffcf90, //23 + 0xffc079, //-8 + 0xffc16b, //-7 + 0xffc179, //-6 + 0xffc26b, //-5 + 0xffc279, //-4 + 0xffc36b, //-3 + 0xffc379, //-2 + 0xffc46b, //-1 + 0xffc479, //0 + 0xffc56b, //1 + 0xffc579, //2 + 0xffc66b, //3 + 0xffc679, //4 + 0xffc76b, //5 + 0xffc779, //6 + 0xffc86b, //7 +}; + +#else /* #ifdef CONFIG_EXT_FEM_8800DCDW */ +const uint32_t txgain_map_femkct[96] = +{ + //11b + 0x00ffd780,//15 + 0x00ffd872,//16 + 0x00ffd880,//17 + 0x00ffd972,//18 + 
0x00ffd980,//19 + 0x00ffda72,//20 + 0x00ffda80,//21 + 0x00ffdb72,//22 + 0x00ffdb80,//23 + 0x00ffdc72,//24 + 0x00ffdc80,//25 + 0x00ffdd72,//26 + 0x00ffdd80,//27 + 0x00ffde72,//28 + 0x00ffde80,//29 + 0x00ffdf72,//30 + 0x00ffd072,//-1 + 0x00ffd072,//0 + 0x00ffd080,//1 + 0x00ffd172,//2 + 0x00ffd180,//3 + 0x00ffd272,//4 + 0x00ffd280,//5 + 0x00ffd36d,//6 + 0x00ffd379,//7 + 0x00ffd46d,//8 + 0x00ffd479,//9 + 0x00ffd572,//10 + 0x00ffd580,//11 + 0x00ffd672,//12 + 0x00ffd680,//13 + 0x00ffd772,//14 + //high + 0x00ffc872,//11 + 0x00ffc880,//12 + 0x00ffc972,//13 + 0x00ffc980,//14 + 0x00ffca72,//15 + 0x00ffca80,//16 + 0x00ffcb72,//17 + 0x00ffcb80,//18 + 0x00ffcc72,//19 + 0x00ffcc80,//20 + 0x00ffcd72,//21 + 0x00ffcd80,//22 + 0x00ffce72,//23 + 0x00ffce80,//24 + 0x00ffcf72,//25 + 0x00ffcf80,//26 + 0x00ffc072,//-5 + 0x00ffc080,//-4 + 0x00ffc172,//-3 + 0x00ffc180,//-2 + 0x00ffc272,//-1 + 0x00ffc280,//0 + 0x00ffc372,//1 + 0x00ffc380,//2 + 0x00ffc472,//3 + 0x00ffc480,//4 + 0x00ffc572,//5 + 0x00ffc580,//6 + 0x00ffc672,//7 + 0x00ffc680,//8 + 0x00ffc772,//9 + 0x00ffc780,//10 + //low + 0x00ffc872,//11 + 0x00ffc880,//12 + 0x00ffc972,//13 + 0x00ffc980,//14 + 0x00ffca72,//15 + 0x00ffca80,//16 + 0x00ffcb72,//17 + 0x00ffcb80,//18 + 0x00ffcc72,//19 + 0x00ffcc80,//20 + 0x00ffcd72,//21 + 0x00ffcd80,//22 + 0x00ffce72,//23 + 0x00ffce80,//24 + 0x00ffcf72,//26 + 0x00ffcf80,//27 + 0x00ffc072,//-5 + 0x00ffc080,//-4 + 0x00ffc172,//-3 + 0x00ffc180,//-2 + 0x00ffc272,//-1 + 0x00ffc280,//0 + 0x00ffc372,//1 + 0x00ffc380,//2 + 0x00ffc472,//3 + 0x00ffc480,//4 + 0x00ffc572,//5 + 0x00ffc580,//6 + 0x00ffc672,//7 + 0x00ffc680,//8 + 0x00ffc772,//9 + 0x00ffc780,//10 +}; + +const uint32_t txgain_map_femkct_h[96] = +{ + //11b + 0x00ffd872,//15 + 0x00ffd880,//16 + 0x00ffd972,//17 + 0x00ffd980,//18 + 0x00ffd990,//19 + 0x00ffda72,//20 + 0x00ffda80,//21 + 0x00ffdb72,//22 + 0x00ffdb80,//23 + 0x00ffdc72,//24 + 0x00ffdc80,//25 + 0x00ffdd72,//26 + 0x00ffdd80,//27 + 0x00ffde72,//28 + 0x00ffde80,//29 + 0x00ffdf72,//30 + 0x00ffd072,//-1 + 0x00ffd072,//0 + 0x00ffd080,//1 + 0x00ffd172,//2 + 0x00ffd180,//3 + 0x00ffd272,//4 + 0x00ffd280,//5 + 0x00ffd379,//6 + 0x00ffd46d,//7 + 0x00ffd479,//8 + 0x00ffd572,//9 + 0x00ffd580,//10 + 0x00ffd672,//11 + 0x00ffd680,//12 + 0x00ffd772,//13 + 0x00ffd780,//14 + //high + 0x00ffc880,//11 + 0x00ffc972,//12 + 0x00ffc980,//13 + 0x00ffca72,//14 + 0x00ffca80,//15 + 0x00ffcb72,//16 + 0x00ffcb80,//17 + 0x00ffcc72,//18 + 0x00ffcc80,//19 + 0x00ffcc90,//20 + 0x00ffcd72,//21 + 0x00ffcd80,//22 + 0x00ffce72,//23 + 0x00ffce80,//24 + 0x00ffcf72,//25 + 0x00ffcf80,//26 + 0x00ffc080,//-5 + 0x00ffc172,//-4 + 0x00ffc180,//-3 + 0x00ffc272,//-2 + 0x00ffc280,//-1 + 0x00ffc372,//0 + 0x00ffc380,//1 + 0x00ffc472,//2 + 0x00ffc480,//3 + 0x00ffc572,//4 + 0x00ffc580,//5 + 0x00ffc672,//6 + 0x00ffc680,//7 + 0x00ffc772,//8 + 0x00ffc780,//9 + 0x00ffc872,//10 + //low + 0x00ffc880,//11 + 0x00ffc972,//12 + 0x00ffc980,//13 + 0x00ffca72,//14 + 0x00ffca80,//15 + 0x00ffcb72,//16 + 0x00ffcb80,//17 + 0x00ffcc72,//18 + 0x00ffcc80,//19 + 0x00ffcc90,//20 + 0x00ffcd72,//21 + 0x00ffcd80,//22 + 0x00ffce72,//23 + 0x00ffce80,//24 + 0x00ffcf72,//25 + 0x00ffcf80,//26 + 0x00ffc080,//-5 + 0x00ffc172,//-4 + 0x00ffc180,//-3 + 0x00ffc272,//-2 + 0x00ffc280,//-1 + 0x00ffc372,//0 + 0x00ffc380,//1 + 0x00ffc472,//2 + 0x00ffc480,//3 + 0x00ffc572,//4 + 0x00ffc580,//5 + 0x00ffc672,//6 + 0x00ffc680,//7 + 0x00ffc772,//8 + 0x00ffc780,//9 + 0x00ffc872,//10 +}; +#endif + +u32 patch_tbl_func[][2] = +{ + {0x00110054, 0x0018186D}, // same as jump_tbl idx 168 + {0x0011005C, 0x0018186D}, // same as 
jump_tbl idx 168 +}; + + +u32 patch_tbl_rf_func[][2] = +{ + {0x00110bf0, 0x00180001}, +}; + + +#if !defined(CONFIG_EXT_FEM_8800DCDW) +u32 wifi_txgain_table_24g_8800dcdw[32] = +{ + 0xA4B22189, //index 0 + 0x00007825, + 0xA4B2214B, //index 1 + 0x00007825, + 0xA4B2214F, //index 2 + 0x00007825, + 0xA4B221D5, //index 3 + 0x00007825, + 0xA4B221DC, //index 4 + 0x00007825, + 0xA4B221E5, //index 5 + 0x00007825, + 0xAC9221E5, //index 6 + 0x00006825, + 0xAC9221EF, //index 7 + 0x00006825, + 0xBC9221EE, //index 8 + 0x00006825, + 0xBC9221FF, //index 9 + 0x00006825, + 0xBC9221FF, //index 10 + 0x00004025, + 0xB792203F, //index 11 + 0x00004026, + 0xDC92203F, //index 12 + 0x00004025, + 0xE692203F, //index 13 + 0x00004025, + 0xFF92203F, //index 14 + 0x00004035, + 0xFFFE203F, //index 15 + 0x00004832 +}; + +u32 wifi_txgain_table_24g_1_8800dcdw[32] = +{ + 0x096E2011, //index 0 + 0x00004001, + 0x096E2015, //index 1 + 0x00004001, + 0x096E201B, //index 2 + 0x00004001, + 0x116E2018, //index 3 + 0x00004001, + 0x116E201E, //index 4 + 0x00004001, + 0x116E2023, //index 5 + 0x00004001, + 0x196E2021, //index 6 + 0x00004001, + 0x196E202B, //index 7 + 0x00004001, + 0x216E202B, //index 8 + 0x00004001, + 0x236E2027, //index 9 + 0x00004001, + 0x236E2031, //index 10 + 0x00004001, + 0x246E2039, //index 11 + 0x00004001, + 0x26922039, //index 12 + 0x00004001, + 0x2E92203F, //index 13 + 0x00004001, + 0x3692203F, //index 14 + 0x00004001, + 0x3FF2203F, //index 15 + 0x00004001, +}; + +u32 wifi_txgain_table_24g_8800dcdw_h[32] = +{ + 0xA55629C9, //index 0 + 0x00005825, + 0xAE5629C9, //index 1 + 0x00005825, + 0xAD5629CD, //index 2 + 0x00005825, + 0xAD5629D1, //index 3 + 0x00005825, + 0xAD5629D7, //index 4 + 0x00005825, + 0xAD5629DE, //index 5 + 0x00005825, + 0xAD5629E6, //index 6 + 0x00005825, + 0xBD5629E6, //index 7 + 0x00005825, + 0xBD5629F0, //index 8 + 0x00005825, + 0xCD5629F0, //index 9 + 0x00005825, + 0xE55629F0, //index 10 + 0x00005825, + 0xE55629FF, //index 11 + 0x00005825, + 0xE55629FF, //index 12 + 0x00002825, + 0xE75629FF, //index 13 + 0x00002825, + 0xFF5629FF, //index 14 + 0x00001825, + 0xFF5628FF, //index 15 + 0x00001025, +}; + +u32 wifi_txgain_table_24g_1_8800dcdw_h[32] = +{ + 0x941A2048, //index 0 + 0x00001825, + 0x961A2048, //index 1 + 0x00001825, + 0x9D1A2048, //index 2 + 0x00001825, + 0x9A1A204F, //index 3 + 0x00001825, + 0x961A204F, //index 4 + 0x00001825, + 0x9A1A2057, //index 5 + 0x00001825, + 0x9C1A2057, //index 6 + 0x00001825, + 0xA31A205B, //index 7 + 0x00001825, + 0xAB1A205B, //index 8 + 0x00001825, + 0xAD1A205B, //index 9 + 0x00001825, + 0xA71A2064, //index 10 + 0x00001825, + 0xAD1A2070, //index 11 + 0x00001825, + 0xAD72207F, //index 12 + 0x00001825, + 0xBCAE207F, //index 13 + 0x00001825, + 0xBFB2207F, //index 14 + 0x00001825, + 0xD73A207F, //index 15 + 0x00001825, +}; + +#else /* #ifdef CONFIG_EXT_FEM_8800DCDW */ +// ofdm +uint32_t wifi_txgain_table_24g_8800dcdw_femkct[32] = { + 0x919221C2, //index 0 + 0x00007825, + 0x899221C3, //index 1 + 0x00007825, + 0x8B9221C3, //index 2 + 0x00007825, + 0x929221C3, //index 3 + 0x00007825, + 0x949221C4, //index 4 + 0x00007825, + 0x969221C4, //index 5 + 0x00007825, + 0x949221C6, //index 6 + 0x00007825, + 0x949221C8, //index 7 + 0x00007825, + 0x9C9221C8, //index 8 + 0x00007825, + 0x9C9221CA, //index 9 + 0x00007825, + 0x9C9221CB, //index 10 + 0x00007825, + 0x939221D5, //index 11 + 0x00007825, + 0x9B9221D7, //index 12 + 0x00007825, + 0xA49221D7, //index 13 + 0x00007825, + 0xA79221D7, //index 14 + 0x00007825, + 0xBD9221D7, //index 15 + 0x00007825, +}; + +// 11b +uint32_t 
wifi_txgain_table_24g_1_8800dcdw_femkct[32] = { + 0x836E20C2, //index 0 + 0x00003024, + 0x856E20C2, //index 1 + 0x00003024, + 0x826E20C3, //index 2 + 0x00003024, + 0x836E20C3, //index 3 + 0x00003024, + 0x856E20C3, //index 4 + 0x00003024, + 0x876E20C3, //index 5 + 0x00003024, + 0x8B6E20C3, //index 6 + 0x00003024, + 0x926E20C4, //index 7 + 0x00003024, + 0x9A6E20C4, //index 8 + 0x00003024, + 0x936E20C5, //index 9 + 0x00003024, + 0x936E20C7, //index 10 + 0x00003024, + 0xA16E20C8, //index 11 + 0x00003024, + 0xA16E20CA, //index 12 + 0x00003024, + 0xA26E20CB, //index 13 + 0x00003024, + 0xAA6E20CD, //index 14 + 0x00003024, + 0xAC7220CF, //index 15 + 0x00003024, +}; +#endif + +u32 wifi_rxgain_table_24g_20m_8800dcdw[64] = { + 0x82f282d1,//index 0 + 0x9591a324, + 0x80808419, + 0x000000f0, + 0x42f282d1,//index 1 + 0x95923524, + 0x80808419, + 0x000000f0, + 0x22f282d1,//index 2 + 0x9592c724, + 0x80808419, + 0x000000f0, + 0x02f282d1,//index 3 + 0x9591a324, + 0x80808419, + 0x000000f0, + 0x06f282d1,//index 4 + 0x9591a324, + 0x80808419, + 0x000000f0, + 0x0ef29ad1,//index 5 + 0x9591a324, + 0x80808419, + 0x000000f0, + 0x0ef29ad3,//index 6 + 0x95923524, + 0x80808419, + 0x000000f0, + 0x0ef29ad7,//index 7 + 0x9595a324, + 0x80808419, + 0x000000f0, + 0x02f282d2,//index 8 + 0x95951124, + 0x80808419, + 0x000000f0, + 0x02f282f4,//index 9 + 0x95951124, + 0x80808419, + 0x000000f0, + 0x02f282e6,//index 10 + 0x9595a324, + 0x80808419, + 0x000000f0, + 0x02f282e6,//index 11 + 0x9599a324, + 0x80808419, + 0x000000f0, + 0x02f282e6,//index 12 + 0x959da324, + 0x80808419, + 0x000000f0, + 0x02f282e6,//index 13 + 0x959f5924, + 0x80808419, + 0x000000f0, + 0x06f282e6,//index 14 + 0x959f5924, + 0x80808419, + 0x000000f0, + 0x0ef29ae6,//index 15 + 0x959f5924, //loft [35:34]=3 + 0x80808419, + 0x000000f0 +}; + +u32 wifi_rxgain_table_24g_40m_8800dcdw[64] = { + 0x83428151,//index 0 + 0x9631a328, + 0x80808419, + 0x000000f0, + 0x43428151,//index 1 + 0x96323528, + 0x80808419, + 0x000000f0, + 0x23428151,//index 2 + 0x9632c728, + 0x80808419, + 0x000000f0, + 0x03428151,//index 3 + 0x9631a328, + 0x80808419, + 0x000000f0, + 0x07429951,//index 4 + 0x9631a328, + 0x80808419, + 0x000000f0, + 0x0f42d151,//index 5 + 0x9631a328, + 0x80808419, + 0x000000f0, + 0x0f42d153,//index 6 + 0x96323528, + 0x80808419, + 0x000000f0, + 0x0f42d157,//index 7 + 0x9635a328, + 0x80808419, + 0x000000f0, + 0x03428152,//index 8 + 0x96351128, + 0x80808419, + 0x000000f0, + 0x03428174,//index 9 + 0x96351128, + 0x80808419, + 0x000000f0, + 0x03428166,//index 10 + 0x9635a328, + 0x80808419, + 0x000000f0, + 0x03428166,//index 11 + 0x9639a328, + 0x80808419, + 0x000000f0, + 0x03428166,//index 12 + 0x963da328, + 0x80808419, + 0x000000f0, + 0x03428166,//index 13 + 0x963f5928, + 0x80808419, + 0x000000f0, + 0x07429966,//index 14 + 0x963f5928, + 0x80808419, + 0x000000f0, + 0x0f42d166,//index 15 + 0x963f5928, + 0x80808419, + 0x000000f0 +}; + +//adap test +u32 adaptivity_patch_tbl[][2] = { + {0x000C, 0x0000320A}, //linkloss_thd + {0x009C, 0x00000000}, //ac_param_conf + {0x0128, 0xF6140001}, //tx_adaptivity_en +}; +//adap test + +#ifdef CONFIG_LOAD_BT_PATCH_IN_FDRV +int rwnx_get_patch_addr_from_patch_table(struct rwnx_hw *rwnx_hw, char *filename, uint32_t *fw_patch_base_addr) +{ + int size; + int ret = 0; + uint8_t *rawdata=NULL; + uint8_t *p = NULL; + uint32_t *data = NULL; + uint32_t type = 0, len = 0; + int j; + + /* load aic firmware */ + size = rwnx_request_firmware_common(rwnx_hw, (u32 **)&rawdata, filename); + + /* Copy the file on the Embedded side */ + printk("### Upload %s 
fw_patch_table, size=%d\n", filename, size); + + if (size <= 0) { + printk("wrong size of firmware file\n"); + ret = -1; + goto err; + } + + p = rawdata; + + if (memcmp(p, AICBT_PT_TAG, sizeof(AICBT_PT_TAG) < 16 ? sizeof(AICBT_PT_TAG) : 16)) { + printk("TAG err\n"); + ret = -1; + goto err; + } + p += 16; + + while (p - rawdata < size) { + printk("size = %d p - rawdata = 0x%0lx \r\n", size, p - rawdata); + p += 16; + + type = *(uint32_t *)p; + p += 4; + + len = *(uint32_t *)p; + p += 4; + printk("cur->type %x, len %d\n", type, len); + + if(type >= 1000 ) {//Temp Workaround + len = 0; + }else{ + data = (uint32_t *)p; + if (type == AICBT_PT_NULL) { + *(fw_patch_base_addr) = *(data + 3); + printk("addr found %x\n", *(fw_patch_base_addr)); + for (j = 0; j < len; j++) { + printk("addr %x\n", *(data+j)); + } + break; + } + p += len * 8; + } + } + + vfree(rawdata); + return ret; +err: + //aicbt_patch_table_free(&head); + + if (rawdata){ + vfree(rawdata); + } + return ret; +} + +int rwnx_patch_table_free(struct aicbt_patch_table **head) +{ + struct aicbt_patch_table *p = *head, *n = NULL; + while (p) { + n = p->next; + vfree(p->name); + vfree(p->data); + vfree(p); + p = n; + } + *head = NULL; + return 0; +} + +int rwnx_patch_table_load(struct rwnx_hw *rwnx_hw, struct aicbt_patch_table *_head) +{ + struct aicbt_patch_table *head, *p; + int ret = 0, i; + uint32_t *data = NULL; + + head = _head; + for (p = head; p != NULL; p = p->next) { + data = p->data; + if(AICBT_PT_BTMODE == p->type){ + *(data + 1) = aicbsp_info.hwinfo < 0; + *(data + 3) = aicbsp_info.hwinfo; + *(data + 5) = aicbsp_info.cpmode; + + *(data + 7) = aicbt_info.btmode; + *(data + 9) = aicbt_info.btport; + *(data + 11) = aicbt_info.uart_baud; + *(data + 13) = aicbt_info.uart_flowctrl; + *(data + 15) = aicbt_info.lpm_enable; + *(data + 17) = aicbt_info.txpwr_lvl; + + } + if (p->type == AICBT_PT_NULL || p->type == AICBT_PT_PWRON) { + continue; + } + + + if (p->type == AICBT_PT_VER) { + char *data_s = (char *)p->data; + printk("patch version %s\n", data_s); + continue; + } + + if (p->len == 0) { + printk("len is 0\n"); + continue; + } + + for (i = 0; i < p->len; i++) { + ret = rwnx_send_dbg_mem_write_req(rwnx_hw, *data, *(data + 1)); + if (ret != 0) + return ret; + data += 2; + } + + } + rwnx_patch_table_free(&head); + return 0; +} + + + + +int rwnx_patch_table_download(struct rwnx_hw *rwnx_hw, char *filename) +{ + struct aicbt_patch_table *head = NULL; + struct aicbt_patch_table *new = NULL; + struct aicbt_patch_table *cur = NULL; + int size; + int ret = 0; + uint8_t *rawdata=NULL; + uint8_t *p = NULL; + + /* load aic firmware */ + size = rwnx_request_firmware_common(rwnx_hw, (u32 **)&rawdata, filename); + + /* Copy the file on the Embedded side */ + printk("### Upload %s fw_patch_table, size=%d\n", filename, size); + + if (size <= 0) { + printk("wrong size of firmware file\n"); + ret = -1; + goto err; + } + + p = rawdata; + + if (memcmp(p, AICBT_PT_TAG, sizeof(AICBT_PT_TAG) < 16 ? 
sizeof(AICBT_PT_TAG) : 16)) { + printk("TAG err\n"); + ret = -1; + goto err; + } + p += 16; + + while (p - rawdata < size) { + printk("size = %d p - rawdata = 0x%0lx \r\n", size, p - rawdata); + new = (struct aicbt_patch_table *)vmalloc(sizeof(struct aicbt_patch_table)); + memset(new, 0, sizeof(struct aicbt_patch_table)); + if (head == NULL) { + head = new; + cur = new; + } else { + cur->next = new; + cur = cur->next; + } + + cur->name = (char *)vmalloc(sizeof(char) * 16); + memset(cur->name, 0, sizeof(char) * 16); + memcpy(cur->name, p, 16); + p += 16; + + cur->type = *(uint32_t *)p; + p += 4; + + cur->len = *(uint32_t *)p; + p += 4; + printk("cur->type %x, len %d\n", cur->type, cur->len); + + if((cur->type ) >= 1000 ) {//Temp Workaround + cur->len = 0; + }else{ + cur->data = (uint32_t *)vmalloc(sizeof(uint8_t) * cur->len * 8); + memset(cur->data, 0, sizeof(uint8_t) * cur->len * 8); + memcpy(cur->data, p, cur->len * 8); + p += cur->len * 8; + } + } + + vfree(rawdata); + rwnx_patch_table_load(rwnx_hw, head); + printk("fw_patch_table download complete\n\n"); + + return ret; +err: + //aicbt_patch_table_free(&head); + + if (rawdata){ + vfree(rawdata); + } + return ret; +} +#endif + +#ifdef CONFIG_DPD +rf_misc_ram_lite_t dpd_res; +#endif + +#ifdef CONFIG_LOFT_CALIB +rf_misc_ram_lite_t loft_res_local; +#endif + +int aicwf_patch_table_load(struct rwnx_hw *rwnx_hw, char *filename) +{ + int err = 0; + unsigned int i = 0, size; + u32 *dst = NULL; + u8 *describle; + u32 fmacfw_patch_tbl_8800dc_u02_describe_size = 124; + u32 fmacfw_patch_tbl_8800dc_u02_describe_base;//read from patch_tbl + + /* Copy the file on the Embedded side */ + AICWFDBG(LOGINFO, "### Upload %s \n", filename); + + size = rwnx_request_firmware_common(rwnx_hw, &dst, filename); + if (!dst) { + AICWFDBG(LOGERROR, "No such file or directory\n"); + return -1; + } + if (size <= 0) { + AICWFDBG(LOGERROR, "wrong size of firmware file\n"); + dst = NULL; + err = -1; + } + + AICWFDBG(LOGINFO, "tbl size = %d \n",size); + + fmacfw_patch_tbl_8800dc_u02_describe_base = dst[0]; + AICWFDBG(LOGINFO, "FMACFW_PATCH_TBL_8800DC_U02_DESCRIBE_BASE = %x \n",fmacfw_patch_tbl_8800dc_u02_describe_base); + + if (!err && (i < size)) { + err = rwnx_send_dbg_mem_block_write_req(rwnx_hw, fmacfw_patch_tbl_8800dc_u02_describe_base, fmacfw_patch_tbl_8800dc_u02_describe_size + 4, dst); + if(err){ + printk("write describe information fail \n"); + } + + describle = kzalloc(fmacfw_patch_tbl_8800dc_u02_describe_size, GFP_KERNEL); + memcpy(describle, &dst[1], fmacfw_patch_tbl_8800dc_u02_describe_size); + AICWFDBG(LOGINFO, "%s", describle); + kfree(describle); + describle = NULL; + } + + if (!err && (i < size)) { + for (i =(128/4); i < (size/4); i +=2) { + AICWFDBG(LOGERROR, "patch_tbl: %x %x\n", dst[i], dst[i+1]); + err = rwnx_send_dbg_mem_write_req(rwnx_hw, dst[i], dst[i+1]); + } + if (err) { + AICWFDBG(LOGERROR, "bin upload fail: %x, err:%d\r\n", dst[i], err); + } + } + + if (dst) { + rwnx_release_firmware_common(&dst); + } + + return err; + +} + +#define PATCH_VARGRP_MAGIC_NUM 0x47564150 +#define USER_CHAN_MAX_TXPWR_EN_FLAG (0x01U << 1) + +int aicwf_patch_var_config_8800dc(struct rwnx_hw *rwnx_hw) +{ + int ret = 0; + uint32_t rd_patch_addr, rd_var_magic_addr, rd_ext_flags_addr; + uint32_t rd_ext_flags_val, wr_ext_flags_val; + struct dbg_mem_read_cfm cfm; + if (chip_sub_id != 2) { + AICWFDBG(LOGERROR, "unsupport sub_id: %d\r\n", chip_sub_id); + return 0; + } + rd_patch_addr = ROM_FMAC_PATCH_ADDR; + rd_var_magic_addr = rd_patch_addr; + ret = 
rwnx_send_dbg_mem_read_req(rwnx_hw, rd_var_magic_addr, &cfm); + if (ret) { + AICWFDBG(LOGERROR, "var_magic rd fail: %d\r\n", ret); + return ret; + } + if (cfm.memdata != PATCH_VARGRP_MAGIC_NUM) { + AICWFDBG(LOGINFO, "old patch file: %s\r\n", RWNX_MAC_PATCH_NAME2_8800DC_H_U02); + return 0; + } + rd_ext_flags_addr = rd_patch_addr + 4; + ret = rwnx_send_dbg_mem_read_req(rwnx_hw, rd_ext_flags_addr, &cfm); + if (ret) { + AICWFDBG(LOGERROR, "ext_flags rd fail: %d\r\n", ret); + return ret; + } + rd_ext_flags_val = cfm.memdata; + wr_ext_flags_val = cfm.memdata; + AICWFDBG(LOGINFO, "rd ext_flags: 0x%x\r\n", rd_ext_flags_val); + #ifdef CONFIG_POWER_LIMIT + wr_ext_flags_val = rd_ext_flags_val | USER_CHAN_MAX_TXPWR_EN_FLAG; + #endif + if (wr_ext_flags_val != rd_ext_flags_val) { + ret = rwnx_send_dbg_mem_write_req(rwnx_hw, rd_ext_flags_addr, wr_ext_flags_val); + if (ret) { + AICWFDBG(LOGERROR, "ext_flags wr fail: %d\r\n", ret); + return ret; + } + AICWFDBG(LOGINFO, "wr ext_flags: 0x%x\r\n", wr_ext_flags_val); + } + return ret; +} + +//adap test +extern int get_adap_test(void); +//adap test + +void aicwf_patch_config_8800dc(struct rwnx_hw *rwnx_hw) +{ + #ifdef CONFIG_ROM_PATCH_EN + int ret = 0; + int cnt = 0; + +//adap test + int adap_test = 0; + int adap_patch_num = 0; + + adap_test = get_adap_test(); +//adap test + + if (testmode == 0) { + const u32 cfg_base = 0x10164; + struct dbg_mem_read_cfm cfm; + int i; + u32 wifisetting_cfg_addr; + u32 ldpc_cfg_addr; + u32 agc_cfg_addr; + u32 txgain_cfg_addr; + u32 jump_tbl_addr = 0; + + u32 patch_tbl_wifisetting_num = sizeof(patch_tbl_wifisetting)/sizeof(u32)/2; + u32 ldpc_cfg_size = sizeof(ldpc_cfg_ram); + u32 agc_cfg_size = sizeof(agc_cfg_ram); + u32 txgain_cfg_size, *txgain_cfg_array; + u32 jump_tbl_size = 0; + u32 patch_tbl_func_num = 0; + + array2_tbl_t jump_tbl_base = NULL; + array2_tbl_t patch_tbl_func_base = NULL; + + if (chip_sub_id == 0) { + jump_tbl_base = jump_tbl; + jump_tbl_size = sizeof(jump_tbl)/2; + patch_tbl_func_base = patch_tbl_func; + patch_tbl_func_num = sizeof(patch_tbl_func)/sizeof(u32)/2; + } + + //struct dbg_mem_read_cfm cfm; + //int i; + + if ((ret = rwnx_send_dbg_mem_read_req(rwnx_hw, cfg_base, &cfm))) { + AICWFDBG(LOGERROR, "setting base[0x%x] rd fail: %d\n", cfg_base, ret); + } + wifisetting_cfg_addr = cfm.memdata; + + if(chip_sub_id == 0){ + if ((ret = rwnx_send_dbg_mem_read_req(rwnx_hw, cfg_base + 4, &cfm))) { + AICWFDBG(LOGERROR, "setting base[0x%x] rd fail: %d\n", cfg_base + 4, ret); + } + jump_tbl_addr = cfm.memdata; + } + + if ((ret = rwnx_send_dbg_mem_read_req(rwnx_hw, cfg_base + 8, &cfm))) { + AICWFDBG(LOGERROR, "setting base[0x%x] rd fail: %d\n", cfg_base + 8, ret); + } + ldpc_cfg_addr = cfm.memdata; + + if ((ret = rwnx_send_dbg_mem_read_req(rwnx_hw, cfg_base + 0xc, &cfm))) { + AICWFDBG(LOGERROR, "setting base[0x%x] rd fail: %d\n", cfg_base + 0xc, ret); + } + agc_cfg_addr = cfm.memdata; + + if ((ret = rwnx_send_dbg_mem_read_req(rwnx_hw, cfg_base + 0x10, &cfm))) { + AICWFDBG(LOGERROR, "setting base[0x%x] rd fail: %d\n", cfg_base + 0x10, ret); + } + txgain_cfg_addr = cfm.memdata; + + AICWFDBG(LOGINFO, "wifisetting_cfg_addr=%x, ldpc_cfg_addr=%x, agc_cfg_addr=%x, txgain_cfg_addr=%x\n", wifisetting_cfg_addr, ldpc_cfg_addr, agc_cfg_addr, txgain_cfg_addr); + + for (cnt = 0; cnt < patch_tbl_wifisetting_num; cnt++) { + if ((ret = rwnx_send_dbg_mem_write_req(rwnx_hw, wifisetting_cfg_addr + patch_tbl_wifisetting[cnt][0], patch_tbl_wifisetting[cnt][1]))) { + AICWFDBG(LOGERROR, "wifisetting %x write fail\n", 
patch_tbl_wifisetting[cnt][0]); + } + } + +//adap test + if(adap_test){ + adap_patch_num = sizeof(adaptivity_patch_tbl)/sizeof(u32)/2; + for(cnt = 0; cnt < adap_patch_num; cnt++) + { + if((ret = rwnx_send_dbg_mem_write_req(rwnx_hw, wifisetting_cfg_addr + adaptivity_patch_tbl[cnt][0], adaptivity_patch_tbl[cnt][1]))) { + AICWFDBG(LOGERROR, "%x write fail\n", wifisetting_cfg_addr + adaptivity_patch_tbl[cnt][0]); + } + } + } +//adap test + + if (ldpc_cfg_size > 512) {// > 0.5KB data + for (i = 0; i < (ldpc_cfg_size - 512); i += 512) {//each time write 0.5KB + ret = rwnx_send_dbg_mem_block_write_req(rwnx_hw, ldpc_cfg_addr + i, 512, ldpc_cfg_ram + i / 4); + if (ret) { + AICWFDBG(LOGERROR, "ldpc upload fail: %x, err:%d\r\n", ldpc_cfg_addr + i, ret); + break; + } + } + } + + if (!ret && (i < ldpc_cfg_size)) {// < 0.5KB data + ret = rwnx_send_dbg_mem_block_write_req(rwnx_hw, ldpc_cfg_addr + i, ldpc_cfg_size - i, ldpc_cfg_ram + i / 4); + if (ret) { + AICWFDBG(LOGERROR, "ldpc upload fail: %x, err:%d\r\n", ldpc_cfg_addr + i, ret); + } + } + + if (agc_cfg_size > 512) {// > 0.5KB data + for (i = 0; i < (agc_cfg_size - 512); i += 512) {//each time write 0.5KB + ret = rwnx_send_dbg_mem_block_write_req(rwnx_hw, agc_cfg_addr + i, 512, agc_cfg_ram + i / 4); + if (ret) { + AICWFDBG(LOGERROR, "agc upload fail: %x, err:%d\r\n", agc_cfg_addr + i, ret); + break; + } + } + } + + if (!ret && (i < agc_cfg_size)) {// < 0.5KB data + ret = rwnx_send_dbg_mem_block_write_req(rwnx_hw, agc_cfg_addr + i, agc_cfg_size - i, agc_cfg_ram + i / 4); + if (ret) { + AICWFDBG(LOGERROR, "agc upload fail: %x, err:%d\r\n", agc_cfg_addr + i, ret); + } + } + + #if !defined(CONFIG_FPGA_VERIFICATION) + if ((IS_CHIP_ID_H())) { + #if defined(CONFIG_EXT_FEM_8800DCDW) + txgain_cfg_size = sizeof(txgain_map_femkct_h); + txgain_cfg_array = (u32 *)txgain_map_femkct_h; + #else + txgain_cfg_size = sizeof(txgain_map_h); + txgain_cfg_array = (u32 *)txgain_map_h; + #endif + } else { + #if defined(CONFIG_EXT_FEM_8800DCDW) + txgain_cfg_size = sizeof(txgain_map_femkct); + txgain_cfg_array = (u32 *)txgain_map_femkct; + #else + txgain_cfg_size = sizeof(txgain_map); + txgain_cfg_array = (u32 *)txgain_map; + #endif + } + ret = rwnx_send_dbg_mem_block_write_req(rwnx_hw, txgain_cfg_addr, txgain_cfg_size, txgain_cfg_array); + if (ret) { + AICWFDBG(LOGERROR, "txgain upload fail: %x, err:%d\r\n", txgain_cfg_addr, ret); + } + + if(chip_sub_id == 0 ){ + for (cnt = 0; cnt < jump_tbl_size/4; cnt+=1) { + AICWFDBG(LOGDEBUG, "%x = %x\n", jump_tbl_base[cnt][0]*4+jump_tbl_addr, jump_tbl_base[cnt][1]); + if ((ret = rwnx_send_dbg_mem_write_req(rwnx_hw, jump_tbl_base[cnt][0]*4+jump_tbl_addr, jump_tbl_base[cnt][1]))) { + AICWFDBG(LOGERROR, "%x write fail\n", jump_tbl_addr+8*cnt); + } + } + for (cnt = 0; cnt < patch_tbl_func_num; cnt++) { + if ((ret = rwnx_send_dbg_mem_write_req(rwnx_hw, patch_tbl_func_base[cnt][0], patch_tbl_func_base[cnt][1]))) { + AICWFDBG(LOGERROR, "patch_tbl_func %x write fail\n", patch_tbl_func_base[cnt][0]); + } + } + } else if (chip_sub_id == 1) { + ret = aicwf_patch_table_load(rwnx_hw, RWNX_MAC_PATCH_TABLE_8800DC_U02); + if(ret){ + printk("patch_tbl upload fail: err:%d\r\n", ret); + } +#ifdef CONFIG_FOR_IPCAM + if ((ret = rwnx_send_dbg_mem_write_req(rwnx_hw, 0x00111944, 0x00000101))) { + AICWFDBG(LOGERROR, "patch_tbl_func %x write fail\n", patch_tbl_func_base[cnt][0]); + } +#endif + } else if (chip_sub_id == 2) { + ret = aicwf_patch_table_load(rwnx_hw, RWNX_MAC_PATCH_TABLE_8800DC_H_U02); + if(ret){ + printk("patch_tbl upload fail: err:%d\r\n", ret); + 
} + ret = aicwf_patch_var_config_8800dc(rwnx_hw); + if (ret) { + printk("patch_var cfg fail: err:%d\r\n", ret); + } + } else { + printk("unsupported id: %d\n", chip_sub_id); + } + + #endif + } else { + if (chip_sub_id == 0) { + u32 patch_tbl_rf_func_num = sizeof(patch_tbl_rf_func)/sizeof(u32)/2; + for (cnt = 0; cnt < patch_tbl_rf_func_num; cnt++) { + if ((ret = rwnx_send_dbg_mem_write_req(rwnx_hw, patch_tbl_rf_func[cnt][0], patch_tbl_rf_func[cnt][1]))) { + AICWFDBG(LOGERROR, "patch_tbl_rf_func %x write fail\n", patch_tbl_rf_func[cnt][0]); + } + } + } + } + #endif +} + + + +int aicwf_set_rf_config_8800dc(struct rwnx_hw *rwnx_hw, struct mm_set_rf_calib_cfm *cfm){ + int ret = 0; + + if ((ret = rwnx_send_txpwr_lvl_req(rwnx_hw))) { + return -1; + } + + if ((ret = rwnx_send_txpwr_ofst_req(rwnx_hw))) { + return -1; + } + + + if (testmode == FW_NORMAL_MODE) { + #if !defined(CONFIG_EXT_FEM_8800DCDW) + if (IS_CHIP_ID_H()) { + if ((ret = rwnx_send_rf_config_req(rwnx_hw, 0, 1, (u8_l *)wifi_txgain_table_24g_8800dcdw_h, 128))) + return -1; + if ((ret = rwnx_send_rf_config_req(rwnx_hw, 16, 1, (u8_l *)wifi_txgain_table_24g_1_8800dcdw_h, 128))) + return -1; + } else { + if ((ret = rwnx_send_rf_config_req(rwnx_hw, 0, 1, (u8_l *)wifi_txgain_table_24g_8800dcdw, 128))) + return -1; + if ((ret = rwnx_send_rf_config_req(rwnx_hw, 16, 1, (u8_l *)wifi_txgain_table_24g_1_8800dcdw, 128))) + return -1; + } + #else /* #ifdef CONFIG_EXT_FEM_8800DCDW */ + { + ret = rwnx_send_rf_config_req(rwnx_hw, 0, 1, (u8_l *)wifi_txgain_table_24g_8800dcdw_femkct, 128); + if (ret) { + return -1; + } + ret = rwnx_send_rf_config_req(rwnx_hw, 16, 1, (u8_l *)wifi_txgain_table_24g_1_8800dcdw_femkct, 128); + if (ret) { + return -1; + } + } + #endif + + if ((ret = rwnx_send_rf_config_req(rwnx_hw, 0, 0, (u8_l *)wifi_rxgain_table_24g_20m_8800dcdw, 256))) + return -1; + + if ((ret = rwnx_send_rf_config_req(rwnx_hw, 32, 0, (u8_l *)wifi_rxgain_table_24g_40m_8800dcdw, 256))) + return -1; + + if ((ret = rwnx_send_rf_calib_req(rwnx_hw, cfm))) { + return -1; + } + } else if (testmode == FW_RFTEST_MODE) { + if (chip_sub_id >= 1) { + #ifdef CONFIG_DPD + #ifndef CONFIG_FORCE_DPD_CALIB + if (is_file_exist(FW_DPDRESULT_NAME_8800DC) == 1) { + AICWFDBG(LOGINFO, "%s load dpd bin\n", __func__); + ret = aicwf_dpd_result_load_8800dc(rwnx_hw, &dpd_res); + if (ret) { + AICWFDBG(LOGINFO, "load dpd bin fail: %d\n", ret); + return ret; + } + } + #endif + if (dpd_res.bit_mask[1]) { + ret = aicwf_dpd_result_apply_8800dc(rwnx_hw, &dpd_res); + if (ret) { + AICWFDBG(LOGINFO, "apply dpd bin fail: %d\n", ret); + return ret; + } + } + #elif defined(CONFIG_LOFT_CALIB) + if (loft_res_local.bit_mask[1]) { + ret = aicwf_loft_result_apply_8800dc(rwnx_hw, &loft_res_local); + if (ret) { + AICWFDBG(LOGINFO, "apply loft res fail: %d\n", ret); + return ret; + } + } + #else + { + ret = aicwf_misc_ram_init_8800dc(rwnx_hw); + if (ret) { + AICWFDBG(LOGINFO, "misc ram init fail: %d\n", ret); + return ret; + } + } + #endif + ret = rwnx_send_rf_calib_req(rwnx_hw, cfm); + if (ret) { + AICWFDBG(LOGINFO, "rf calib req fail: %d\n", ret); + return ret; + } + } + } + + return 0 ; +} + +extern char aic_fw_path[200]; + +int aicwf_plat_patch_load_8800dc(struct rwnx_hw *rwnx_hw) +{ + int ret = 0; +#if !defined(CONFIG_FPGA_VERIFICATION) + if (chip_sub_id == 0) { + ret = rwnx_plat_bin_fw_upload_2(rwnx_hw, ROM_FMAC_PATCH_ADDR, RWNX_MAC_PATCH_NAME2_8800DC); + } else if (chip_sub_id == 1) { +#ifdef CONFIG_LOAD_BT_PATCH_IN_FDRV + uint32_t fw_ram_patch_base_addr = FW_RAM_PATCH_BASE_ADDR; + ret = 
rwnx_get_patch_addr_from_patch_table(rwnx_hw, FW_PATCH_TABLE_NAME_U02, &fw_ram_patch_base_addr);
+        //bt patch
+        printk("%s %x\n", __func__, fw_ram_patch_base_addr);
+        ret = rwnx_plat_bin_fw_upload_2(rwnx_hw, FW_8800DC_U02_ADID_ADDR, FW_ADID_BASE_NAME);
+        //fw_ram_patch_base_addr, FW_PATCH_BASE_NAME_U02
+        ret = rwnx_plat_bin_fw_upload_2(rwnx_hw, fw_ram_patch_base_addr, FW_PATCH_BASE_NAME_U02);
+        //bt patch table
+        ret = rwnx_patch_table_download(rwnx_hw, FW_PATCH_TABLE_NAME_U02);
+#endif
+        //wifi patch
+        ret = rwnx_plat_bin_fw_upload_2(rwnx_hw, ROM_FMAC_PATCH_ADDR, RWNX_MAC_PATCH_NAME2_8800DC_U02);
+    } else if (chip_sub_id == 2) {
+#ifdef CONFIG_LOAD_BT_PATCH_IN_FDRV
+        uint32_t fw_ram_patch_base_addr = FW_RAM_PATCH_BASE_ADDR;
+        ret = rwnx_get_patch_addr_from_patch_table(rwnx_hw, FW_PATCH_TABLE_NAME_U02, &fw_ram_patch_base_addr);
+        //bt patch
+        printk("%s %x\n", __func__, fw_ram_patch_base_addr);
+        ret = rwnx_plat_bin_fw_upload_2(rwnx_hw, FW_8800DC_U02_ADID_ADDR, FW_ADID_BASE_NAME);
+        //fw_ram_patch_base_addr, FW_PATCH_BASE_NAME_U02
+        ret = rwnx_plat_bin_fw_upload_2(rwnx_hw, fw_ram_patch_base_addr, FW_PATCH_BASE_NAME_U02H);
+        //bt patch table
+        ret = rwnx_patch_table_download(rwnx_hw, FW_PATCH_TABLE_NAME_U02H);
+#endif
+        //wifi patch
+        ret = rwnx_plat_bin_fw_upload_2(rwnx_hw, ROM_FMAC_PATCH_ADDR, RWNX_MAC_PATCH_NAME2_8800DC_H_U02);
+    } else {
+        printk("unsupported id: %d\n", chip_sub_id);
+    }
+#endif
+    return ret;
+}
+
+int aicwf_plat_rftest_load_8800dc(struct rwnx_hw *rwnx_hw)
+{
+    int ret = 0;
+    ret = rwnx_plat_bin_fw_upload_2(rwnx_hw, RAM_LMAC_FW_ADDR, RWNX_MAC_FW_RF_BASE_NAME_8800DC);
+    if (ret) {
+        AICWFDBG(LOGINFO, "load rftest bin fail: %d\n", ret);
+        return ret;
+    }
+    return ret;
+}
+
+#if defined(CONFIG_DPD) || defined(CONFIG_LOFT_CALIB)
+int aicwf_misc_ram_valid_check_8800dc(struct rwnx_hw *rwnx_hw, int *valid_out)
+{
+    int ret = 0;
+    uint32_t cfg_base = 0x10164;
+    struct dbg_mem_read_cfm cfm;
+    uint32_t misc_ram_addr;
+    uint32_t ram_base_addr, ram_word_cnt;
+    uint32_t bit_mask[4];
+    int i;
+    if (valid_out) {
+        *valid_out = 0;
+    }
+    if (testmode == FW_RFTEST_MODE) {
+        uint32_t vect1 = 0;
+        uint32_t vect2 = 0;
+        cfg_base = RAM_LMAC_FW_ADDR + 0x0004;
+        ret = rwnx_send_dbg_mem_read_req(rwnx_hw, cfg_base, &cfm);
+        if (ret) {
+            AICWFDBG(LOGERROR, "cfg_base:%x vect1 rd fail: %d\n", cfg_base, ret);
+            return ret;
+        }
+        vect1 = cfm.memdata;
+        if ((vect1 & 0xFFFF0000) != (RAM_LMAC_FW_ADDR & 0xFFFF0000)) {
+            AICWFDBG(LOGERROR, "vect1 invalid: %x\n", vect1);
+            return ret;
+        }
+        cfg_base = RAM_LMAC_FW_ADDR + 0x0008;
+        ret = rwnx_send_dbg_mem_read_req(rwnx_hw, cfg_base, &cfm);
+        if (ret) {
+            AICWFDBG(LOGERROR, "cfg_base:%x vect2 rd fail: %d\n", cfg_base, ret);
+            return ret;
+        }
+        vect2 = cfm.memdata;
+        if ((vect2 & 0xFFFF0000) != (RAM_LMAC_FW_ADDR & 0xFFFF0000)) {
+            AICWFDBG(LOGERROR, "vect2 invalid: %x\n", vect2);
+            return ret;
+        }
+        cfg_base = RAM_LMAC_FW_ADDR + 0x0164;
+    }
+    // read the rf misc ram base address
+    ret = rwnx_send_dbg_mem_read_req(rwnx_hw, cfg_base + 0x14, &cfm);
+    if (ret) {
+        AICWFDBG(LOGERROR, "rf misc ram[0x%x] rd fail: %d\n", cfg_base + 0x14, ret);
+        return ret;
+    }
+    misc_ram_addr = cfm.memdata;
+    AICWFDBG(LOGERROR, "misc_ram_addr=%x\n", misc_ram_addr);
+    // bit_mask
+    ram_base_addr = misc_ram_addr + offsetof(rf_misc_ram_t, bit_mask);
+    ram_word_cnt = (MEMBER_SIZE(rf_misc_ram_t, bit_mask) + MEMBER_SIZE(rf_misc_ram_t, reserved)) / 4;
+    for (i = 0; i < ram_word_cnt; i++) {
+        ret = rwnx_send_dbg_mem_read_req(rwnx_hw, ram_base_addr + i * 4, &cfm);
+        if (ret) {
+            AICWFDBG(LOGERROR, 
"bit_mask[0x%x] rd fail: %d\n", ram_base_addr + i * 4, ret); + return ret; + } + bit_mask[i] = cfm.memdata; + } + AICWFDBG(LOGTRACE, "bit_mask:%x,%x,%x,%x\n",bit_mask[0],bit_mask[1],bit_mask[2],bit_mask[3]); + if ((bit_mask[0] == 0) && ((bit_mask[1] & 0xFFF00000) == 0x80000000) && + (bit_mask[2] == 0) && ((bit_mask[3] & 0xFFFFFF00) == 0x00000000)) { + if (valid_out) { + *valid_out = 1; + } + } + return ret; +} + +int aicwf_plat_calib_load_8800dc(struct rwnx_hw *rwnx_hw) +{ + int ret = 0; + if (chip_sub_id == 1) { + ret = rwnx_plat_bin_fw_upload_2(rwnx_hw, ROM_FMAC_CALIB_ADDR, RWNX_MAC_CALIB_NAME_8800DC_U02); + if (ret) { + AICWFDBG(LOGINFO, "load calib bin fail: %d\n", ret); + return ret; + } + } else if (chip_sub_id == 2) { + ret = rwnx_plat_bin_fw_upload_2(rwnx_hw, ROM_FMAC_CALIB_ADDR, RWNX_MAC_CALIB_NAME_8800DC_H_U02); + if (ret) { + AICWFDBG(LOGINFO, "load calib bin fail: %d\n", ret); + return ret; + } + } + return ret; +} +#endif + +int aicwf_misc_ram_init_8800dc(struct rwnx_hw *rwnx_hw) +{ + int ret = 0; + uint32_t cfg_base = 0x10164; + struct dbg_mem_read_cfm cfm; + uint32_t misc_ram_addr; + uint32_t misc_ram_size = 12; + int i; + if (testmode == FW_RFTEST_MODE) { + cfg_base = RAM_LMAC_FW_ADDR + 0x0164; + } + // init misc ram + ret = rwnx_send_dbg_mem_read_req(rwnx_hw, cfg_base + 0x14, &cfm); + if (ret) { + AICWFDBG(LOGERROR, "rf misc ram[0x%x] rd fail: %d\n", cfg_base + 0x14, ret); + return ret; + } + misc_ram_addr = cfm.memdata; + AICWFDBG(LOGERROR, "misc_ram_addr=%x\n", misc_ram_addr); + for (i = 0; i < (misc_ram_size / 4); i++) { + ret = rwnx_send_dbg_mem_write_req(rwnx_hw, misc_ram_addr + i * 4, 0); + if (ret) { + AICWFDBG(LOGERROR, "rf misc ram[0x%x] wr fail: %d\n", misc_ram_addr + i * 4, ret); + return ret; + } + } + return ret; +} + +#ifdef CONFIG_DPD +int aicwf_dpd_calib_8800dc(struct rwnx_hw *rwnx_hw, rf_misc_ram_lite_t *dpd_res) +{ + int ret = 0; + uint32_t fw_addr, boot_type; + int valid_flag; + ret = aicwf_misc_ram_valid_check_8800dc(rwnx_hw, &valid_flag); + if (ret) { + AICWFDBG(LOGINFO, "misc ram check fail: %d\n", ret); + return ret; + } + if (valid_flag) { + AICWFDBG(LOGINFO, "misc ram valid, skip calib process\n"); + return ret; + } + ret = aicwf_plat_calib_load_8800dc(rwnx_hw); + if (ret) { + AICWFDBG(LOGINFO, "load calib bin fail: %d\n", ret); + return ret; + } + /* fw start */ + fw_addr = 0x00130009; + boot_type = HOST_START_APP_FNCALL; + AICWFDBG(LOGINFO, "Start app: %08x, %d\n", fw_addr, boot_type); + ret = rwnx_send_dbg_start_app_req(rwnx_hw, fw_addr, boot_type); + if (ret) { + AICWFDBG(LOGINFO, "start app fail: %d\n", ret); + return ret; + } + { // read dpd res + const uint32_t cfg_base = 0x10164; + struct dbg_mem_read_cfm cfm; + uint32_t misc_ram_addr; + uint32_t ram_base_addr, ram_word_cnt; + int i; + ret = rwnx_send_dbg_mem_read_req(rwnx_hw, cfg_base + 0x14, &cfm); + if (ret) { + AICWFDBG(LOGERROR, "rf misc ram[0x%x] rd fail: %d\n", cfg_base + 0x14, ret); + return ret; + } + misc_ram_addr = cfm.memdata; + // bit_mask + ram_base_addr = misc_ram_addr + offsetof(rf_misc_ram_t, bit_mask); + ram_word_cnt = (MEMBER_SIZE(rf_misc_ram_t, bit_mask) + MEMBER_SIZE(rf_misc_ram_t, reserved)) / 4; + for (i = 0; i < ram_word_cnt; i++) { + ret = rwnx_send_dbg_mem_read_req(rwnx_hw, ram_base_addr + i * 4, &cfm); + if (ret) { + AICWFDBG(LOGERROR, "bit_mask[0x%x] rd fail: %d\n", ram_base_addr + i * 4, ret); + return ret; + } + dpd_res->bit_mask[i] = cfm.memdata; + } + // dpd_high + ram_base_addr = misc_ram_addr + offsetof(rf_misc_ram_t, dpd_high); + ram_word_cnt = 
MEMBER_SIZE(rf_misc_ram_t, dpd_high) / 4; + for (i = 0; i < ram_word_cnt; i++) { + ret = rwnx_send_dbg_mem_read_req(rwnx_hw, ram_base_addr + i * 4, &cfm); + if (ret) { + AICWFDBG(LOGERROR, "dpd_high[0x%x] rd fail: %d\n", ram_base_addr + i * 4, ret); + return ret; + } + dpd_res->dpd_high[i] = cfm.memdata; + } + // loft_res + ram_base_addr = misc_ram_addr + offsetof(rf_misc_ram_t, loft_res); + ram_word_cnt = MEMBER_SIZE(rf_misc_ram_t, loft_res) / 4; + for (i = 0; i < ram_word_cnt; i++) { + ret = rwnx_send_dbg_mem_read_req(rwnx_hw, ram_base_addr + i * 4, &cfm); + if (ret) { + AICWFDBG(LOGERROR, "loft_res[0x%x] rd fail: %d\n", ram_base_addr + i * 4, ret); + return ret; + } + dpd_res->loft_res[i] = cfm.memdata; + } + } + return ret; +} + +int aicwf_dpd_result_apply_8800dc(struct rwnx_hw *rwnx_hw, rf_misc_ram_lite_t *dpd_res) +{ + int ret = 0; + uint32_t cfg_base = 0x10164; + struct dbg_mem_read_cfm cfm; + uint32_t misc_ram_addr; + uint32_t ram_base_addr, ram_byte_cnt; + AICWFDBG(LOGINFO, "bit_mask[1]=%x\n", dpd_res->bit_mask[1]); + if (dpd_res->bit_mask[1] == 0) { + AICWFDBG(LOGERROR, "void dpd_res, bypass it.\n"); + return 0; + } + if (testmode == FW_RFTEST_MODE) { + cfg_base = RAM_LMAC_FW_ADDR + 0x0164; + } + if ((ret = rwnx_send_dbg_mem_read_req(rwnx_hw, cfg_base + 0x14, &cfm))) { + AICWFDBG(LOGERROR, "rf misc ram[0x%x] rd fail: %d\n", cfg_base + 0x14, ret); + return ret; + } + misc_ram_addr = cfm.memdata; + AICWFDBG(LOGINFO, "misc_ram_addr: %x\n", misc_ram_addr); + /* Copy dpd_res on the Embedded side */ + // bit_mask + AICWFDBG(LOGINFO, "bit_mask[0]=%x\n", dpd_res->bit_mask[0]); + ram_base_addr = misc_ram_addr + offsetof(rf_misc_ram_t, bit_mask); + ram_byte_cnt = MEMBER_SIZE(rf_misc_ram_t, bit_mask) + MEMBER_SIZE(rf_misc_ram_t, reserved); + ret = rwnx_send_dbg_mem_block_write_req(rwnx_hw, ram_base_addr, ram_byte_cnt, (u32 *)&dpd_res->bit_mask[0]); + if (ret) { + AICWFDBG(LOGERROR, "bit_mask wr fail: %x, ret:%d\r\n", ram_base_addr, ret); + return ret; + } + // dpd_high + AICWFDBG(LOGINFO, "dpd_high[0]=%x\n", dpd_res->dpd_high[0]); + ram_base_addr = misc_ram_addr + offsetof(rf_misc_ram_t, dpd_high); + ram_byte_cnt = MEMBER_SIZE(rf_misc_ram_t, dpd_high); + ret = rwnx_send_dbg_mem_block_write_req(rwnx_hw, ram_base_addr, ram_byte_cnt, (u32 *)&dpd_res->dpd_high[0]); + if (ret) { + AICWFDBG(LOGERROR, "dpd_high wr fail: %x, ret:%d\r\n", ram_base_addr, ret); + return ret; + } + // loft_res + AICWFDBG(LOGINFO, "loft_res[0]=%x\n", dpd_res->loft_res[0]); + ram_base_addr = misc_ram_addr + offsetof(rf_misc_ram_t, loft_res); + ram_byte_cnt = MEMBER_SIZE(rf_misc_ram_t, loft_res); + ret = rwnx_send_dbg_mem_block_write_req(rwnx_hw, ram_base_addr, ram_byte_cnt, (u32 *)&dpd_res->loft_res[0]); + if (ret) { + AICWFDBG(LOGERROR, "loft_res wr fail: %x, ret:%d\r\n", ram_base_addr, ret); + return ret; + } + return ret; +} + +#ifndef CONFIG_FORCE_DPD_CALIB +int aicwf_dpd_result_load_8800dc(struct rwnx_hw *rwnx_hw, rf_misc_ram_lite_t *dpd_res) +{ + int ret = 0; + int size; + u32 *dst=NULL; + char *filename = FW_DPDRESULT_NAME_8800DC; + AICWFDBG(LOGINFO, "dpd_res file path:%s \r\n", filename); + /* load file */ + size = rwnx_request_firmware_common(rwnx_hw, &dst, filename); + if (size <= 0) { + AICWFDBG(LOGERROR, "wrong size of dpd_res file\n"); + dst = NULL; + return -1; + } + AICWFDBG(LOGINFO, "### Load file done: %s, size=%d, dst[0]=%x\n", filename, size, dst[0]); + memcpy((u8 *)dpd_res, (u8 *)dst, sizeof(rf_misc_ram_lite_t)); + if (dst) { + rwnx_release_firmware_common(&dst); + } + return ret; +} + +#define 
FW_PATH_MAX_LEN 200
+extern char aic_fw_path[FW_PATH_MAX_LEN];
+
+int aicwf_dpd_result_write_8800dc(void *buf, int buf_len)
+{
+    int sum = 0, len = 0;
+    char *path = NULL;
+    struct file *fp = NULL;
+    loff_t pos = 0;
+    mm_segment_t fs;
+
+    AICWFDBG(LOGINFO, "%s\n", __func__);
+    path = __getname();
+    if (!path) {
+        AICWFDBG(LOGINFO, "get path fail\n");
+        return -1;
+    }
+
+    len = snprintf(path, FW_PATH_MAX_LEN, "%s/%s", aic_fw_path, FW_DPDRESULT_NAME_8800DC);
+    //AICWFDBG(LOGINFO, "%s\n", path);
+
+    fp = filp_open(path, O_RDWR | O_CREAT, 0644);
+    if (IS_ERR(fp)) {
+        AICWFDBG(LOGINFO, "fp open fail\n");
+        __putname(path);
+        fp = NULL;
+        return -1;
+    }
+
+    fs = get_fs();
+    set_fs(KERNEL_DS);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
+    sum = kernel_write(fp, buf, buf_len, &pos);
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0)
+    sum = kernel_write(fp, (char *)buf, buf_len, pos);
+#else
+    sum = vfs_write(fp, (char *)buf, buf_len, &pos);
+#endif
+
+    set_fs(fs);
+    __putname(path);
+    filp_close(fp, NULL);
+    fp = NULL;
+
+    return 0;
+}
+#endif /* !CONFIG_FORCE_DPD_CALIB */
+#endif
+
+#ifdef CONFIG_LOFT_CALIB
+int aicwf_loft_calib_8800dc(struct rwnx_hw *rwnx_hw, rf_misc_ram_lite_t *loft_res)
+{
+    int ret = 0;
+    uint32_t fw_addr, boot_type;
+    int valid_flag;
+    ret = aicwf_misc_ram_valid_check_8800dc(rwnx_hw, &valid_flag);
+    if (ret) {
+        AICWFDBG(LOGINFO, "misc ram check fail: %d\n", ret);
+        return ret;
+    }
+    if (valid_flag) {
+        AICWFDBG(LOGINFO, "misc ram valid, skip calib process\n");
+        return ret;
+    }
+    ret = aicwf_plat_calib_load_8800dc(rwnx_hw);
+    if (ret) {
+        AICWFDBG(LOGINFO, "load calib bin fail: %d\n", ret);
+        return ret;
+    }
+    /* fw start */
+    fw_addr = 0x00130009;
+    boot_type = HOST_START_APP_FNCALL;
+    AICWFDBG(LOGINFO, "Start app: %08x, %d\n", fw_addr, boot_type);
+    ret = rwnx_send_dbg_start_app_req(rwnx_hw, fw_addr, boot_type);
+    if (ret) {
+        AICWFDBG(LOGINFO, "start app fail: %d\n", ret);
+        return ret;
+    }
+    { // read loft res
+        const uint32_t cfg_base = 0x10164;
+        struct dbg_mem_read_cfm cfm;
+        uint32_t misc_ram_addr;
+        uint32_t ram_base_addr, ram_word_cnt;
+        int i;
+        ret = rwnx_send_dbg_mem_read_req(rwnx_hw, cfg_base + 0x14, &cfm);
+        if (ret) {
+            AICWFDBG(LOGERROR, "rf misc ram[0x%x] rd fail: %d\n", cfg_base + 0x14, ret);
+            return ret;
+        }
+        misc_ram_addr = cfm.memdata;
+        // bit_mask
+        ram_base_addr = misc_ram_addr + offsetof(rf_misc_ram_t, bit_mask);
+        ram_word_cnt = (MEMBER_SIZE(rf_misc_ram_t, bit_mask) + MEMBER_SIZE(rf_misc_ram_t, reserved)) / 4;
+        for (i = 0; i < ram_word_cnt; i++) {
+            ret = rwnx_send_dbg_mem_read_req(rwnx_hw, ram_base_addr + i * 4, &cfm);
+            if (ret) {
+                AICWFDBG(LOGERROR, "bit_mask[0x%x] rd fail: %d\n", ram_base_addr + i * 4, ret);
+                return ret;
+            }
+            loft_res->bit_mask[i] = cfm.memdata;
+        }
+        // loft_res
+        ram_base_addr = misc_ram_addr + offsetof(rf_misc_ram_t, loft_res);
+        ram_word_cnt = MEMBER_SIZE(rf_misc_ram_t, loft_res) / 4;
+        for (i = 0; i < ram_word_cnt; i++) {
+            ret = rwnx_send_dbg_mem_read_req(rwnx_hw, ram_base_addr + i * 4, &cfm);
+            if (ret) {
+                AICWFDBG(LOGERROR, "loft_res[0x%x] rd fail: %d\n", ram_base_addr + i * 4, ret);
+                return ret;
+            }
+            loft_res->loft_res[i] = cfm.memdata;
+        }
+    }
+    return ret;
+}
+
+int aicwf_loft_result_apply_8800dc(struct rwnx_hw *rwnx_hw, rf_misc_ram_lite_t *loft_res)
+{
+    int ret = 0;
+    uint32_t cfg_base = 0x10164;
+    struct dbg_mem_read_cfm cfm;
+    uint32_t misc_ram_addr;
+    uint32_t ram_base_addr, ram_byte_cnt;
+    AICWFDBG(LOGINFO, "bit_mask[1]=%x\n", loft_res->bit_mask[1]);
+    if (loft_res->bit_mask[1] == 0) {
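+        /* bit_mask[1] == 0 means no LOFT calibration result has been captured yet, so there is nothing to apply */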
AICWFDBG(LOGERROR, "void loft_res, bypass it.\n"); + return 0; + } + if (testmode == FW_RFTEST_MODE) { + cfg_base = RAM_LMAC_FW_ADDR + 0x0164; + } + if ((ret = rwnx_send_dbg_mem_read_req(rwnx_hw, cfg_base + 0x14, &cfm))) { + AICWFDBG(LOGERROR, "rf misc ram[0x%x] rd fail: %d\n", cfg_base + 0x14, ret); + return ret; + } + misc_ram_addr = cfm.memdata; + AICWFDBG(LOGINFO, "misc_ram_addr: %x\n", misc_ram_addr); + /* Copy loft_res on the Embedded side */ + // bit_mask + AICWFDBG(LOGINFO, "bit_mask[0]=%x\n", loft_res->bit_mask[0]); + ram_base_addr = misc_ram_addr + offsetof(rf_misc_ram_t, bit_mask); + ram_byte_cnt = MEMBER_SIZE(rf_misc_ram_t, bit_mask) + MEMBER_SIZE(rf_misc_ram_t, reserved); + ret = rwnx_send_dbg_mem_block_write_req(rwnx_hw, ram_base_addr, ram_byte_cnt, (u32 *)&loft_res->bit_mask[0]); + if (ret) { + AICWFDBG(LOGERROR, "bit_mask wr fail: %x, ret:%d\r\n", ram_base_addr, ret); + return ret; + } + // loft_res + AICWFDBG(LOGINFO, "loft_res[0]=%x\n", loft_res->loft_res[0]); + ram_base_addr = misc_ram_addr + offsetof(rf_misc_ram_t, loft_res); + ram_byte_cnt = MEMBER_SIZE(rf_misc_ram_t, loft_res); + ret = rwnx_send_dbg_mem_block_write_req(rwnx_hw, ram_base_addr, ram_byte_cnt, (u32 *)&loft_res->loft_res[0]); + if (ret) { + AICWFDBG(LOGERROR, "loft_res wr fail: %x, ret:%d\r\n", ram_base_addr, ret); + return ret; + } + return ret; +} +#endif + +int rwnx_plat_userconfig_load_8800dc(struct rwnx_hw *rwnx_hw){ + int size; + u32 *dst=NULL; + char *filename = FW_USERCONFIG_NAME_8800DC; + + AICWFDBG(LOGINFO, "userconfig file path:%s \r\n", filename); + + /* load file */ + size = rwnx_request_firmware_common(rwnx_hw, &dst, filename); + if (size <= 0) { + AICWFDBG(LOGERROR, "wrong size of firmware file\n"); + dst = NULL; + return 0; + } + + /* Copy the file on the Embedded side */ + AICWFDBG(LOGINFO, "### Load file done: %s, size=%d\n", filename, size); + + rwnx_plat_userconfig_parsing((char *)dst, size); + + rwnx_release_firmware_common(&dst); + + AICWFDBG(LOGINFO, "userconfig download complete\n\n"); + return 0; + +} + +int rwnx_plat_userconfig_load_8800dw(struct rwnx_hw *rwnx_hw){ + int size; + u32 *dst=NULL; + char *filename = FW_USERCONFIG_NAME_8800DW; + + AICWFDBG(LOGINFO, "userconfig file path:%s \r\n", filename); + + /* load file */ + size = rwnx_request_firmware_common(rwnx_hw, &dst, filename); + if (size <= 0) { + AICWFDBG(LOGERROR, "wrong size of firmware file\n"); + dst = NULL; + return 0; + } + + /* Copy the file on the Embedded side */ + AICWFDBG(LOGINFO, "### Load file done: %s, size=%d\n", filename, size); + + rwnx_plat_userconfig_parsing((char *)dst, size); + + rwnx_release_firmware_common(&dst); + + AICWFDBG(LOGINFO, "userconfig download complete\n\n"); + return 0; + +} + +#ifdef CONFIG_POWER_LIMIT +extern char country_code[]; + +int rwnx_plat_powerlimit_load_8800dcdw(struct rwnx_hw *rwnx_hw, uint16_t chip_id) +{ + int size; + u32 *dst = NULL; + char *filename; + if (chip_id == PRODUCT_ID_AIC8800DC) { + filename = FW_POWERLIMIT_NAME_8800DC; + } else if (chip_id == PRODUCT_ID_AIC8800DW) { + filename = FW_POWERLIMIT_NAME_8800DW; + } else { + AICWFDBG(LOGERROR, "invalid chip_id: %d\n", chip_id); + return 0; + } + + AICWFDBG(LOGINFO, "powerlimit file path:%s \r\n", filename); + + /* load file */ + size = rwnx_request_firmware_common(rwnx_hw, &dst, filename); + if (size <= 0) { + AICWFDBG(LOGERROR, "wrong size of cfg file\n"); + dst = NULL; + return 0; + } + + /* Copy the file on the Embedded side */ + AICWFDBG(LOGINFO, "### Load file done: %s, size=%d\n", filename, size); + + 
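/* Parse the downloaded power limit table for the current country_code */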
rwnx_plat_powerlimit_parsing((char *)dst, size, country_code);
+
+    rwnx_release_firmware_common(&dst);
+
+    AICWFDBG(LOGINFO, "powerlimit download complete\n\n");
+    return 0;
+}
+#endif
+
+//Crystal provided by CPU (start)
+int set_bbpll_config(struct rwnx_hw *rwnx_hw){
+// {0x40505010, 0x7C301010},//bbpll
+    int ret = 0;
+    struct dbg_mem_read_cfm rd_mem_addr_cfm;
+
+    //Check whether the crystal is provided by the CPU.
+    ret = rwnx_send_dbg_mem_read_req(rwnx_hw, 0x40500148, &rd_mem_addr_cfm);
+    if (ret) {
+        AICWFDBG(LOGERROR, "%x rd fail: %d\n", 0x40500148, ret);
+        return -1;
+    }
+
+    AICWFDBG(LOGDEBUG, "%s rd_mem_addr_cfm.memdata:%x \r\n", __func__, rd_mem_addr_cfm.memdata);
+
+    if(!(rd_mem_addr_cfm.memdata & 0x01)){
+        AICWFDBG(LOGINFO, "%s Crystal not provided by CPU \r\n", __func__);
+        return 0;
+    }else{
+        AICWFDBG(LOGINFO, "%s Crystal provided by CPU \r\n", __func__);
+        //Read 0x40505010 value to check bbpll set or not.
+        ret = rwnx_send_dbg_mem_read_req(rwnx_hw, 0x40505010, &rd_mem_addr_cfm);
+        if(ret < 0){
+            AICWFDBG(LOGERROR, "%s error ret_val:%d\r\n", __func__, ret);
+            return -1;
+        }
+
+        if((rd_mem_addr_cfm.memdata >> 29) == 3){
+            AICWFDBG(LOGERROR, "%s No need to set \r\n", __func__);
+            return 0;
+        }else{
+            rd_mem_addr_cfm.memdata |= ((0x1 << 29) | (0x1 << 30));
+            rd_mem_addr_cfm.memdata &= (~(0x1 << 31));
+            rwnx_send_dbg_mem_write_req(rwnx_hw, 0x40505010, rd_mem_addr_cfm.memdata);
+        }
+    }
+    return 0;
+}
+//Crystal provided by CPU (end)
+
+
+void system_config_8800dc(struct rwnx_hw *rwnx_hw){
+    int syscfg_num;
+    array3_tbl_t p_syscfg_msk_tbl;
+    int ret, cnt;
+    const u32 mem_addr = 0x40500000;
+    struct dbg_mem_read_cfm rd_mem_addr_cfm;
+
+    ret = rwnx_send_dbg_mem_read_req(rwnx_hw, mem_addr, &rd_mem_addr_cfm);
+    if (ret) {
+        AICWFDBG(LOGERROR, "%x rd fail: %d\n", mem_addr, ret);
+        return;
+    }
+    chip_id = (u8)(rd_mem_addr_cfm.memdata >> 16);
+    //printk("%x=%x\n", rd_mem_addr_cfm.memaddr, rd_mem_addr_cfm.memdata);
+    if (((rd_mem_addr_cfm.memdata >> 25) & 0x01UL) == 0x00UL) {
+        chip_mcu_id = 1;
+    }
+
+    ret = rwnx_send_dbg_mem_read_req(rwnx_hw, 0x00000020, &rd_mem_addr_cfm);
+    if (ret) {
+        AICWFDBG(LOGERROR, "[0x00000020] rd fail: %d\n", ret);
+        return;
+    }
+    chip_sub_id = (u8)(rd_mem_addr_cfm.memdata);
+    //printk("%x=%x\n", rd_mem_addr_cfm.memaddr, rd_mem_addr_cfm.memdata);
+    AICWFDBG(LOGINFO, "chip_id=%x, chip_sub_id=%x\n", chip_id, chip_sub_id);
+
+
+    //Crystal provided by CPU (start)
+    ret = set_bbpll_config(rwnx_hw);
+    if (ret) {
+        AICWFDBG(LOGERROR, "set_bbpll_config fail: %d\n", ret);
+        return;
+    }
+    //Crystal provided by CPU (end)
+
+    ret = rwnx_send_dbg_mem_read_req(rwnx_hw, 0x40500010, &rd_mem_addr_cfm);
+    if (ret) {
+        printk("[0x40500010] rd fail: %d\n", ret);
+        return;
+    }
+    printk("[0x40500010]=%x\n", rd_mem_addr_cfm.memdata);
+
+    syscfg_num = sizeof(syscfg_tbl_8800dc) / sizeof(u32) / 2;
+
+    for (cnt = 0; cnt < syscfg_num; cnt++) {
+        ret = rwnx_send_dbg_mem_write_req(rwnx_hw, syscfg_tbl_8800dc[cnt][0], syscfg_tbl_8800dc[cnt][1]);
+        if (ret) {
+            AICWFDBG(LOGERROR, "%x write fail: %d\n", syscfg_tbl_8800dc[cnt][0], ret);
+            return;
+        }
+    }
+
+    if (IS_CHIP_ID_H()) {
+        syscfg_num = sizeof(syscfg_tbl_masked_8800dc_h) / sizeof(u32) / 3;
+        p_syscfg_msk_tbl = syscfg_tbl_masked_8800dc_h;
+    } else {
+        syscfg_num = sizeof(syscfg_tbl_masked_8800dc) / sizeof(u32) / 3;
+        p_syscfg_msk_tbl = syscfg_tbl_masked_8800dc;
+    }
+
+    for (cnt = 0; cnt < syscfg_num; cnt++) {
+        if (p_syscfg_msk_tbl[cnt][0] == 0x00000000) {
+            break;
+        } else if (p_syscfg_msk_tbl[cnt][0] == 0x70001000) {
+            if (chip_mcu_id == 0) {
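+                /* for MCU id 0, also force bits 8 and 15 in both the mask and the value written to 0x70001000 */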
p_syscfg_msk_tbl[cnt][1] |= ((0x1 << 8) | (0x1 << 15)); // mask + p_syscfg_msk_tbl[cnt][2] |= ((0x1 << 8) | (0x1 << 15)); + } + } + + ret = rwnx_send_dbg_mem_mask_write_req(rwnx_hw, + p_syscfg_msk_tbl[cnt][0], p_syscfg_msk_tbl[cnt][1], p_syscfg_msk_tbl[cnt][2]); + if (ret) { + AICWFDBG(LOGERROR, "%x mask write fail: %d\n", p_syscfg_msk_tbl[cnt][0], ret); + return; + } + } + + if (chip_sub_id == 0) { + syscfg_num = sizeof(syscfg_tbl_masked_8800dc_u01) / sizeof(u32) / 3; + for (cnt = 0; cnt < syscfg_num; cnt++) { + ret = rwnx_send_dbg_mem_mask_write_req(rwnx_hw, + syscfg_tbl_masked_8800dc_u01[cnt][0], syscfg_tbl_masked_8800dc_u01[cnt][1], syscfg_tbl_masked_8800dc_u01[cnt][2]); + if (ret) { + AICWFDBG(LOGERROR, "%x mask write fail: %d\n", syscfg_tbl_masked_8800dc_u01[cnt][0], ret); + return; + } + } + } + +} + diff --git a/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/aicwf_compat_8800dc.h b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/aicwf_compat_8800dc.h new file mode 100644 index 0000000..c0e53cb --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/aicwf_compat_8800dc.h @@ -0,0 +1,62 @@ +#include + +#if defined(CONFIG_DPD) || defined(CONFIG_LOFT_CALIB) +typedef struct { + uint32_t bit_mask[3]; + uint32_t reserved; + uint32_t dpd_high[96]; + uint32_t dpd_11b[96]; + uint32_t dpd_low[96]; + uint32_t idac_11b[48]; + uint32_t idac_high[48]; + uint32_t idac_low[48]; + uint32_t loft_res[18]; + uint32_t rx_iqim_res[16]; +} rf_misc_ram_t; + +typedef struct { + uint32_t bit_mask[4]; + uint32_t dpd_high[96]; + uint32_t loft_res[18]; +} rf_misc_ram_lite_t; + +#define MEMBER_SIZE(type, member) sizeof(((type *)0)->member) +#define DPD_RESULT_SIZE_8800DC sizeof(rf_misc_ram_lite_t) +#endif + +#ifdef CONFIG_DPD +extern rf_misc_ram_lite_t dpd_res; +#endif + +#ifdef CONFIG_LOFT_CALIB +extern rf_misc_ram_lite_t loft_res_local; +#endif + +int aicwf_patch_table_load(struct rwnx_hw *rwnx_hw, char *filename); +void aicwf_patch_config_8800dc(struct rwnx_hw *rwnx_hw); +int aicwf_set_rf_config_8800dc(struct rwnx_hw *rwnx_hw, struct mm_set_rf_calib_cfm *cfm); +int aicwf_misc_ram_init_8800dc(struct rwnx_hw *rwnx_hw); +#if defined(CONFIG_DPD) || defined(CONFIG_LOFT_CALIB) +int aicwf_misc_ram_valid_check_8800dc(struct rwnx_hw *rwnx_hw, int *valid_out); +int aicwf_plat_calib_load_8800dc(struct rwnx_hw *rwnx_hw); +#endif +#ifdef CONFIG_DPD +int aicwf_dpd_calib_8800dc(struct rwnx_hw *rwnx_hw, rf_misc_ram_lite_t *dpd_res); +int aicwf_dpd_result_apply_8800dc(struct rwnx_hw *rwnx_hw, rf_misc_ram_lite_t *dpd_res); +#ifndef CONFIG_FORCE_DPD_CALIB +int aicwf_dpd_result_load_8800dc(struct rwnx_hw *rwnx_hw, rf_misc_ram_lite_t *dpd_res); +int aicwf_dpd_result_write_8800dc(void *buf, int buf_len); +#endif +#endif +#ifdef CONFIG_LOFT_CALIB +int aicwf_loft_calib_8800dc(struct rwnx_hw *rwnx_hw, rf_misc_ram_lite_t *loft_res); +int aicwf_loft_result_apply_8800dc(struct rwnx_hw *rwnx_hw, rf_misc_ram_lite_t *loft_res); +#endif +int aicwf_plat_patch_load_8800dc(struct rwnx_hw *rwnx_hw); +int aicwf_plat_rftest_load_8800dc(struct rwnx_hw *rwnx_hw); +int rwnx_plat_userconfig_load_8800dc(struct rwnx_hw *rwnx_hw); +int rwnx_plat_userconfig_load_8800dw(struct rwnx_hw *rwnx_hw); +#ifdef CONFIG_POWER_LIMIT +int rwnx_plat_powerlimit_load_8800dcdw(struct rwnx_hw *rwnx_hw, uint16_t chip_id); +#endif +void system_config_8800dc(struct rwnx_hw *rwnx_hw); diff --git a/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/aicwf_debug.h b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/aicwf_debug.h new file mode 100644 index 0000000..bec2500 --- /dev/null 
+++ b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/aicwf_debug.h @@ -0,0 +1,53 @@ + + +#define RWNX_FN_ENTRY_STR ">>> %s()\n", __func__ + + + +/* message levels */ +#define LOGERROR 0x0001 +#define LOGINFO 0x0002 +#define LOGTRACE 0x0004 +#define LOGDEBUG 0x0008 +#define LOGDATA 0x0010 +#define LOGSTEER 0x0020 + +extern int aicwf_dbg_level; +void rwnx_data_dump(char* tag, void* data, unsigned long len); + +#define AICWF_LOG "AICWFDBG(" + +#define AICWFDBG(level, args, arg...) \ +do { \ + if (aicwf_dbg_level & level) { \ + printk(AICWF_LOG#level")\t" args, ##arg); \ + } \ +} while (0) + +#define RWNX_DBG(fmt, ...) \ +do { \ + if (aicwf_dbg_level & LOGTRACE) { \ + printk(AICWF_LOG"LOGTRACE)\t"fmt , ##__VA_ARGS__); \ + } \ +} while (0) + + + +#if 0 +#define RWNX_DBG(fmt, ...) \ + do { \ + if (aicwf_dbg_level & LOGTRACE) { \ + printk(AICWF_LOG"LOGTRACE"")\t" fmt, ##__VA_ARGS__); \ + } \ + } while (0) +#define AICWFDBG(args, level) \ +do { \ + if (aicwf_dbg_level & level) { \ + printk(AICWF_LOG"(%s)\t" ,#level); \ + printf args; \ + } \ +} while (0) +#endif + + + diff --git a/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/aicwf_manager.c b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/aicwf_manager.c new file mode 100644 index 0000000..289e108 --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/aicwf_manager.c @@ -0,0 +1,601 @@ +/** + ****************************************************************************** + * + * Copyright (C) 2020 AIC semiconductor. + * + * @file aicwf_manager.c + * + * @brief netlink msg definitions + * + ****************************************************************************** + */ + +#include +#include +#include +#include "aicwf_manager.h" +#include "lmac_mac.h" +#include "aicwf_debug.h" + +#define MAC_FMT "%02x:%02x:%02x:%02x:%02x:%02x" +#define MAC_ARG(x) ((u8_l *)(x))[0], ((u8_l *)(x))[1], ((u8_l *)(x))[2], ((u8_l *)(x))[3], ((u8_l *)(x))[4], ((u8_l *)(x))[5] +#define MANAGER_STR "[MANAGER] USB " + + +static struct sock *nl_sock = NULL; + +static struct rwnx_vif *nl_rwnx_vif[PHY_BAND_MAX][CONFIG_IFACE_NUMBER] = {{NULL}}; +static u8_l nl_hook[PHY_BAND_MAX][CONFIG_IFACE_NUMBER] = {{0}}; + +static u8_l nl_daemon_on = 0; + +#define FREQ_2G_MIN 2412 +#define FREQ_2G_MAX 2484 +#define FREQ_5G_MIN 5170 +#define FREQ_5G_MAX 5825 +#define FREQ_6G_MIN 5925 +#define FREQ_6G_MAX 7125 + +static int freq_to_channel(int freq) +{ + if (freq >= FREQ_2G_MIN && freq <= FREQ_2G_MAX) { + if (freq == 2484) { + return 14; + } else { + return (freq - 2412) / 5 + 1; + } + } else if (freq >= FREQ_5G_MIN && freq <= FREQ_5G_MAX) { + return (freq - 5000) / 5; + } else if (freq >= FREQ_6G_MIN && freq <= FREQ_6G_MAX) { + return (freq - 5950) / 5; + } else { + AICWFDBG(LOGERROR, MANAGER_STR"aic Unsupported frequency: %d MHz\n", freq); + return -1; + } +} + + +void aicwf_nl_recv_msg(struct sk_buff *skb) +{ + struct rwnx_vif *rwnx_vif; + struct rwnx_sta *sta = NULL; + u8_l rwnx_hook = 0; + u8_l band; + u8_l ssid; + struct nlmsghdr *nlh = NULL; + struct b_nl_message *msg = NULL; + struct b_elm_header *hdr = NULL; + struct b_elm_intf *intf = NULL; + struct b_elm_sta_info *sta_info = NULL; + struct b_elm_roam_info *roam_info = NULL; + u32 offset = 0; + + if (skb == NULL) { + AICWFDBG(LOGERROR, MANAGER_STR"skb is null.\n"); + return; + } + + nlh = (struct nlmsghdr *)skb->data; + msg = (struct b_nl_message *)NLMSG_DATA(nlh); + + AICWFDBG(LOGSTEER, MANAGER_STR"%s, %d\n", __func__, msg->type); + + /* message type */ + switch(msg->type) { + case AIC_NL_DAEMON_ON_TYPE: + nl_daemon_on = 1; + 
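/* daemon has just attached: (re)initialize band steering and report every station already associated on each hooked interface */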
AICWFDBG(LOGSTEER, MANAGER_STR"AIC_NL_DAEMON_ON_TYPE\n"); + for (band = 0; band < PHY_BAND_MAX; band++) { + for (ssid = 0; ssid < CONFIG_IFACE_NUMBER; ssid++) { + rwnx_vif = nl_rwnx_vif[band][ssid]; + rwnx_hook = nl_hook[band][ssid]; + + if (!rwnx_hook) { + AICWFDBG(LOGSTEER, MANAGER_STR"%s, !rwnx_hook, %d,%d\n", __func__, band, ssid); + continue; + } + if (rwnx_vif == NULL || rwnx_vif->up == false) { + AICWFDBG(LOGSTEER, MANAGER_STR"%s, !rwnx_vif, %d,%d\n", __func__, band, ssid); + continue; + } + +#ifdef CONFIG_BAND_STEERING + aicwf_band_steering_init(rwnx_vif); +#endif + + spin_lock_bh(&rwnx_vif->rwnx_hw->cb_lock); + list_for_each_entry(sta, &rwnx_vif->ap.sta_list, list){ + if (sta->valid) + aicwf_nl_send_new_sta_msg(rwnx_vif, sta->mac_addr); + } + spin_unlock_bh(&rwnx_vif->rwnx_hw->cb_lock); + } + } + break; + case AIC_NL_DAEMON_OFF_TYPE: + nl_daemon_on = 0; + break; +#ifdef CONFIG_BAND_STEERING + case AIC_NL_B_STEER_BLOCK_ADD_TYPE: + case AIC_NL_B_STEER_BLOCK_DEL_TYPE: + while (offset < msg->len) { + hdr = (struct b_elm_header *)(msg->content + offset); + offset += ELM_HEADER_LEN; + + switch(hdr->id) { + case AIC_ELM_INTF_ID: + intf = (struct b_elm_intf *)(msg->content + offset); + break; + case AIC_ELM_STA_INFO_ID: + sta_info = (struct b_elm_sta_info *)(msg->content + offset); + break; + default: + AICWFDBG(LOGERROR, MANAGER_STR"unknown element id.\n"); + break; + } + offset += hdr->len; + } + + if (intf && sta_info) { + band = intf->band; + ssid = intf->ssid; + AICWFDBG(LOGSTEER, MANAGER_STR"STEER_BLOCK band: %d, ssid: %d\n", band, ssid); + rwnx_vif = nl_rwnx_vif[band][ssid]; + rwnx_hook = nl_hook[band][ssid]; + + if (!rwnx_hook) { + AICWFDBG(LOGSTEER, MANAGER_STR"rwnx_hook is null p1.\n"); + break; + } + if (rwnx_vif == NULL || rwnx_vif->up == false) { + AICWFDBG(LOGSTEER, MANAGER_STR"rwnx_vif is null/down p1.\n"); + break; + } + + if (msg->type == AIC_NL_B_STEER_BLOCK_ADD_TYPE) + aicwf_band_steering_block_entry_add(rwnx_vif, sta_info->mac); + else if (msg->type == AIC_NL_B_STEER_BLOCK_DEL_TYPE) + aicwf_band_steering_block_entry_del(rwnx_vif, sta_info->mac); + } + break; + case AIC_NL_B_STEER_ROAM_TYPE: + while (offset < msg->len) { + hdr = (struct b_elm_header *)(msg->content + offset); + offset += ELM_HEADER_LEN; + + /* element type: should handle wrong length */ + switch(hdr->id) { + case AIC_ELM_INTF_ID: + intf = (struct b_elm_intf *)(msg->content + offset); + break; + case AIC_ELM_ROAM_INFO_ID: + roam_info = (struct b_elm_roam_info *)(msg->content + offset); + break; + default: + AICWFDBG(LOGERROR, MANAGER_STR"unknown element id.\n"); + break; + } + offset += hdr->len; + } + + if (intf && roam_info) { + band = intf->band; + ssid = intf->ssid; + rwnx_vif = nl_rwnx_vif[band][ssid]; + rwnx_hook = nl_hook[band][ssid]; + + if (!rwnx_hook) { + AICWFDBG(LOGERROR, MANAGER_STR"rwnx_hook is null p2.\n"); + break; + } + if (rwnx_vif == NULL || rwnx_vif->up == false) { + AICWFDBG(LOGERROR, MANAGER_STR"rwnx_vif is null/down p2.\n"); + break; + } + + AICWFDBG(LOGSTEER, MANAGER_STR"AIC_NL_B_STEER_ROAM_TYPE (hostapd_cli)!\n"); + AICWFDBG(LOGSTEER, MANAGER_STR"sta_mac="MAC_FMT"\n", MAC_ARG(roam_info->sta_mac)); + AICWFDBG(LOGSTEER, MANAGER_STR"bss_mac="MAC_FMT" bss_ch=%u method=%s\n", + MAC_ARG(roam_info->bss_mac), + roam_info->bss_ch, + roam_info->method == 0 ? 
"11V" : "Deauth"); + + if (roam_info->method == 1) + aicwf_band_steering_roam_block_entry_add(rwnx_vif, roam_info->sta_mac); + } + break; +#endif + default: + AICWFDBG(LOGERROR, MANAGER_STR"unknown message type.\n"); + break; + } + + return; +} + +void aicwf_nl_send_msg(void *msg, u32 msg_len) +{ + struct nlmsghdr *nlh = NULL; + struct sk_buff *skb = NULL; + u32 skb_len; + s32 err; + + skb_len = NLMSG_SPACE(NL_MAX_MSG_SIZE); + skb = __dev_alloc_skb(skb_len, GFP_ATOMIC); + if (!skb) { + AICWFDBG(LOGERROR, MANAGER_STR"allocate skb failed.\n"); + goto func_return; + } + + nlh = nlmsg_put(skb, 0, 0, 0, NL_MAX_MSG_SIZE, 0); + if (!nlh) { + AICWFDBG(LOGERROR, MANAGER_STR"put netlink header failed.\n"); + kfree_skb(skb); + goto func_return; + } + + NETLINK_CB(skb).portid = 0; + NETLINK_CB(skb).dst_group = 0; + memset(NLMSG_DATA(nlh), 0, msg_len); + memcpy(NLMSG_DATA(nlh), (u8_l *)msg, msg_len); + + if (!nl_sock) { + AICWFDBG(LOGERROR, MANAGER_STR"[%s %u] nl_sock is NULL\n", __FUNCTION__, __LINE__); + goto msg_fail_skb; + } + + err = netlink_unicast(nl_sock, skb, NL_AIC_MANAGER_PID, MSG_DONTWAIT); + + if (err < 0) { + /* netlink_unicast() already kfree_skb */ + AICWFDBG(LOGSTEER, MANAGER_STR"send netlink unicast failed.\n"); + goto func_return; + } + +func_return: + return; + +msg_fail_skb: + kfree_skb(skb); +} + +static void aicwf_netlink_set_msg( + struct b_nl_message *msg, u32 *msg_len, void *elm, u32 elm_len) +{ + memcpy(msg->content + (*msg_len), elm, elm_len); + (*msg_len) += elm_len; + + return; +} + +void aicwf_nl_send_del_sta_msg(struct rwnx_vif *rwnx_vif, u8_l *mac) +{ + u32 msg_len = 0; + struct b_nl_message msg = {0}; + struct b_elm_header hdr = {0}; + struct b_elm_intf intf = {{0}}; + struct b_elm_sta_info sta_info = {{0}}; + + if (!nl_daemon_on) + return; + + AICWFDBG(LOGSTEER, MANAGER_STR"%s, "MAC_FMT"\n", __func__, MAC_ARG(mac)); + + /* element header */ + hdr.id = AIC_ELM_INTF_ID; + hdr.len = ELM_INTF_LEN; + aicwf_netlink_set_msg(&msg, &msg_len, (void *)&hdr, ELM_HEADER_LEN); + + /* element: AIC_ELM_INTF_ID */ + intf.root = 0; /* TBD */ + intf.band = rwnx_vif->ap.band; + intf.ssid = rwnx_vif->rwnx_hw->iface_idx; + aicwf_netlink_set_msg(&msg, &msg_len, (void *)&intf, ELM_INTF_LEN); + + /* element header */ + hdr.id = AIC_ELM_STA_INFO_ID; + hdr.len = ELM_STA_INFO_LEN; + aicwf_netlink_set_msg(&msg, &msg_len, (void *)&hdr, ELM_HEADER_LEN); + + /* element: AIC_ELM_STA_INFO_ID */ + memcpy(sta_info.mac, mac, 6); + aicwf_netlink_set_msg(&msg, &msg_len, (void *)&sta_info, ELM_STA_INFO_LEN); + + /* finish message */ + msg.type = AIC_NL_DEL_STA_TYPE; + msg.len = msg_len; + + /* length += (type + len) */ + msg_len += 8; + aicwf_nl_send_msg((void *)&msg, msg_len); + + return; +} + +void aicwf_nl_send_new_sta_msg(struct rwnx_vif *rwnx_vif, u8_l *mac) +{ + u32 msg_len = 0; + struct b_nl_message msg = {0}; + struct b_elm_header hdr = {0}; + struct b_elm_intf intf = {{0}}; + struct b_elm_sta_info sta_info = {{0}}; + + if (!nl_daemon_on) + return; + + AICWFDBG(LOGSTEER, MANAGER_STR"%s, "MAC_FMT"\n", __func__, MAC_ARG(mac)); + + /* element header */ + hdr.id = AIC_ELM_INTF_ID; + hdr.len = ELM_INTF_LEN; + aicwf_netlink_set_msg(&msg, &msg_len, (void *)&hdr, ELM_HEADER_LEN); + + /* element: AIC_ELM_INTF_ID */ + intf.root = 0; /* TBD */ + intf.band = rwnx_vif->ap.band; + intf.ssid = rwnx_vif->rwnx_hw->iface_idx; + aicwf_netlink_set_msg(&msg, &msg_len, (void *)&intf, ELM_INTF_LEN); + + /* element header */ + hdr.id = AIC_ELM_STA_INFO_ID; + hdr.len = ELM_STA_INFO_LEN; + aicwf_netlink_set_msg(&msg, 
&msg_len, (void *)&hdr, ELM_HEADER_LEN); + + /* element: AIC_ELM_STA_INFO_ID */ + memcpy(sta_info.mac, mac, 6); + aicwf_netlink_set_msg(&msg, &msg_len, (void *)&sta_info, ELM_STA_INFO_LEN); + + /* finish message */ + msg.type = AIC_NL_NEW_STA_TYPE; + msg.len = msg_len; + + /* length += (type + len) */ + msg_len += 8; + aicwf_nl_send_msg((void *)&msg, msg_len); + + return; +} + +void aicwf_nl_send_intf_rpt_msg(struct rwnx_vif *rwnx_vif) +{ + u32 msg_len = 0; + struct b_nl_message msg = {0}; + struct b_elm_header hdr = {0}; + struct b_elm_intf intf = {{0}}; + struct b_elm_intf_info intf_info = {0}; + + if (!nl_daemon_on) + return; + + /* element header */ + hdr.id = AIC_ELM_INTF_ID; + hdr.len = ELM_INTF_LEN; + aicwf_netlink_set_msg(&msg, &msg_len, (void *)&hdr, ELM_HEADER_LEN); + + /* element: AIC_ELM_INTF_ID */ + memcpy(intf.mac, rwnx_vif->ndev->dev_addr, 6); + intf.root = 0; /* TBD */ + intf.band = rwnx_vif->ap.band; + intf.ssid = rwnx_vif->rwnx_hw->iface_idx; + memcpy(intf.name, rwnx_vif->ndev->name, 16); + aicwf_netlink_set_msg(&msg, &msg_len, (void *)&intf, ELM_INTF_LEN); + + /* element header */ + hdr.id = AIC_ELM_INTF_INFO_ID; + hdr.len = ELM_INTF_INFO_LEN; + aicwf_netlink_set_msg(&msg, &msg_len, (void *)&hdr, ELM_HEADER_LEN); + + /* element: AIC_ELM_INTF_INFO_ID */ + intf_info.ch = freq_to_channel(rwnx_vif->ap.freq); + intf_info.tx_tp = 0; /* TBD */ + intf_info.rx_tp = 0; /* TBD */ + + intf_info.bss_info = 0; + intf_info.reg_class = (rwnx_vif->ap.band == 0) ? 81 : 128; + intf_info.phy_type = 7; + + aicwf_netlink_set_msg(&msg, &msg_len, (void *)&intf_info, ELM_INTF_INFO_LEN); + + /* finish message */ + msg.type = AIC_NL_INTF_RPT_TYPE; + msg.len = msg_len; + + /* length += (type + len) */ + msg_len += 8; + aicwf_nl_send_msg((void *)&msg, msg_len); + + return; +} + +void aicwf_nl_send_sta_rpt_msg(struct rwnx_vif *rwnx_vif, struct rwnx_sta *sta) +{ + u32 msg_len = 0; + struct b_nl_message msg = {0}; + struct b_elm_header hdr = {0}; + struct b_elm_intf intf = {{0}}; + struct b_elm_sta_info sta_info = {{0}}; + struct b_elm_sta_info_ext sta_info_ext = {{0}}; + + if (!nl_daemon_on) + return; + + /* element header */ + hdr.id = AIC_ELM_INTF_ID; + hdr.len = ELM_INTF_LEN; + aicwf_netlink_set_msg(&msg, &msg_len, (void *)&hdr, ELM_HEADER_LEN); + + /* element: AIC_ELM_INTF_ID */ + memcpy(intf.mac, rwnx_vif->ndev->dev_addr, 6); + intf.root = 0; /* TBD */ + intf.band = rwnx_vif->ap.band; + intf.ssid = rwnx_vif->rwnx_hw->iface_idx; + memcpy(intf.name, rwnx_vif->ndev->name, 16); + aicwf_netlink_set_msg(&msg, &msg_len, (void *)&intf, ELM_INTF_LEN); + + /* element header */ + hdr.id = AIC_ELM_STA_INFO_ID; + hdr.len = ELM_STA_INFO_LEN; + aicwf_netlink_set_msg(&msg, &msg_len, (void *)&hdr, ELM_HEADER_LEN); + + /* element: AIC_ELM_STA_INFO_ID */ + memcpy(sta_info.mac, sta->mac_addr, 6); + sta_info.rssi = sta->rssi; + sta_info.link_time = sta->link_time; + aicwf_netlink_set_msg(&msg, &msg_len, (void *)&sta_info, ELM_STA_INFO_LEN); + + /* element header */ + hdr.id = AIC_ELM_STA_INFO_EXT_ID; + hdr.len = ELM_STA_INFO_EXT_LEN; + aicwf_netlink_set_msg(&msg, &msg_len, (void *)&hdr, ELM_HEADER_LEN); + + /* element: AIC_ELM_STA_INFO_EXT_ID */ + memcpy(sta_info_ext.mac, sta->mac_addr, 6); + sta_info_ext.supported_band = sta->support_band; + aicwf_netlink_set_msg(&msg, &msg_len, (void *)&sta_info_ext, ELM_STA_INFO_EXT_LEN); + + /* finish message */ + msg.type = AIC_NL_STA_RPT_TYPE; + msg.len = msg_len; + + /* length += (type + len) */ + msg_len += 8; + aicwf_nl_send_msg((void *)&msg, msg_len); + + return; +} + +void 
aicwf_nl_send_frame_rpt_msg(struct rwnx_vif *rwnx_vif, u16_l frame_type, u8_l *sa, s8_l rssi)
+{
+    u32 msg_len = 0;
+    struct b_nl_message msg = {0};
+    struct b_elm_header hdr = {0};
+    struct b_elm_frame_info frame_info = {0};
+    struct b_elm_intf intf = {{0}};
+
+    if (!nl_daemon_on)
+        return;
+
+    //AICWFDBG(LOGSTEER, "[NETLINK] %s, sta: "MAC_FMT"\n", __func__, MAC_ARG(sa));
+
+    /* TBD */
+    if (frame_type == WIFI_PROBEREQ && rssi <= LOW_RSSI_IGNORE) {
+        AICWFDBG(LOGSTEER, MANAGER_STR"WIFI_PROBEREQ <= %d, ignore\n", LOW_RSSI_IGNORE);
+        return;
+    }
+
+    /* element header */
+    hdr.id = AIC_ELM_INTF_ID;
+    hdr.len = ELM_INTF_LEN;
+    aicwf_netlink_set_msg(&msg, &msg_len, (void *)&hdr, ELM_HEADER_LEN);
+
+    /* element: AIC_ELM_INTF_ID */
+    intf.root = 0; /* TBD */
+    intf.band = rwnx_vif->ap.band;
+    intf.ssid = rwnx_vif->rwnx_hw->iface_idx;
+    aicwf_netlink_set_msg(&msg, &msg_len, (void *)&intf, ELM_INTF_LEN);
+
+    /* element header */
+    hdr.id = AIC_ELM_FRAME_INFO_ID;
+    hdr.len = ELM_FRAME_INFO_LEN;
+    aicwf_netlink_set_msg(&msg, &msg_len, (void *)&hdr, ELM_HEADER_LEN);
+
+    /* element: AIC_ELM_FRAME_INFO_ID */
+    frame_info.frame_type = frame_type;
+    memcpy(frame_info.sa, sa, 6);
+    frame_info.rssi = rssi;
+    aicwf_netlink_set_msg(&msg, &msg_len, (void *)&frame_info, ELM_FRAME_INFO_LEN);
+
+    /* finish message */
+    msg.type = AIC_NL_FRAME_RPT_TYPE;
+    msg.len = msg_len;
+
+    /* length += (type + len) */
+    msg_len += 8;
+    aicwf_nl_send_msg((void *)&msg, msg_len);
+
+    return;
+}
+
+void aicwf_nl_send_time_tick_msg(struct rwnx_vif *rwnx_vif)
+{
+    u32 msg_len = 0;
+    struct b_nl_message msg = {0};
+    struct b_elm_header hdr = {0};
+    struct b_elm_intf intf = {{0}};
+
+    if (!nl_daemon_on)
+        return;
+
+    /* element header */
+    hdr.id = AIC_ELM_INTF_ID;
+    hdr.len = ELM_INTF_LEN;
+    aicwf_netlink_set_msg(&msg, &msg_len, (void *)&hdr, ELM_HEADER_LEN);
+
+    /* element: AIC_ELM_INTF_ID */
+    intf.root = 0; /* TBD */
+    intf.band = rwnx_vif->ap.band;
+    intf.ssid = rwnx_vif->rwnx_hw->iface_idx;
+    aicwf_netlink_set_msg(&msg, &msg_len, (void *)&intf, ELM_INTF_LEN);
+
+    /* finish message */
+    msg.type = AIC_NL_TIME_TICK_TYPE;
+    msg.len = msg_len;
+
+    /* length += (type + len) */
+    msg_len += 8;
+    aicwf_nl_send_msg((void *)&msg, msg_len);
+
+    return;
+}
+
+void aicwf_nl_hook(struct rwnx_vif *rwnx_vif, u8_l band, u8_l ssid)
+{
+    AICWFDBG(LOGSTEER, MANAGER_STR"%s band:%d, ssid:%d\n", __func__, band, ssid);
+
+    nl_hook[band][ssid] = 1;
+    nl_rwnx_vif[band][ssid] = rwnx_vif;
+
+    return;
+}
+
+void aicwf_nl_hook_deinit(u8_l band, u8_l ssid)
+{
+
+    AICWFDBG(LOGSTEER, MANAGER_STR"%s band:%d, ssid:%d\n", __func__, band, ssid);
+
+    nl_hook[band][ssid] = 0;
+    nl_rwnx_vif[band][ssid] = NULL;
+
+    return;
+}
+
+
+void aicwf_nl_init(void)
+{
+    struct netlink_kernel_cfg cfg = {
+        .input = aicwf_nl_recv_msg,
+    };
+
+    if (nl_sock) {
+        AICWFDBG(LOGSTEER, MANAGER_STR"netlink already initialized.\n");
+        return;
+    }
+
+    nl_sock = netlink_kernel_create(&init_net, NL_AIC_PROTOCOL, &cfg);
+    if (!nl_sock)
+        AICWFDBG(LOGERROR, MANAGER_STR"create netlink failed.\n");
+
+    return;
+}
+
+void aicwf_nl_deinit(void)
+{
+    if(nl_sock)
+    {
+        netlink_kernel_release(nl_sock);
+        nl_sock = NULL;
+    }
+    AICWFDBG(LOGSTEER, MANAGER_STR"[aicwf_nl_deinit] delete nl_sock netlink succeeded.\n");
+
+    return;
+}
+
diff --git a/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/aicwf_manager.h b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/aicwf_manager.h
new file mode 100644
index 0000000..36db12a
--- /dev/null
+++ b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/aicwf_manager.h
@@ -0,0 +1,146 @@
+#ifndef _AICWF_MANAGER_H_ +#define _AICWF_MANAGER_H_ + +#include "lmac_types.h" +#include "rwnx_defs.h" + +#define DRV_WLAN_MANAGER_VER "v1.0" + + +/* TBD */ +#define CONFIG_IFACE_NUMBER 1 + +#define NL_AIC_PROTOCOL_FRT_DRV 28 +#define NL_AIC_PROTOCOL_SEC_DRV 29 +#define NL_AIC_PROTOCOL NL_AIC_PROTOCOL_SEC_DRV + +#define NL_AIC_MANAGER_PID 5185 + +#define NL_MAX_MSG_SIZE 768 + +/* Netlink Message Type List */ +#define AIC_NL_DAEMON_ON_TYPE 1 +#define AIC_NL_DAEMON_OFF_TYPE 2 +#define AIC_NL_DAEMON_ALIVE_TYPE 3 +#define AIC_NL_DEL_STA_TYPE 4 +#define AIC_NL_NEW_STA_TYPE 5 +#define AIC_NL_INTF_RPT_TYPE 6 +#define AIC_NL_STA_RPT_TYPE 7 +#define AIC_NL_FRAME_RPT_TYPE 8 +#define AIC_NL_TIME_TICK_TYPE 9 +#define AIC_NL_PRIV_INFO_CMD_TYPE 10 +#ifdef CONFIG_BAND_STEERING +#define AIC_NL_B_STEER_CMD_TYPE 11 +#define AIC_NL_B_STEER_BLOCK_ADD_TYPE 12 +#define AIC_NL_B_STEER_BLOCK_DEL_TYPE 13 +#define AIC_NL_B_STEER_ROAM_TYPE 14 +#endif + +#define AIC_NL_GENERAL_CMD_TYPE 100 +#define AIC_NL_CUSTOMER_TYPE 101 +#define AIC_NL_CONFIG_UPDATE_TYPE 102 + +/* Element ID */ +#define AIC_ELM_INTF_ID 1 +#define AIC_ELM_INTF_INFO_ID 2 +#define AIC_ELM_FRAME_INFO_ID 3 +#define AIC_ELM_STA_INFO_ID 4 +#define AIC_ELM_ROAM_INFO_ID 5 +#define AIC_ELM_BUFFER_ID 6 +#define AIC_ELM_STA_INFO_EXT_ID 7 + +#define LOW_RSSI_IGNORE (-85) + +struct b_nl_message { + u32_l type; + u32_l len; + u8_l content[NL_MAX_MSG_SIZE]; +}; + +struct b_elm_header { + u8_l id; + u8_l len; +}; + +struct b_elm_intf { + u8_l mac[6]; + u8_l root; + u8_l band; + u8_l ssid; + s8_l name[16]; +}; + +struct b_elm_intf_info { + u16_l ch; + u8_l ch_clm; + u8_l ch_noise; + u32_l tx_tp; + u32_l rx_tp; + u32_l assoc_sta_num; + /* self neighbor report info */ + u32_l bss_info; + u8_l reg_class; + u8_l phy_type; +}; + +struct b_elm_frame_info { + u16_l frame_type; + u8_l sa[6]; + s8_l rssi; +}; + +struct b_elm_sta_info { + u8_l mac[6]; + s8_l rssi; + u32_l link_time; + u32_l tx_tp; /* kbits */ + u32_l rx_tp; /* kbits */ +}; + +struct b_elm_roam_info { + u8_l sta_mac[6]; /* station mac */ + u8_l bss_mac[6]; /* target bss mac */ + u16_l bss_ch; /* target bss channel */ + u8_l method; /* 0: 11V, 1: Deauth */ +}; + +struct b_elm_buffer { + u8_l buf[255]; +}; + +struct b_elm_sta_info_ext { + u8_l mac[6]; + u8_l supported_band; /* bit0:2g bit1:5g */ + u8_l empty[119]; /* for future use */ +}; + + +/* Element Size List */ +#define ELM_HEADER_LEN (sizeof(struct b_elm_header)) +#define ELM_INTF_LEN (sizeof(struct b_elm_intf)) +#define ELM_INTF_INFO_LEN (sizeof(struct b_elm_intf_info)) +#define ELM_FRAME_INFO_LEN (sizeof(struct b_elm_frame_info)) +#define ELM_STA_INFO_LEN (sizeof(struct b_elm_sta_info)) +#define ELM_ROAM_INFO_LEN (sizeof(struct b_elm_roam_info)) +#define ELM_BUFFER_LEN (sizeof(struct b_elm_buffer)) +#define ELM_STA_INFO_EXT_LEN (sizeof(struct b_elm_sta_info_ext)) + + + +void aicwf_nl_send_del_sta_msg(struct rwnx_vif *rwnx_vif, u8_l *mac); +void aicwf_nl_send_new_sta_msg(struct rwnx_vif *rwnx_vif, u8_l *mac); +void aicwf_nl_send_intf_rpt_msg(struct rwnx_vif *rwnx_vif); +void aicwf_nl_send_sta_rpt_msg(struct rwnx_vif *rwnx_vif, struct rwnx_sta *sta); +void aicwf_nl_send_frame_rpt_msg(struct rwnx_vif *rwnx_vif, u16_l frame_type, u8_l *sa, s8_l rssi); +void aicwf_nl_send_time_tick_msg(struct rwnx_vif *rwnx_vif); +void aicwf_nl_hook(struct rwnx_vif *rwnx_vif, u8_l band, u8_l iface_id); +void aicwf_nl_hook_deinit(u8_l band, u8_l ssid); +void aicwf_nl_init(void); +void aicwf_nl_deinit(void); +void aicwf_wlan_manager_recv_msg(struct b_nl_message *msg); +void 
aicwf_nl_recv_msg(struct sk_buff *skb); +void aicwf_nl_send_msg(void *msg, u32_l msg_len); + + +#endif + diff --git a/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/aicwf_rx_prealloc.h b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/aicwf_rx_prealloc.h new file mode 100755 index 0000000..dc805a5 --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/aicwf_rx_prealloc.h @@ -0,0 +1,27 @@ + +#ifndef _AICWF_RX_PREALLOC_H_ +#define _AICWF_RX_PREALLOC_H_ + +#ifdef CONFIG_PREALLOC_RX_SKB + +struct rx_buff { + struct list_head queue; + unsigned char *data; + u32 len; + uint8_t *start; + uint8_t *end; + uint8_t *read; +}; + +struct aicwf_rx_buff_list { + struct list_head rxbuff_list; + atomic_t rxbuff_list_len; +}; + +extern struct rx_buff *aicwf_prealloc_rxbuff_alloc(spinlock_t *lock); +extern void aicwf_prealloc_rxbuff_free(struct rx_buff *rxbuff, spinlock_t *lock); +extern int aicwf_prealloc_init(void); +extern void aicwf_prealloc_exit(void); +extern int aicwf_rxbuff_size_get(void); +#endif +#endif /* _AICWF_RX_PREALLOC_H_ */ diff --git a/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/aicwf_sdio.c b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/aicwf_sdio.c new file mode 100644 index 0000000..f8e79cc --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/aicwf_sdio.c @@ -0,0 +1,1250 @@ +/** + * aicwf_sdmmc.c + * + * SDIO function declarations + * + * Copyright (C) AICSemi 2018-2020 + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "aicwf_txrxif.h" +#include "aicwf_sdio.h" +#include "sdio_host.h" +#include "rwnx_defs.h" +#include "rwnx_platform.h" +#ifdef CONFIG_INGENIC_T20 +#include "mach/jzmmc.h" +#endif /* CONFIG_INGENIC_T20 */ + +#ifdef CONFIG_PLATFORM_ALLWINNER +void platform_wifi_power_off(void); +#endif + +int aicwf_sdio_readb(struct aic_sdio_dev *sdiodev, uint regaddr, u8 *val) +{ + int ret; + sdio_claim_host(sdiodev->func); + *val = sdio_readb(sdiodev->func, regaddr, &ret); + sdio_release_host(sdiodev->func); + return ret; +} + +int aicwf_sdio_writeb(struct aic_sdio_dev *sdiodev, uint regaddr, u8 val) +{ + int ret; + sdio_claim_host(sdiodev->func); + sdio_writeb(sdiodev->func, val, regaddr, &ret); + sdio_release_host(sdiodev->func); + return ret; +} + +int aicwf_sdio_flow_ctrl(struct aic_sdio_dev *sdiodev) +{ + int ret = -1; + u8 fc_reg = 0; + u32 count = 0; + + while (true) { + ret = aicwf_sdio_readb(sdiodev, SDIOWIFI_FLOW_CTRL_REG, &fc_reg); + if (ret) { + return -1; + } + + if ((fc_reg & SDIOWIFI_FLOWCTRL_MASK_REG) != 0) { + ret = fc_reg & SDIOWIFI_FLOWCTRL_MASK_REG; + return ret; + } else { + if (count >= FLOW_CTRL_RETRY_COUNT) { + ret = -fc_reg; + break; + } + count++; + if (count < 30) + udelay(200); + else if(count < 40) + mdelay(1); + else + mdelay(10); + } + } + + return ret; +} + +int aicwf_sdio_send_pkt(struct aic_sdio_dev *sdiodev, u8 *buf, uint count) +{ + int ret = 0; + + sdio_claim_host(sdiodev->func); + ret = sdio_writesb(sdiodev->func, 7, buf, count); + sdio_release_host(sdiodev->func); + + return ret; +} + +int aicwf_sdio_recv_pkt(struct aic_sdio_dev *sdiodev, struct sk_buff *skbbuf, + u32 size) +{ + int ret; + + if ((!skbbuf) || (!size)) { + return -EINVAL;; + } + + sdio_claim_host(sdiodev->func); + ret = sdio_readsb(sdiodev->func, skbbuf->data, 8, size); + sdio_release_host(sdiodev->func); + + if (ret < 0) { + return ret; + } + skbbuf->len = size; + + return ret; +} + +static int aicwf_sdio_probe(struct sdio_func *func, + const struct sdio_device_id *id) +{ + struct mmc_host *host; + struct 
aic_sdio_dev *sdiodev; + struct aicwf_bus *bus_if; + int err = -ENODEV; + + sdio_dbg("%s:%d\n", __func__, func->num); + host = func->card->host; + if(func->num != 1) { + return err; + } + + bus_if = kzalloc(sizeof(struct aicwf_bus), GFP_KERNEL); + if (!bus_if) { + sdio_err("alloc bus fail\n"); + return -ENOMEM; + } + + sdiodev = kzalloc(sizeof(struct aic_sdio_dev), GFP_KERNEL); + if (!sdiodev) { + sdio_err("alloc sdiodev fail\n"); + kfree(bus_if); + return -ENOMEM; + } + + sdiodev->func = func; + sdiodev->bus_if = bus_if; + bus_if->bus_priv.sdio = sdiodev; + dev_set_drvdata(&func->dev, bus_if); + sdiodev->dev = &func->dev; + err = aicwf_sdio_func_init(sdiodev); + if (err < 0) { + sdio_err("sdio func init fail\n"); + goto fail; + } + + aicwf_sdio_bus_init(sdiodev); + host->caps |= MMC_CAP_NONREMOVABLE; + aicwf_rwnx_sdio_platform_init(sdiodev); + aicwf_hostif_ready(); + + return 0; +fail: + aicwf_sdio_func_deinit(sdiodev); + dev_set_drvdata(&func->dev, NULL); + kfree(sdiodev); + kfree(bus_if); + return err; +} + +static void aicwf_sdio_remove(struct sdio_func *func) +{ + struct mmc_host *host; + struct aicwf_bus *bus_if = NULL; + struct aic_sdio_dev *sdiodev = NULL; + + sdio_dbg("%s\n", __func__); + host = func->card->host; + host->caps &= ~MMC_CAP_NONREMOVABLE; + bus_if = dev_get_drvdata(&func->dev); + if (!bus_if) { + return; + } + + sdiodev = bus_if->bus_priv.sdio; + if (!sdiodev) { + return; + } + sdiodev->bus_if->state = BUS_DOWN_ST; + aicwf_sdio_release(sdiodev); + aicwf_sdio_func_deinit(sdiodev); + dev_set_drvdata(&sdiodev->func->dev, NULL); + kfree(sdiodev); + kfree(bus_if); + sdio_dbg("%s done\n", __func__); +#ifdef CONFIG_PLATFORM_ALLWINNER + platform_wifi_power_off(); +#endif +} + +static int aicwf_sdio_suspend(struct device *dev) +{ + int ret = 0; + struct aicwf_bus *bus_if = dev_get_drvdata(dev); + struct aic_sdio_dev *sdiodev = bus_if->bus_priv.sdio; + mmc_pm_flag_t sdio_flags; + struct rwnx_vif *rwnx_vif, *tmp; + + sdio_dbg("%s\n", __func__); + list_for_each_entry_safe(rwnx_vif, tmp, &sdiodev->rwnx_hw->vifs, list) { + if(rwnx_vif->ndev) + netif_device_detach(rwnx_vif->ndev); + } + + sdio_flags = sdio_get_host_pm_caps(sdiodev->func); + if (!(sdio_flags & MMC_PM_KEEP_POWER)) { + return -EINVAL; + } + ret = sdio_set_host_pm_flags(sdiodev->func, MMC_PM_KEEP_POWER); + if (ret) { + return ret; + } + + while (sdiodev->state == SDIO_ACTIVE_ST) { + if (down_interruptible(&sdiodev->tx_priv->txctl_sema)){ + continue; + } + aicwf_sdio_pwr_stctl(sdiodev, SDIO_SLEEP_ST); + up(&sdiodev->tx_priv->txctl_sema); + break; + } + + return 0; +} + +static int aicwf_sdio_resume(struct device *dev) +{ + struct aicwf_bus *bus_if = dev_get_drvdata(dev); + struct aic_sdio_dev *sdiodev = bus_if->bus_priv.sdio; + struct rwnx_vif *rwnx_vif, *tmp; + + sdio_dbg("%s\n", __func__); + list_for_each_entry_safe(rwnx_vif, tmp, &sdiodev->rwnx_hw->vifs, list) { + if(rwnx_vif->ndev) + netif_device_attach(rwnx_vif->ndev); + } + + return 0; +} + +static const struct sdio_device_id aicwf_sdmmc_ids[] = { + {SDIO_DEVICE_CLASS(SDIO_CLASS_WLAN)}, + { }, +}; + +MODULE_DEVICE_TABLE(sdio, aicwf_sdmmc_ids); + +static const struct dev_pm_ops aicwf_sdio_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(aicwf_sdio_suspend, aicwf_sdio_resume) +}; + +static struct sdio_driver aicwf_sdio_driver = { + .probe = aicwf_sdio_probe, + .remove = aicwf_sdio_remove, + .name = AICWF_SDIO_NAME, + .id_table = aicwf_sdmmc_ids, + .drv = { + .pm = &aicwf_sdio_pm_ops, + }, +}; + +#ifdef CONFIG_NANOPI_M4 + extern int mmc_rescan_try_freq(struct mmc_host *host, 
unsigned freq); + extern unsigned aic_max_freqs; + extern struct mmc_host* aic_host_drv; + extern int __mmc_claim_host(struct mmc_host *host, atomic_t *abort); + extern void mmc_release_host(struct mmc_host *host); +#endif +#ifdef CONFIG_PLATFORM_ALLWINNER +extern void sunxi_mmc_rescan_card(unsigned ids); +extern void sunxi_wlan_set_power(int on); +extern int sunxi_wlan_get_bus_index(void); + +int platform_wifi_power_on(void) +{ + int ret=0; + int wlan_bus_index=sunxi_wlan_get_bus_index(); + if(wlan_bus_index < 0) + return wlan_bus_index; + + sunxi_wlan_set_power(1); + mdelay(1000); + sunxi_mmc_rescan_card(wlan_bus_index); + + printk("platform_wifi_power_on"); + + return ret; +} + +void platform_wifi_power_off(void) +{ + int wlan_bus_index = sunxi_wlan_get_bus_index(); + if(wlan_bus_index < 0) { + printk("no wlan_bus_index\n"); + return ; + } + printk("power_off\n"); + sunxi_wlan_set_power(0); + mdelay(100); + //sunxi_mmc_rescan_card(wlan_bus_index); + + printk("platform_wifi_power_off"); +} +#endif +void aicwf_sdio_register(void) +{ +#ifdef CONFIG_PLATFORM_NANOPI + extern_wifi_set_enable(0); + mdelay(200); + extern_wifi_set_enable(1); + mdelay(200); + sdio_reinit(); +#endif /*CONFIG_PLATFORM_NANOPI*/ + +#ifdef CONFIG_INGENIC_T20 + jzmmc_manual_detect(1, 1); +#endif /* CONFIG_INGENIC_T20 */ + +#ifdef CONFIG_NANOPI_M4 + if(aic_host_drv->card == NULL){ + __mmc_claim_host(aic_host_drv,NULL); + printk("aic: >>>mmc_rescan_try_freq\n"); + mmc_rescan_try_freq(aic_host_drv,aic_max_freqs); + mmc_release_host(aic_host_drv); + } +#endif +#ifdef CONFIG_PLATFORM_ALLWINNER + platform_wifi_power_on(); +#endif + if (sdio_register_driver(&aicwf_sdio_driver)) { + + } else { + //may add mmc_rescan here + } +} + +void aicwf_sdio_exit(void) +{ + if(g_rwnx_plat && g_rwnx_plat->enabled) + rwnx_platform_deinit(g_rwnx_plat->sdiodev->rwnx_hw); + + sdio_unregister_driver(&aicwf_sdio_driver); + +#ifdef CONFIG_PLATFORM_NANOPI + extern_wifi_set_enable(0); +#endif /*CONFIG_PLATFORM_NANOPI*/ + kfree(g_rwnx_plat); +} + +int aicwf_sdio_wakeup(struct aic_sdio_dev *sdiodev) +{ + int ret = 0; + int read_retry; + int write_retry = 20; + + if (sdiodev->state == SDIO_SLEEP_ST) { + down(&sdiodev->pwrctl_wakeup_sema); + if (sdiodev->rwnx_hw->vif_started) { + if(sdiodev->state == SDIO_ACTIVE_ST) { + up(&sdiodev->pwrctl_wakeup_sema); + return 0; + } + sdio_dbg("w\n"); + while(write_retry) { + ret = aicwf_sdio_writeb(sdiodev, SDIOWIFI_WAKEUP_REG, 1); + if (ret) { + txrx_err("sdio wakeup fail\n"); + ret = -1; + } else { + read_retry=10; + while (read_retry) { + u8 val; + ret = aicwf_sdio_readb(sdiodev, SDIOWIFI_SLEEP_REG, &val); + if (ret==0 && val&0x10) { + break; + } + read_retry--; + udelay(200); + } + if(read_retry != 0) + break; + } + sdio_dbg("write retry: %d \n",write_retry); + write_retry--; + udelay(100); + } + } + + sdiodev->state = SDIO_ACTIVE_ST; + aicwf_sdio_pwrctl_timer(sdiodev, sdiodev->active_duration); + up(&sdiodev->pwrctl_wakeup_sema); + } + return ret; +} + +extern u8 dhcped; +int aicwf_sdio_sleep_allow(struct aic_sdio_dev *sdiodev) +{ + int ret = 0; + struct aicwf_bus *bus_if = sdiodev->bus_if; + struct rwnx_hw *rwnx_hw = sdiodev->rwnx_hw; + struct rwnx_vif *rwnx_vif, *tmp; + + if (bus_if->state == BUS_DOWN_ST) { + ret = aicwf_sdio_writeb(sdiodev, SDIOWIFI_SLEEP_REG, 0x10); + if (ret) { + sdio_err("Write sleep fail!\n"); + } + aicwf_sdio_pwrctl_timer(sdiodev, 0); + return ret; + } + + list_for_each_entry_safe(rwnx_vif, tmp, &rwnx_hw->vifs, list) { + if((rwnx_hw->avail_idx_map & BIT(rwnx_vif->drv_vif_index)) == 0) 
{ + switch(RWNX_VIF_TYPE(rwnx_vif)) { + case NL80211_IFTYPE_P2P_CLIENT: + rwnx_hw->is_p2p_alive = 1; + return ret; + case NL80211_IFTYPE_AP: + return ret; + case NL80211_IFTYPE_P2P_GO: + rwnx_hw->is_p2p_alive = 1; + return ret; + default: + break; + } + } + } + + sdio_info("sleep: %d, %d\n", sdiodev->state, rwnx_hw->scanning); + if (sdiodev->state == SDIO_ACTIVE_ST && !rwnx_hw->scanning && !rwnx_hw->is_p2p_alive \ + && !rwnx_hw->is_p2p_connected && dhcped) { + down(&sdiodev->pwrctl_wakeup_sema); + if (rwnx_hw->vif_started) { + sdio_dbg("s\n"); + ret = aicwf_sdio_writeb(sdiodev, SDIOWIFI_SLEEP_REG, 0x10); + if (ret) + sdio_err("Write sleep fail!\n"); + } + sdiodev->state = SDIO_SLEEP_ST; + aicwf_sdio_pwrctl_timer(sdiodev, 0); + up(&sdiodev->pwrctl_wakeup_sema); + } + else{ + aicwf_sdio_pwrctl_timer(sdiodev, sdiodev->active_duration); + } + + return ret; +} + +int aicwf_sdio_pwr_stctl(struct aic_sdio_dev *sdiodev, uint target) +{ + int ret = 0; + + if (sdiodev->bus_if->state == BUS_DOWN_ST) { + return -1; + } + + if (sdiodev->state == target) { + if (target == SDIO_ACTIVE_ST) { + aicwf_sdio_pwrctl_timer(sdiodev, sdiodev->active_duration); + } + return ret; + } + + switch (target) { + case SDIO_ACTIVE_ST: + aicwf_sdio_wakeup(sdiodev); + break; + case SDIO_SLEEP_ST: + aicwf_sdio_sleep_allow(sdiodev); + break; + } + + return ret; +} + +int aicwf_sdio_txpkt(struct aic_sdio_dev *sdiodev, struct sk_buff *pkt) +{ + int ret = 0; + u8 *frame; + u32 len = 0; + struct aicwf_bus *bus_if = dev_get_drvdata(sdiodev->dev); + + if (bus_if->state == BUS_DOWN_ST) { + sdio_dbg("tx bus is down!\n"); + return -EINVAL; + } + + frame = (u8 *) (pkt->data); + len = pkt->len; + len = (len + SDIOWIFI_FUNC_BLOCKSIZE - 1) / SDIOWIFI_FUNC_BLOCKSIZE * SDIOWIFI_FUNC_BLOCKSIZE; + ret = aicwf_sdio_send_pkt(sdiodev, pkt->data, len); + if (ret) + sdio_err("aicwf_sdio_send_pkt fail%d\n", ret); + + return ret; +} + +static int aicwf_sdio_intr_get_len_bytemode(struct aic_sdio_dev *sdiodev, u8 *byte_len) +{ + int ret = 0; + + if (!byte_len) + return -EBADE; + + if (sdiodev->bus_if->state == BUS_DOWN_ST) { + *byte_len = 0; + } else { + ret = aicwf_sdio_readb(sdiodev, SDIOWIFI_BYTEMODE_LEN_REG, byte_len); + sdiodev->rx_priv->data_len = (*byte_len)*4; + } + + return ret; +} + +static void aicwf_sdio_bus_stop(struct device *dev) +{ + struct aicwf_bus *bus_if = dev_get_drvdata(dev); + struct aic_sdio_dev *sdiodev = bus_if->bus_priv.sdio; + int ret; + + aicwf_sdio_pwrctl_timer(sdiodev, 0); + if(timer_pending(&sdiodev->rwnx_hw->p2p_alive_timer)){ + ret = del_timer(&sdiodev->rwnx_hw->p2p_alive_timer);} + sdio_dbg("%s\n",__func__); + if (sdiodev->pwrctl_tsk) { + complete(&sdiodev->pwrctrl_trgg); + kthread_stop(sdiodev->pwrctl_tsk); + sdiodev->pwrctl_tsk = NULL; + } + + sdio_dbg("%s:pwrctl stopped\n",__func__); + + bus_if->state = BUS_DOWN_ST; + ret = down_interruptible(&sdiodev->tx_priv->txctl_sema); + if (ret) + sdio_err("down txctl_sema fail\n"); + + aicwf_sdio_pwr_stctl(sdiodev, SDIO_SLEEP_ST); + if (!ret) + up(&sdiodev->tx_priv->txctl_sema); + aicwf_frame_queue_flush(&sdiodev->tx_priv->txq); + sdio_dbg("exit %s\n",__func__); +} + +struct sk_buff *aicwf_sdio_readframes(struct aic_sdio_dev *sdiodev) +{ + int ret = 0; + u32 size = 0; + struct sk_buff *skb = NULL; + struct aicwf_bus *bus_if = dev_get_drvdata(sdiodev->dev); + + if (bus_if->state == BUS_DOWN_ST) { + sdio_dbg("bus down\n"); + return NULL; + } + + size = sdiodev->rx_priv->data_len; + skb = __dev_alloc_skb(size, GFP_KERNEL); + if (!skb) { + return NULL; + } + + ret = 
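+		/* pull the whole aggregated buffer in one transfer; data_len was derived from the SDIO interrupt status */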
aicwf_sdio_recv_pkt(sdiodev, skb, size); + if (ret) { + dev_kfree_skb(skb); + skb = NULL; + } + + return skb; +} + +static int aicwf_sdio_tx_msg(struct aic_sdio_dev *sdiodev) +{ + int err = 0; + u16 len; + u8 *payload = sdiodev->tx_priv->cmd_buf; + u16 payload_len = sdiodev->tx_priv->cmd_len; + u8 adjust_str[4] = {0, 0, 0, 0}; + int adjust_len = 0; + int buffer_cnt = 0; + u8 retry = 0; + + len = payload_len; + if ((len % TX_ALIGNMENT) != 0) { + adjust_len = roundup(len, TX_ALIGNMENT); + memcpy(payload+payload_len, adjust_str, (adjust_len - len)); + payload_len += (adjust_len - len); + } + len = payload_len; + + //link tail is necessary + if ((len % SDIOWIFI_FUNC_BLOCKSIZE) != 0) { + memset(payload+payload_len, 0, TAIL_LEN); + payload_len += TAIL_LEN; + len = (payload_len/SDIOWIFI_FUNC_BLOCKSIZE + 1) * SDIOWIFI_FUNC_BLOCKSIZE; + } else{ + len = payload_len; + } + buffer_cnt = aicwf_sdio_flow_ctrl(sdiodev); + while ((buffer_cnt <= 0 || (buffer_cnt > 0 && len > (buffer_cnt * BUFFER_SIZE))) && retry < 5) { + retry++; + buffer_cnt = aicwf_sdio_flow_ctrl(sdiodev); + } + + down(&sdiodev->tx_priv->cmd_txsema); + if (buffer_cnt > 0 && len <= (buffer_cnt * BUFFER_SIZE)) { + err = aicwf_sdio_send_pkt(sdiodev, payload, len); + if (err) { + sdio_err("aicwf_sdio_send_pkt fail%d\n", err); + } + } else { + sdio_err("tx msg fc retry fail\n"); + up(&sdiodev->tx_priv->cmd_txsema); + return -1; + } + + sdiodev->tx_priv->cmd_txstate = false; + if (!err) + sdiodev->tx_priv->cmd_tx_succ= true; + else + sdiodev->tx_priv->cmd_tx_succ= false; + + up(&sdiodev->tx_priv->cmd_txsema); + + return err; +} + +static void aicwf_sdio_tx_process(struct aic_sdio_dev *sdiodev) +{ + int err = 0; + + if (sdiodev->bus_if->state == BUS_DOWN_ST) { + sdio_err("Bus is down\n"); + return; + } + + aicwf_sdio_pwr_stctl(sdiodev, SDIO_ACTIVE_ST); + + //config + sdio_info("send cmd\n"); + if (sdiodev->tx_priv->cmd_txstate) { + if (down_interruptible(&sdiodev->tx_priv->txctl_sema)) { + txrx_err("txctl down bus->txctl_sema fail\n"); + return; + } + if (sdiodev->state != SDIO_ACTIVE_ST) { + txrx_err("state err\n"); + up(&sdiodev->tx_priv->txctl_sema); + txrx_err("txctl up bus->txctl_sema fail\n"); + return; + } + + err = aicwf_sdio_tx_msg(sdiodev); + up(&sdiodev->tx_priv->txctl_sema); + if (waitqueue_active(&sdiodev->tx_priv->cmd_txdone_wait)) + wake_up(&sdiodev->tx_priv->cmd_txdone_wait); + } + + //data + sdio_info("send data\n"); + if (down_interruptible(&sdiodev->tx_priv->txctl_sema)) { + txrx_err("txdata down bus->txctl_sema\n"); + return; + } + + if (sdiodev->state != SDIO_ACTIVE_ST) { + txrx_err("sdio state err\n"); + up(&sdiodev->tx_priv->txctl_sema); + return; + } + + sdiodev->tx_priv->fw_avail_bufcnt = aicwf_sdio_flow_ctrl(sdiodev); + while (!aicwf_is_framequeue_empty(&sdiodev->tx_priv->txq)) { + aicwf_sdio_send(sdiodev->tx_priv); + if(sdiodev->tx_priv->cmd_txstate) + break; + } + + up(&sdiodev->tx_priv->txctl_sema); +} + +static int aicwf_sdio_bus_txdata(struct device *dev, struct sk_buff *pkt) +{ + uint prio; + int ret = -EBADE; + struct aicwf_bus *bus_if = dev_get_drvdata(dev); + struct aic_sdio_dev *sdiodev = bus_if->bus_priv.sdio; + + prio = (pkt->priority & 0x7); + spin_lock_bh(&sdiodev->tx_priv->txqlock); + if (!aicwf_frame_enq(sdiodev->dev, &sdiodev->tx_priv->txq, pkt, prio)) { + aicwf_dev_skb_free(pkt); + spin_unlock_bh(&sdiodev->tx_priv->txqlock); + return -ENOSR; + } else { + ret = 0; + } + + if (bus_if->state != BUS_UP_ST) { + sdio_err("bus_if stopped\n"); + spin_unlock_bh(&sdiodev->tx_priv->txqlock); + return -1; + } + + 
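+	/* frame is queued: account for it and wake the bus tx thread via bustx_trgg */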
atomic_inc(&sdiodev->tx_priv->tx_pktcnt); + spin_unlock_bh(&sdiodev->tx_priv->txqlock); + complete(&bus_if->bustx_trgg); + + return ret; +} + +static int aicwf_sdio_bus_txmsg(struct device *dev, u8 *msg, uint msglen) +{ +// int ret = -1; + struct aicwf_bus *bus_if = dev_get_drvdata(dev); + struct aic_sdio_dev *sdiodev = bus_if->bus_priv.sdio; + + down(&sdiodev->tx_priv->cmd_txsema); + sdiodev->tx_priv->cmd_txstate = true; + sdiodev->tx_priv->cmd_tx_succ = false; + sdiodev->tx_priv->cmd_buf = msg; + sdiodev->tx_priv->cmd_len = msglen; + up(&sdiodev->tx_priv->cmd_txsema); + + if (bus_if->state != BUS_UP_ST) { + sdio_err("bus has stop\n"); + return -1; + } + + complete(&bus_if->bustx_trgg); + #if 0 + if (sdiodev->tx_priv->cmd_txstate) { + int timeout = msecs_to_jiffies(CMD_TX_TIMEOUT); + ret = wait_event_interruptible_timeout(sdiodev->tx_priv->cmd_txdone_wait, \ + !(sdiodev->tx_priv->cmd_txstate), timeout); + } + + if (!sdiodev->tx_priv->cmd_txstate && sdiodev->tx_priv->cmd_tx_succ) { + ret = 0; + } else { + sdio_err("send faild:%d, %d,%x\n", sdiodev->tx_priv->cmd_txstate, sdiodev->tx_priv->cmd_tx_succ, ret); + ret = -EIO; + } + + return ret; + #endif + return 0; +} + +int aicwf_sdio_send(struct aicwf_tx_priv *tx_priv) +{ + struct sk_buff *pkt; + struct aic_sdio_dev *sdiodev = tx_priv->sdiodev; + u32 aggr_len = 0; + int retry_times = 0; + int max_retry_times = 5; + + aggr_len = (tx_priv->tail - tx_priv->head); + if(((atomic_read(&tx_priv->aggr_count) == 0) && (aggr_len != 0)) + || ((atomic_read(&tx_priv->aggr_count) != 0) && (aggr_len == 0))) { + if (aggr_len > 0) + aicwf_sdio_aggrbuf_reset(tx_priv); + goto done; + } + + if (tx_priv->fw_avail_bufcnt <= 0) { //flow control failed + tx_priv->fw_avail_bufcnt = aicwf_sdio_flow_ctrl(sdiodev); + while(tx_priv->fw_avail_bufcnt <=0 && retry_times < max_retry_times) { + retry_times++; + tx_priv->fw_avail_bufcnt = aicwf_sdio_flow_ctrl(sdiodev); + } + if(tx_priv->fw_avail_bufcnt <= 0) { + sdio_err("fc retry %d fail\n", tx_priv->fw_avail_bufcnt); + goto done; + } + } + + if (atomic_read(&tx_priv->aggr_count) == tx_priv->fw_avail_bufcnt) { + if (atomic_read(&tx_priv->aggr_count) > 0) { + tx_priv->fw_avail_bufcnt -= atomic_read(&tx_priv->aggr_count); + aicwf_sdio_aggr_send(tx_priv); //send and check the next pkt; + } + } else { + spin_lock_bh(&sdiodev->tx_priv->txqlock); + pkt = aicwf_frame_dequeue(&sdiodev->tx_priv->txq); + if (pkt == NULL) { + sdio_err("txq no pkt\n"); + spin_unlock_bh(&sdiodev->tx_priv->txqlock); + goto done; + } + atomic_dec(&sdiodev->tx_priv->tx_pktcnt); + spin_unlock_bh(&sdiodev->tx_priv->txqlock); + + if(tx_priv==NULL || tx_priv->tail==NULL || pkt==NULL) + txrx_err("null error\n"); + if (aicwf_sdio_aggr(tx_priv, pkt)) { + aicwf_sdio_aggrbuf_reset(tx_priv); + sdio_err("add aggr pkts failed!\n"); + goto done; + } + + //when aggr finish or there is cmd to send, just send this aggr pkt to fw + if ((int)atomic_read(&sdiodev->tx_priv->tx_pktcnt) == 0 || sdiodev->tx_priv->cmd_txstate) { //no more pkt send it! 
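+			/* tx queue drained or a command is waiting: flush the current aggregate to the firmware now */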
+ tx_priv->fw_avail_bufcnt -= atomic_read(&tx_priv->aggr_count); + aicwf_sdio_aggr_send(tx_priv); + } else + goto done; + } + +done: + return 0; +} + +int aicwf_sdio_aggr(struct aicwf_tx_priv *tx_priv, struct sk_buff *pkt) +{ + struct rwnx_txhdr *txhdr = (struct rwnx_txhdr *)pkt->data; + u8 *start_ptr = tx_priv->tail; + u8 sdio_header[4]; + u8 adjust_str[4] = {0, 0, 0, 0}; + u32 curr_len = 0; + int allign_len = 0; + + sdio_header[0] =((pkt->len - sizeof(struct rwnx_txhdr) + sizeof(struct txdesc_api)) & 0xff); + sdio_header[1] =(((pkt->len - sizeof(struct rwnx_txhdr) + sizeof(struct txdesc_api)) >> 8)&0x0f); + sdio_header[2] = 0x01; //data + sdio_header[3] = 0; //reserved + + memcpy(tx_priv->tail, (u8 *)&sdio_header, sizeof(sdio_header)); + tx_priv->tail += sizeof(sdio_header); + //payload + memcpy(tx_priv->tail, (u8 *)(long)&txhdr->sw_hdr->desc, sizeof(struct txdesc_api)); + tx_priv->tail += sizeof(struct txdesc_api); //hostdesc + memcpy(tx_priv->tail, (u8 *)((u8 *)txhdr + txhdr->sw_hdr->headroom), pkt->len-txhdr->sw_hdr->headroom); + tx_priv->tail += (pkt->len - txhdr->sw_hdr->headroom); + + //word alignment + curr_len = tx_priv->tail - tx_priv->head; + if (curr_len & (TX_ALIGNMENT - 1)) { + allign_len = roundup(curr_len, TX_ALIGNMENT)-curr_len; + memcpy(tx_priv->tail, adjust_str, allign_len); + tx_priv->tail += allign_len; + } + + start_ptr[0] = ((tx_priv->tail - start_ptr - 4) & 0xff); + start_ptr[1] = (((tx_priv->tail - start_ptr - 4)>>8) & 0x0f); + tx_priv->aggr_buf->dev = pkt->dev; + + if(!txhdr->sw_hdr->need_cfm) { + kmem_cache_free(txhdr->sw_hdr->rwnx_vif->rwnx_hw->sw_txhdr_cache, txhdr->sw_hdr); + skb_pull(pkt, txhdr->sw_hdr->headroom); + consume_skb(pkt); + } + + atomic_inc(&tx_priv->aggr_count); + return 0; +} + +void aicwf_sdio_aggr_send(struct aicwf_tx_priv *tx_priv) +{ + struct sk_buff *tx_buf = tx_priv->aggr_buf; + int ret = 0; + int curr_len = 0; + + //link tail is necessary + curr_len = tx_priv->tail - tx_priv->head; + if ((curr_len % TXPKT_BLOCKSIZE) != 0) { + memset(tx_priv->tail, 0, TAIL_LEN); + tx_priv->tail += TAIL_LEN; + } + + tx_buf->len = tx_priv->tail - tx_priv->head; + ret = aicwf_sdio_txpkt(tx_priv->sdiodev, tx_buf); + if (ret < 0) { + sdio_err("fail to send aggr pkt!\n"); + } + + aicwf_sdio_aggrbuf_reset(tx_priv); +} + +void aicwf_sdio_aggrbuf_reset(struct aicwf_tx_priv *tx_priv) +{ + struct sk_buff *aggr_buf = tx_priv->aggr_buf; + + tx_priv->tail = tx_priv->head; + aggr_buf->len = 0; + atomic_set(&tx_priv->aggr_count, 0); +} + +static int aicwf_sdio_bus_start(struct device *dev) +{ + struct aicwf_bus *bus_if = dev_get_drvdata(dev); + struct aic_sdio_dev *sdiodev = bus_if->bus_priv.sdio; + int ret = 0; + +#if 1 + sdio_claim_host(sdiodev->func); + sdio_claim_irq(sdiodev->func, aicwf_sdio_hal_irqhandler); +#else + //since we have func2 we don't register irq handler + sdio_claim_irq(sdiodev->func, NULL); + sdiodev->func->irq_handler = (sdio_irq_handler_t *)aicwf_sdio_hal_irqhandler; +#endif + //enable sdio interrupt + ret = aicwf_sdio_writeb(sdiodev, SDIOWIFI_INTR_CONFIG_REG, 0x07); + sdio_release_host(sdiodev->func); + + if (ret != 0) + sdio_err("intr register failed:%d\n", ret); + + bus_if->state = BUS_UP_ST; + + return ret; +} + +int sdio_bustx_thread(void *data) +{ + struct aicwf_bus *bus = (struct aicwf_bus *) data; + struct aic_sdio_dev *sdiodev = bus->bus_priv.sdio; + + while (1) { + if(kthread_should_stop()) { + sdio_err("sdio bustx thread stop\n"); + break; + } + if (!wait_for_completion_interruptible(&bus->bustx_trgg)) { + if(sdiodev->bus_if->state == 
BUS_DOWN_ST) + break; + + if((int)(atomic_read(&sdiodev->tx_priv->tx_pktcnt) > 0) || (sdiodev->tx_priv->cmd_txstate == true)) + aicwf_sdio_tx_process(sdiodev); + } + } + + return 0; +} + +int sdio_busrx_thread(void *data) +{ + struct aicwf_rx_priv *rx_priv = (struct aicwf_rx_priv *)data; + struct aicwf_bus *bus_if = rx_priv->sdiodev->bus_if; + + while (1) { + if(kthread_should_stop()) { + sdio_err("sdio busrx thread stop\n"); + break; + } + if (!wait_for_completion_interruptible(&bus_if->busrx_trgg)) { + if(bus_if->state == BUS_DOWN_ST) + break; + aicwf_process_rxframes(rx_priv); + } + } + + return 0; +} + +static int aicwf_sdio_pwrctl_thread(void *data) +{ + struct aic_sdio_dev *sdiodev = (struct aic_sdio_dev *) data; + + while (1) { + if(kthread_should_stop()) { + sdio_err("sdio pwrctl thread stop\n"); + break; + } + if (!wait_for_completion_interruptible(&sdiodev->pwrctrl_trgg)) { + if(sdiodev->bus_if->state == BUS_DOWN_ST) + break; + if (sdiodev->state == SDIO_ACTIVE_ST) { + if((int)(atomic_read(&sdiodev->tx_priv->tx_pktcnt) <= 0) && (sdiodev->tx_priv->cmd_txstate == false) && \ + atomic_read(&sdiodev->rx_priv->rx_cnt)==0) + aicwf_sdio_pwr_stctl(sdiodev, SDIO_SLEEP_ST); + else + aicwf_sdio_pwrctl_timer(sdiodev, sdiodev->active_duration); + } + } else { + continue; + } + } + + return 0; +} + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4,14,0) +static void aicwf_sdio_bus_pwrctl(ulong data) +#else +static void aicwf_sdio_bus_pwrctl(struct timer_list *t) +#endif +{ +#if LINUX_VERSION_CODE < KERNEL_VERSION(4,14,0) + struct aic_sdio_dev *sdiodev = (struct aic_sdio_dev *) data; +#else + struct aic_sdio_dev *sdiodev = from_timer(sdiodev, t, timer); +#endif + + if (sdiodev->bus_if->state == BUS_DOWN_ST) { + sdio_err("bus down\n"); + return; + } + + if (sdiodev->pwrctl_tsk){ + complete(&sdiodev->pwrctrl_trgg); + } +} + + +static void aicwf_sdio_enq_rxpkt(struct aic_sdio_dev *sdiodev, struct sk_buff *pkt) +{ + struct aicwf_rx_priv* rx_priv = sdiodev->rx_priv; + unsigned long flags = 0; + + spin_lock_irqsave(&rx_priv->rxqlock, flags); + if (!aicwf_rxframe_enqueue(sdiodev->dev, &rx_priv->rxq, pkt)) { + spin_unlock_irqrestore(&rx_priv->rxqlock, flags); + aicwf_dev_skb_free(pkt); + return; + } + spin_unlock_irqrestore(&rx_priv->rxqlock, flags); + + atomic_inc(&rx_priv->rx_cnt); +} + +#define SDIO_OTHER_INTERRUPT (0x1ul << 7) + +void aicwf_sdio_hal_irqhandler(struct sdio_func *func) +{ + struct aicwf_bus *bus_if = dev_get_drvdata(&func->dev); + struct aic_sdio_dev *sdiodev = bus_if->bus_priv.sdio; + u8 intstatus = 0; + u8 byte_len = 0; + struct sk_buff *pkt = NULL; + int ret; + + if (!bus_if || bus_if->state == BUS_DOWN_ST) { + sdio_err("bus err\n"); + return; + } + + ret = aicwf_sdio_readb(sdiodev, SDIOWIFI_BLOCK_CNT_REG, &intstatus); + while(ret || (intstatus & SDIO_OTHER_INTERRUPT)) { + sdio_err("ret=%d, intstatus=%x\r\n",ret, intstatus); + ret = aicwf_sdio_readb(sdiodev, SDIOWIFI_BLOCK_CNT_REG, &intstatus); + } + sdiodev->rx_priv->data_len = intstatus * SDIOWIFI_FUNC_BLOCKSIZE; + + if (intstatus > 0) { + if(intstatus < 64) { + pkt = aicwf_sdio_readframes(sdiodev); + } else { + aicwf_sdio_intr_get_len_bytemode(sdiodev, &byte_len);//byte_len must<= 128 + sdio_info("byte mode len=%d\r\n", byte_len); + pkt = aicwf_sdio_readframes(sdiodev); + } + } else { + #ifndef CONFIG_PLATFORM_ALLWINNER + sdio_err("Interrupt but no data\n"); + #endif + } + + if (pkt) + aicwf_sdio_enq_rxpkt(sdiodev, pkt); + + complete(&bus_if->busrx_trgg); +} + +void aicwf_sdio_pwrctl_timer(struct aic_sdio_dev *sdiodev, uint duration) 
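+/* duration == 0 cancels the power-control timer; otherwise it is (re)armed to fire duration ms from now */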
+{ + uint timeout; + + if (sdiodev->bus_if->state == BUS_DOWN_ST && duration) + return; + + spin_lock_bh(&sdiodev->pwrctl_lock); + if (!duration) { + if (timer_pending(&sdiodev->timer)) + del_timer_sync(&sdiodev->timer); + } else { + sdiodev->active_duration = duration; + timeout = msecs_to_jiffies(sdiodev->active_duration); + mod_timer(&sdiodev->timer, jiffies + timeout); + } + spin_unlock_bh(&sdiodev->pwrctl_lock); +} + +static struct aicwf_bus_ops aicwf_sdio_bus_ops = { + .stop = aicwf_sdio_bus_stop, + .start = aicwf_sdio_bus_start, + .txdata = aicwf_sdio_bus_txdata, + .txmsg = aicwf_sdio_bus_txmsg, +}; + +void aicwf_sdio_release(struct aic_sdio_dev *sdiodev) +{ + struct aicwf_bus *bus_if; + struct aicwf_rx_priv* rx_priv = NULL; + int ret; + sdio_dbg("%s\n", __func__); + + bus_if = dev_get_drvdata(sdiodev->dev); + bus_if->state = BUS_DOWN_ST; + + sdio_claim_host(sdiodev->func); + //disable sdio interrupt + ret = aicwf_sdio_writeb(sdiodev, SDIOWIFI_INTR_CONFIG_REG, 0x0); + if (ret < 0) { + sdio_err("reg:%d write failed!\n", SDIOWIFI_INTR_CONFIG_REG); + } + sdio_release_irq(sdiodev->func); + sdio_release_host(sdiodev->func); + + if (sdiodev->dev) + aicwf_bus_deinit(sdiodev->dev); + + aicwf_tx_deinit(sdiodev->tx_priv); + rx_priv = sdiodev->rx_priv; + if(rx_priv != NULL) + aicwf_rx_deinit(rx_priv); + rwnx_cmd_mgr_deinit(&sdiodev->cmd_mgr); + sdio_dbg("exit %s\n", __func__); +} + +int aicwf_sdio_func_init(struct aic_sdio_dev *sdiodev) +{ + struct mmc_host *host; + u8 block_bit0 = 0x1; + u8 byte_mode_disable = 0x1;//1: no byte mode + int ret = 0; + host = sdiodev->func->card->host; + + sdio_claim_host(sdiodev->func); + ret = sdio_set_block_size(sdiodev->func, SDIOWIFI_FUNC_BLOCKSIZE); + if (ret < 0) { + sdio_err("set blocksize fail %d\n", ret); + sdio_release_host(sdiodev->func); + return ret; + } + ret = sdio_enable_func(sdiodev->func); + if (ret < 0) { + sdio_release_host(sdiodev->func); + sdio_err("enable func fail %d.\n", ret); + } + + host->ios.clock = 60000000; + host->ops->set_ios(host, &host->ios); + sdio_release_host(sdiodev->func); + sdio_dbg("Set SDIO Clock %d MHz\n",host->ios.clock/1000000); + + ret = aicwf_sdio_writeb(sdiodev, SDIOWIFI_REGISTER_BLOCK, block_bit0); + if (ret < 0) { + sdio_err("reg:%d write failed!\n", SDIOWIFI_REGISTER_BLOCK); + return ret; + } + + //1: no byte mode + ret = aicwf_sdio_writeb(sdiodev, SDIOWIFI_BYTEMODE_ENABLE_REG, byte_mode_disable); + if (ret < 0) { + sdio_err("reg:%d write failed!\n", SDIOWIFI_BYTEMODE_ENABLE_REG); + return ret; + } + + return ret; +} + + +void aicwf_sdio_func_deinit(struct aic_sdio_dev *sdiodev) +{ + sdio_claim_host(sdiodev->func); + sdio_disable_func(sdiodev->func); + sdio_release_host(sdiodev->func); +} + +void *aicwf_sdio_bus_init(struct aic_sdio_dev *sdiodev) +{ + int ret; + struct aicwf_bus *bus_if; + struct aicwf_rx_priv *rx_priv; + struct aicwf_tx_priv *tx_priv; + + spin_lock_init(&sdiodev->pwrctl_lock); + sema_init(&sdiodev->pwrctl_wakeup_sema, 1); + + bus_if = sdiodev->bus_if; + bus_if->dev = sdiodev->dev; + bus_if->ops = &aicwf_sdio_bus_ops; + bus_if->state = BUS_DOWN_ST; + sdiodev->state = SDIO_SLEEP_ST; + sdiodev->active_duration = SDIOWIFI_PWR_CTRL_INTERVAL; + + rx_priv = aicwf_rx_init(sdiodev); + if(!rx_priv) { + sdio_err("rx init fail\n"); + goto fail; + } + sdiodev->rx_priv = rx_priv; + + tx_priv = aicwf_tx_init(sdiodev); + if(!tx_priv) { + sdio_err("tx init fail\n"); + goto fail; + } + sdiodev->tx_priv = tx_priv; + aicwf_frame_queue_init(&tx_priv->txq, 8, TXQLEN); + spin_lock_init(&tx_priv->txqlock); + 
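+	/* txctl_sema serialises data tx against bus stop; cmd_txsema protects the single command buffer */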
sema_init(&tx_priv->txctl_sema,1); + sema_init(&tx_priv->cmd_txsema, 1); + init_waitqueue_head(&tx_priv->cmd_txdone_wait); + atomic_set(&tx_priv->tx_pktcnt, 0); + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4,14,0) + init_timer(&sdiodev->timer); + sdiodev->timer.data = (ulong) sdiodev; + sdiodev->timer.function = aicwf_sdio_bus_pwrctl; +#else + timer_setup(&sdiodev->timer, aicwf_sdio_bus_pwrctl, 0); +#endif + init_completion(&sdiodev->pwrctrl_trgg); +#ifdef AICWF_SDIO_SUPPORT + sdiodev->pwrctl_tsk = kthread_run(aicwf_sdio_pwrctl_thread, sdiodev, "aicwf_pwrctl"); +#endif + if (IS_ERR(sdiodev->pwrctl_tsk)) { + sdiodev->pwrctl_tsk = NULL; + } + + ret = aicwf_bus_init(0, sdiodev->dev); + if (ret < 0) { + sdio_err("bus init fail\n"); + goto fail; + } + + ret = aicwf_bus_start(bus_if); + if (ret != 0) { + sdio_err("bus start fail\n"); + goto fail; + } + + return sdiodev; + +fail: + aicwf_sdio_release(sdiodev); + return NULL; +} + + diff --git a/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/aicwf_sdio.h b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/aicwf_sdio.h new file mode 100644 index 0000000..02c4237 --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/aicwf_sdio.h @@ -0,0 +1,99 @@ +/** + * aicwf_sdio.h + * + * SDIO function declarations + * + * Copyright (C) AICSemi 2018-2020 + */ + +#ifndef _AICWF_SDMMC_H_ +#define _AICWF_SDMMC_H_ + +#ifdef AICWF_SDIO_SUPPORT +#include +#include +#include +#include +#include "rwnx_cmds.h" +#define AICWF_SDIO_NAME "aicwf_sdio" +#define SDIOWIFI_FUNC_BLOCKSIZE 512 + +#define SDIO_VENDOR_ID_AIC 0x8800 +#define SDIO_DEVICE_ID_AIC 0x0001 +#define SDIOWIFI_BYTEMODE_LEN_REG 0x02 +#define SDIOWIFI_INTR_CONFIG_REG 0x04 +#define SDIOWIFI_SLEEP_REG 0x05 +#define SDIOWIFI_WAKEUP_REG 0x09 +#define SDIOWIFI_FLOW_CTRL_REG 0x0A +#define SDIOWIFI_REGISTER_BLOCK 0x0B +#define SDIOWIFI_BYTEMODE_ENABLE_REG 0x11 +#define SDIOWIFI_BLOCK_CNT_REG 0x12 +#define SDIOWIFI_FLOWCTRL_MASK_REG 0x7F + +#define SDIOWIFI_PWR_CTRL_INTERVAL 30 +#define FLOW_CTRL_RETRY_COUNT 50 +#define BUFFER_SIZE 1536 +#define TAIL_LEN 4 +#define TXQLEN (2048*4) + +#define SDIO_SLEEP_ST 0 +#define SDIO_ACTIVE_ST 1 + +typedef enum { + SDIO_TYPE_DATA = 0X00, + SDIO_TYPE_CFG = 0X10, + SDIO_TYPE_CFG_CMD_RSP = 0X11, + SDIO_TYPE_CFG_DATA_CFM = 0X12 +} sdio_type; + +struct rwnx_hw; + + +struct aic_sdio_dev { + struct rwnx_hw *rwnx_hw; + struct sdio_func *func; + struct device *dev; + struct aicwf_bus *bus_if; + struct rwnx_cmd_mgr cmd_mgr; + + struct aicwf_rx_priv *rx_priv; + struct aicwf_tx_priv *tx_priv; + u32 state; + + //for sdio pwr ctrl + struct timer_list timer; + uint active_duration; + struct completion pwrctrl_trgg; + struct task_struct *pwrctl_tsk; + spinlock_t pwrctl_lock; + struct semaphore pwrctl_wakeup_sema; +}; +int aicwf_sdio_writeb(struct aic_sdio_dev *sdiodev, uint regaddr, u8 val); +void aicwf_sdio_hal_irqhandler(struct sdio_func *func); +void aicwf_sdio_pwrctl_timer(struct aic_sdio_dev *sdiodev, uint duration); +int aicwf_sdio_pwr_stctl(struct aic_sdio_dev *sdiodev, uint target); +int aicwf_sdio_func_init(struct aic_sdio_dev *sdiodev); +void aicwf_sdio_func_deinit(struct aic_sdio_dev *sdiodev); +int aicwf_sdio_flow_ctrl(struct aic_sdio_dev *sdiodev); +int aicwf_sdio_recv_pkt(struct aic_sdio_dev *sdiodev, struct sk_buff *skbbuf, u32 size); +int aicwf_sdio_send_pkt(struct aic_sdio_dev *sdiodev, u8 *buf, uint count); +void *aicwf_sdio_bus_init(struct aic_sdio_dev *sdiodev); +void aicwf_sdio_release(struct aic_sdio_dev *sdiodev); +void aicwf_sdio_exit(void); +void 
aicwf_sdio_register(void); +int aicwf_sdio_txpkt(struct aic_sdio_dev *sdiodev, struct sk_buff *pkt); +int sdio_bustx_thread(void *data); +int sdio_busrx_thread(void *data); +int aicwf_sdio_aggr(struct aicwf_tx_priv *tx_priv, struct sk_buff *pkt); +int aicwf_sdio_send(struct aicwf_tx_priv *tx_priv); +void aicwf_sdio_aggr_send(struct aicwf_tx_priv *tx_priv); +void aicwf_sdio_aggrbuf_reset(struct aicwf_tx_priv* tx_priv); +extern void aicwf_hostif_ready(void); +#ifdef CONFIG_PLATFORM_NANOPI +extern void extern_wifi_set_enable(int is_on); +extern void sdio_reinit(void); +#endif /*CONFIG_PLATFORM_NANOPI*/ + +#endif /* AICWF_SDIO_SUPPORT */ + +#endif /*_AICWF_SDMMC_H_*/ diff --git a/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/aicwf_steering.c b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/aicwf_steering.c new file mode 100644 index 0000000..d6d20ca --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/aicwf_steering.c @@ -0,0 +1,217 @@ +/** + ****************************************************************************** + * + * Copyright (C) 2020 AIC semiconductor. + * + * @file aicwf_steering.c + * + * @brief band steering definitions + * + ****************************************************************************** + */ + +#include "aicwf_steering.h" +#include "rwnx_defs.h" + +#define MAC_FMT_B "%02x:%02x:%02x:%02x:%02x:%02x" +#define MAC_ARG_B(x) ((u8_l *)(x))[0], ((u8_l *)(x))[1], ((u8_l *)(x))[2], ((u8_l *)(x))[3], ((u8_l *)(x))[4], ((u8_l *)(x))[5] +#define STEEER_STR "[STEERING] USB " + +#ifdef CONFIG_BAND_STEERING + +static struct b_steer_block_entry *block_entry_lookup(struct rwnx_vif *rwnx_vif, u8_l *mac) +{ + u8_l i; + struct b_steer_block_entry *ent = NULL; + + for (i = 0; i < B_STEER_ENTRY_NUM; i++) { + ent = &(rwnx_vif->bsteerpriv.block_list[i]); + if (ent->used && (memcmp(ent->mac, mac, 6) == 0)) { + AICWFDBG(LOGSTEER, STEEER_STR"%s "MAC_FMT_B"\n", __func__, MAC_ARG_B(mac)); + return ent; + } + } + + return NULL; +} + +void aicwf_band_steering_expire(struct rwnx_vif *rwnx_vif) +{ + u8_l i; + struct b_steer_block_entry *ent = NULL; + + if (rwnx_vif->bsteerpriv.inited == false) { + AICWFDBG(LOGSTEER, STEEER_STR"%s bsteerpriv not inited\n", __func__); + return; + } + + spin_lock_bh(&(rwnx_vif->bsteerpriv.lock)); + + /* block entry */ + for (i = 0; i < B_STEER_ENTRY_NUM; i++) { + ent = &(rwnx_vif->bsteerpriv.block_list[i]); + if (!ent->used) + continue; + + if (ent->entry_expire) { + ent->entry_expire--; + if (ent->entry_expire == 0) + ent->used = 0; + } + } + + spin_unlock_bh(&(rwnx_vif->bsteerpriv.lock)); + + return; +} + +s32_l aicwf_band_steering_block_chk(struct rwnx_vif *rwnx_vif, u8_l *mac) +{ + s32 ret = 0; + struct b_steer_block_entry *ent = NULL; + + if (rwnx_vif->bsteerpriv.inited == false) { + AICWFDBG(LOGSTEER, STEEER_STR"%s bsteerpriv not inited\n", __func__); + return 0; + } + + spin_lock_bh(&(rwnx_vif->bsteerpriv.lock)); + + ent = block_entry_lookup(rwnx_vif, mac); + if (ent) + ret = 1; + + spin_unlock_bh(&(rwnx_vif->bsteerpriv.lock)); + + return ret; +} + +void aicwf_band_steering_block_entry_add(struct rwnx_vif *rwnx_vif, u8_l *mac) +{ + u8_l i; + struct b_steer_block_entry *ent = NULL; + + if (rwnx_vif->bsteerpriv.inited == false) { + AICWFDBG(LOGSTEER, STEEER_STR"%s bsteerpriv not inited\n", __func__); + return; + } + + AICWFDBG(LOGSTEER, STEEER_STR"%s "MAC_FMT_B"\n", __func__, MAC_ARG_B(mac)); + + spin_lock_bh(&(rwnx_vif->bsteerpriv.lock)); + + ent = block_entry_lookup(rwnx_vif, mac); + + /* already exist */ + if (ent) { + AICWFDBG(LOGSTEER, 
STEEER_STR"%s already exist\n", __func__); + ent->entry_expire = B_STEER_BLOCK_ENTRY_EXPIRE; + goto func_return; + } + + /* find an empty entry */ + for (i = 0; i < B_STEER_ENTRY_NUM; i++) { + if (!rwnx_vif->bsteerpriv.block_list[i].used) { + ent = &(rwnx_vif->bsteerpriv.block_list[i]); + break; + } + } + + /* add the entry */ + if (ent) { + AICWFDBG(LOGSTEER, STEEER_STR"%s "MAC_FMT_B"\n", __func__, MAC_ARG_B(mac)); + ent->used = 1; + memcpy(ent->mac, mac, 6); + ent->entry_expire = B_STEER_BLOCK_ENTRY_EXPIRE; + } + +func_return: + spin_unlock_bh(&(rwnx_vif->bsteerpriv.lock)); + + return; +} + +void aicwf_band_steering_block_entry_del(struct rwnx_vif *rwnx_vif, u8_l *mac) +{ + struct b_steer_block_entry *ent = NULL; + + if (rwnx_vif->bsteerpriv.inited == false) { + AICWFDBG(LOGSTEER, STEEER_STR"%s bsteerpriv not inited\n", __func__); + return; + } + + AICWFDBG(LOGSTEER, STEEER_STR"%s "MAC_FMT_B"\n", __func__, MAC_ARG_B(mac)); + + spin_lock_bh(&(rwnx_vif->bsteerpriv.lock)); + + ent = block_entry_lookup(rwnx_vif, mac); + if (ent) + ent->used = 0; + + spin_unlock_bh(&(rwnx_vif->bsteerpriv.lock)); + + return; +} + +void aicwf_band_steering_roam_block_entry_add(struct rwnx_vif *rwnx_vif, u8_l *mac) +{ + u8_l i; + struct b_steer_block_entry *ent = NULL; + + if (rwnx_vif->bsteerpriv.inited == false) { + AICWFDBG(LOGSTEER, STEEER_STR"%s bsteerpriv not inited\n", __func__); + return; + } + + AICWFDBG(LOGSTEER, STEEER_STR"%s "MAC_FMT_B"\n", __func__, MAC_ARG_B(mac)); + + spin_lock_bh(&rwnx_vif->bsteerpriv.lock); + + ent = block_entry_lookup(rwnx_vif, mac); + + /* already exist */ + if (ent) + goto func_return; + + /* find an empty entry */ + for (i = 0; i < B_STEER_ENTRY_NUM; i++) { + if (!rwnx_vif->bsteerpriv.block_list[i].used) { + ent = &(rwnx_vif->bsteerpriv.block_list[i]); + break; + } + } + + /* add the entry */ + if (ent) { + ent->used = 1; + memcpy(ent->mac, mac, 6); + ent->entry_expire = B_STEER_ROAM_BLOCK_ENTRY_EXPIRE; + } + +func_return: + spin_unlock_bh(&rwnx_vif->bsteerpriv.lock); + + return; +} + +void aicwf_band_steering_init(struct rwnx_vif *rwnx_vif) +{ + u8_l i; + + AICWFDBG(LOGSTEER, STEEER_STR"%s\n", __func__); + spin_lock_init(&rwnx_vif->bsteerpriv.lock); + rwnx_vif->bsteerpriv.inited = true; + + spin_lock_bh(&rwnx_vif->bsteerpriv.lock); + + /* block entry */ + for (i = 0; i < B_STEER_ENTRY_NUM; i++) + memset(&(rwnx_vif->bsteerpriv.block_list[i]), 0, sizeof(struct b_steer_block_entry)); + + spin_unlock_bh(&rwnx_vif->bsteerpriv.lock); + + return; +} + +#endif + diff --git a/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/aicwf_steering.h b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/aicwf_steering.h new file mode 100644 index 0000000..b7c59ef --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/aicwf_steering.h @@ -0,0 +1,40 @@ +#ifndef _AICWF_STEERING_H_ +#define _AICWF_STEERING_H_ + +#include +#include "lmac_types.h" + +#ifdef CONFIG_BAND_STEERING + +#define B_STEER_ENTRY_NUM 64//32 + +#define B_STEER_BLOCK_ENTRY_EXPIRE 60 +#define B_STEER_ROAM_BLOCK_ENTRY_EXPIRE 5 +#define STEER_UPFATE_TIME 2000 + +struct rwnx_vif; + +struct b_steer_block_entry { + u8_l used; + u8_l mac[6]; + u32_l entry_expire; +}; + +struct b_steer_priv { + struct b_steer_block_entry block_list[B_STEER_ENTRY_NUM]; + spinlock_t lock; + bool inited; +}; + + +void aicwf_band_steering_expire(struct rwnx_vif *rwnx_vif); +s32_l aicwf_band_steering_block_chk(struct rwnx_vif *rwnx_vif, u8_l *mac); +void aicwf_band_steering_block_entry_add(struct rwnx_vif *rwnx_vif, u8_l *mac); +void 
aicwf_band_steering_block_entry_del(struct rwnx_vif *rwnx_vif, u8_l *mac); +void aicwf_band_steering_roam_block_entry_add(struct rwnx_vif *rwnx_vif, u8_l *mac); +void aicwf_band_steering_init(struct rwnx_vif *rwnx_vif); + +#endif + +#endif + diff --git a/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/aicwf_tcp_ack.c b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/aicwf_tcp_ack.c new file mode 100644 index 0000000..480fd07 --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/aicwf_tcp_ack.c @@ -0,0 +1,630 @@ +#include"aicwf_tcp_ack.h" +//#include"rwnx_tx.h" +//#include "aicwf_tcp_ack.h" +#include"rwnx_defs.h" +extern int intf_tx(struct rwnx_hw *priv,struct msg_buf *msg); +struct msg_buf *intf_tcp_alloc_msg(struct msg_buf *msg) +{ + //printk("%s \n",__func__); + int len=sizeof(struct msg_buf) ; + msg = kzalloc(len , GFP_ATOMIC); + if(!msg) + printk("%s: alloc failed \n", __func__); + memset(msg,0,len); + return msg; +} + +void intf_tcp_drop_msg(struct rwnx_hw *priv, + struct msg_buf *msg) +{ + //printk("%s \n",__func__); + if (msg->skb) + dev_kfree_skb_any(msg->skb); + + kfree(msg); +} + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0) +void tcp_ack_timeout(unsigned long data) +#else +void tcp_ack_timeout(struct timer_list *t) +#endif +{ + //printk("%s \n",__func__); + struct tcp_ack_info *ack_info; + struct msg_buf *msg; + struct tcp_ack_manage *ack_m = NULL; + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0) + ack_info = (struct tcp_ack_info *)data; +#else + ack_info = container_of(t,struct tcp_ack_info,timer); +#endif + + ack_m = container_of(ack_info, struct tcp_ack_manage, + ack_info[ack_info->ack_info_num]); + + write_seqlock_bh(&ack_info->seqlock); + msg = ack_info->msgbuf; + if (ack_info->busy && msg && !ack_info->in_send_msg) { + ack_info->msgbuf = NULL; + ack_info->drop_cnt = 0; + ack_info->in_send_msg = msg; + write_sequnlock_bh(&ack_info->seqlock); + intf_tx(ack_m->priv, msg);//send skb + //ack_info->in_send_msg = NULL;//add by dwx + //write_sequnlock_bh(&ack_info->seqlock); + //intf_tx(ack_m->priv, msg); + return; + } + write_sequnlock_bh(&ack_info->seqlock); +} + +void tcp_ack_init(struct rwnx_hw *priv) +{ + int i; + struct tcp_ack_info *ack_info; + struct tcp_ack_manage *ack_m = &priv->ack_m; + + printk("========== tcp ack debug %s \n",__func__); + memset(ack_m, 0, sizeof(struct tcp_ack_manage)); + ack_m->priv = priv; + spin_lock_init(&ack_m->lock); + atomic_set(&ack_m->max_drop_cnt, TCP_ACK_DROP_CNT); + ack_m->last_time = jiffies; + ack_m->timeout = msecs_to_jiffies(ACK_OLD_TIME); + + for (i = 0; i < TCP_ACK_NUM; i++) { + ack_info = &ack_m->ack_info[i]; + ack_info->ack_info_num = i; + seqlock_init(&ack_info->seqlock); + ack_info->last_time = jiffies; + ack_info->timeout = msecs_to_jiffies(ACK_OLD_TIME); + + #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0) + setup_timer(&ack_info->timer, tcp_ack_timeout, + (unsigned long)ack_info); + #else + timer_setup(&ack_info->timer,tcp_ack_timeout,0); + #endif + } + + atomic_set(&ack_m->enable, 1); + ack_m->ack_winsize = MIN_WIN; +} + +void tcp_ack_deinit(struct rwnx_hw *priv) +{ + int i; + struct tcp_ack_manage *ack_m = &priv->ack_m; + struct msg_buf *drop_msg = NULL; + + printk("%s \n",__func__); + atomic_set(&ack_m->enable, 0); + + for (i = 0; i < TCP_ACK_NUM; i++) { + drop_msg = NULL; + + write_seqlock_bh(&ack_m->ack_info[i].seqlock); + del_timer(&ack_m->ack_info[i].timer); + drop_msg = ack_m->ack_info[i].msgbuf; + ack_m->ack_info[i].msgbuf = NULL; + write_sequnlock_bh(&ack_m->ack_info[i].seqlock); + + if (drop_msg) + 
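+			/* any ACK still parked in this slot is released only after the seqlock has been dropped */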
intf_tcp_drop_msg(priv, drop_msg);//drop skb + } +} + +int tcp_check_quick_ack(unsigned char *buf, + struct tcp_ack_msg *msg) +{ + int ip_hdr_len; + unsigned char *temp; + struct ethhdr *ethhdr; + struct iphdr *iphdr; + struct tcphdr *tcphdr; + + ethhdr = (struct ethhdr *)buf; + if (ethhdr->h_proto != htons(ETH_P_IP)) + return 0; + iphdr = (struct iphdr *)(ethhdr + 1); + if (iphdr->version != 4 || iphdr->protocol != IPPROTO_TCP) + return 0; + ip_hdr_len = iphdr->ihl * 4; + temp = (unsigned char *)(iphdr) + ip_hdr_len; + tcphdr = (struct tcphdr *)temp; + /* TCP_FLAG_ACK */ + if (!(temp[13] & 0x10)) + return 0; + + if (temp[13] & 0x8) { + msg->saddr = iphdr->daddr; + msg->daddr = iphdr->saddr; + msg->source = tcphdr->dest; + msg->dest = tcphdr->source; + msg->seq = ntohl(tcphdr->seq); + return 1; + } + + return 0; +} + +int is_drop_tcp_ack(struct tcphdr *tcphdr, int tcp_tot_len, + unsigned short *win_scale) +{ + //printk("%s \n",__func__); + int drop = 1; + int len = tcphdr->doff * 4; + unsigned char *ptr; + + if(tcp_tot_len > len) { + drop = 0; + } else { + len -= sizeof(struct tcphdr); + ptr = (unsigned char *)(tcphdr + 1); + + while ((len > 0) && drop) { + int opcode = *ptr++; + int opsize; + + switch (opcode) { + case TCPOPT_EOL: + break; + case TCPOPT_NOP: + len--; + continue; + default: + opsize = *ptr++; + if (opsize < 2) + break; + if (opsize > len) + break; + + switch (opcode) { + /* TODO: Add other ignore opt */ + case TCPOPT_TIMESTAMP: + break; + case TCPOPT_WINDOW: + if (*ptr < 15) + *win_scale = (1 << (*ptr)); + //printk("%d\n",*win_scale); + break; + default: + drop = 2; + } + + ptr += opsize - 2; + len -= opsize; + } + } + } + + return drop; +} + + +/* flag:0 for not tcp ack + * 1 for ack which can be drop + * 2 for other ack whith more info + */ + +int tcp_check_ack(unsigned char *buf, + struct tcp_ack_msg *msg, + unsigned short *win_scale) +{ + int ret; + int ip_hdr_len; + int tcp_tot_len; + unsigned char *temp; + struct ethhdr *ethhdr; + struct iphdr *iphdr; + struct tcphdr *tcphdr; + + ethhdr =(struct ethhdr *)buf; + if (ethhdr->h_proto != htons(ETH_P_IP)) + return 0; + + iphdr = (struct iphdr *)(ethhdr + 1); + if (iphdr->version != 4 || iphdr->protocol != IPPROTO_TCP) + return 0; + + ip_hdr_len = iphdr->ihl * 4; + temp = (unsigned char *)(iphdr) + ip_hdr_len; + tcphdr = (struct tcphdr *)temp; + /* TCP_FLAG_ACK */ + if (!(temp[13] & 0x10)) + return 0; + + tcp_tot_len = ntohs(iphdr->tot_len) - ip_hdr_len;// tcp total len + ret = is_drop_tcp_ack(tcphdr, tcp_tot_len, win_scale); + //printk("is drop:%d \n",ret); + + if (ret > 0) { + msg->saddr = iphdr->saddr; + msg->daddr = iphdr->daddr; + msg->source = tcphdr->source; + msg->dest = tcphdr->dest; + msg->seq = ntohl(tcphdr->ack_seq); + msg->win = ntohs(tcphdr->window); + } + + return ret; +} + +/* return val: -1 for not match, others for match */ +int tcp_ack_match(struct tcp_ack_manage *ack_m, + struct tcp_ack_msg *ack_msg) +{ + int i, ret = -1; + unsigned start; + struct tcp_ack_info *ack_info; + struct tcp_ack_msg *ack; + + for (i = 0; ((ret < 0) && (i < TCP_ACK_NUM)); i++) { + ack_info = &ack_m->ack_info[i]; + do { + start = read_seqbegin(&ack_info->seqlock); + ret = -1; + + ack = &ack_info->ack_msg; + if (ack_info->busy && + ack->dest == ack_msg->dest && + ack->source == ack_msg->source && + ack->saddr == ack_msg->saddr && + ack->daddr == ack_msg->daddr) + ret = i; + } while(read_seqretry(&ack_info->seqlock, start)); + } + + return ret; +} + + +void tcp_ack_update(struct tcp_ack_manage *ack_m) +{ + int i; + struct 
tcp_ack_info *ack_info; + + if (time_after(jiffies, ack_m->last_time + ack_m->timeout)) { + spin_lock_bh(&ack_m->lock); + ack_m->last_time = jiffies; + for (i = TCP_ACK_NUM - 1; i >= 0; i--) { + ack_info = &ack_m->ack_info[i]; + write_seqlock_bh(&ack_info->seqlock); + if (ack_info->busy && + time_after(jiffies, ack_info->last_time + + ack_info->timeout)) { + ack_m->free_index = i; + ack_m->max_num--; + ack_info->busy = 0; + } + write_sequnlock_bh(&ack_info->seqlock); + } + spin_unlock_bh(&ack_m->lock); + } +} + +/* return val: -1 for no index, others for index */ +int tcp_ack_alloc_index(struct tcp_ack_manage *ack_m) +{ + int i, ret = -1; + struct tcp_ack_info *ack_info; + unsigned start; + + spin_lock_bh(&ack_m->lock); + if (ack_m->max_num == TCP_ACK_NUM) { + spin_unlock_bh(&ack_m->lock); + return -1; + } + + if (ack_m->free_index >= 0) { + i = ack_m->free_index; + ack_m->free_index = -1; + ack_m->max_num++; + spin_unlock_bh(&ack_m->lock); + return i; + } + + for (i = 0; ((ret < 0) && (i < TCP_ACK_NUM)); i++) { + ack_info = &ack_m->ack_info[i]; + do { + start = read_seqbegin(&ack_info->seqlock); + ret = -1; + if (!ack_info->busy) { + ack_m->free_index = -1; + ack_m->max_num++; + ret = i; + } + } while(read_seqretry(&ack_info->seqlock, start)); + } + spin_unlock_bh(&ack_m->lock); + + return ret; +} + + +/* return val: 0 for not handle tx, 1 for handle tx */ +int tcp_ack_handle(struct msg_buf *new_msgbuf, + struct tcp_ack_manage *ack_m, + struct tcp_ack_info *ack_info, + struct tcp_ack_msg *ack_msg, + int type) +{ + int quick_ack = 0; + struct tcp_ack_msg *ack; + int ret = 0; + struct msg_buf *drop_msg = NULL; + + //printk("%s %d",__func__,type); + write_seqlock_bh(&ack_info->seqlock); + + ack_info->last_time = jiffies; + ack = &ack_info->ack_msg; + + if (type == 2) { + if (U32_BEFORE(ack->seq, ack_msg->seq)) { + ack->seq = ack_msg->seq; + if (ack_info->psh_flag && + !U32_BEFORE(ack_msg->seq, + ack_info->psh_seq)) { + ack_info->psh_flag = 0; + } + + if (ack_info->msgbuf) { + //printk("%lx \n",ack_info->msgbuf); + drop_msg = ack_info->msgbuf; + ack_info->msgbuf = NULL; + del_timer(&ack_info->timer); + }else{ + //printk("msgbuf is NULL \n"); + } + + ack_info->in_send_msg = NULL; + ack_info->drop_cnt = atomic_read(&ack_m->max_drop_cnt); + } else { + printk("%s before abnormal ack: %d, %d\n", + __func__, ack->seq, ack_msg->seq); + drop_msg = new_msgbuf; + ret = 1; + } + } else if (U32_BEFORE(ack->seq, ack_msg->seq)) { + if (ack_info->msgbuf) { + drop_msg = ack_info->msgbuf; + ack_info->msgbuf = NULL; + } + + if (ack_info->psh_flag && + !U32_BEFORE(ack_msg->seq, ack_info->psh_seq)) { + ack_info->psh_flag = 0; + quick_ack = 1; + } else { + ack_info->drop_cnt++; + } + + ack->seq = ack_msg->seq; + + if (quick_ack || (!ack_info->in_send_msg && + (ack_info->drop_cnt >= + atomic_read(&ack_m->max_drop_cnt)))) { + ack_info->drop_cnt = 0; + ack_info->in_send_msg = new_msgbuf; + del_timer(&ack_info->timer); + } else { + ret = 1; + ack_info->msgbuf = new_msgbuf; + if (!timer_pending(&ack_info->timer)) + mod_timer(&ack_info->timer, + (jiffies + msecs_to_jiffies(5))); + } + } else { + printk("%s before ack: %d, %d\n", + __func__, ack->seq, ack_msg->seq); + drop_msg = new_msgbuf; + ret = 1; + } + + write_sequnlock_bh(&ack_info->seqlock); + + if (drop_msg) + intf_tcp_drop_msg(ack_m->priv, drop_msg);// drop skb + + return ret; +} + +int tcp_ack_handle_new(struct msg_buf *new_msgbuf, + struct tcp_ack_manage *ack_m, + struct tcp_ack_info *ack_info, + struct tcp_ack_msg *ack_msg, + int type) +{ + int quick_ack = 
0; + struct tcp_ack_msg *ack; + int ret = 0; + struct msg_buf *drop_msg = NULL; + //struct msg_buf * send_msg = NULL; + //printk("",); + write_seqlock_bh(&ack_info->seqlock); + + ack_info->last_time = jiffies; + ack = &ack_info->ack_msg; + + if(U32_BEFORE(ack->seq, ack_msg->seq)){ + if (ack_info->msgbuf) { + drop_msg = ack_info->msgbuf; + ack_info->msgbuf = NULL; + //ack_info->drop_cnt++; + } + + if (ack_info->psh_flag && + !U32_BEFORE(ack_msg->seq, ack_info->psh_seq)) { + ack_info->psh_flag = 0; + quick_ack = 1; + } else { + ack_info->drop_cnt++; + } + + ack->seq = ack_msg->seq; + + if(quick_ack || (!ack_info->in_send_msg && + (ack_info->drop_cnt >= + atomic_read(&ack_m->max_drop_cnt)))){ + ack_info->drop_cnt = 0; + //send_msg = new_msgbuf; + ack_info->in_send_msg = new_msgbuf; + del_timer(&ack_info->timer); + }else{ + ret = 1; + ack_info->msgbuf = new_msgbuf; + if (!timer_pending(&ack_info->timer)) + mod_timer(&ack_info->timer, + (jiffies + msecs_to_jiffies(5))); + } + + //ret = 1; + }else { + printk("%s before ack: %d, %d\n", + __func__, ack->seq, ack_msg->seq); + drop_msg = new_msgbuf; + ret = 1; + } + + /*if(send_msg){ + intf_tx(ack_m->priv,send_msg); + ack_info->in_send_msg=NULL; + }*/ + + //ack_info->in_send_msg=NULL; + + write_sequnlock_bh(&ack_info->seqlock); + + /*if(send_msg){ + intf_tx(ack_m->priv,send_msg); + //ack_info->in_send_msg=NULL; + }*/ + + if (drop_msg) + intf_tcp_drop_msg(ack_m->priv, drop_msg);// drop skb + + return ret; + +} + +void filter_rx_tcp_ack(struct rwnx_hw *priv, + unsigned char *buf, unsigned plen) +{ + int index; + struct tcp_ack_msg ack_msg; + struct tcp_ack_info *ack_info; + struct tcp_ack_manage *ack_m = &priv->ack_m; + + if (!atomic_read(&ack_m->enable)) + return; + + if ((plen > MAX_TCP_ACK) || + !tcp_check_quick_ack(buf, &ack_msg)) + return; + + index = tcp_ack_match(ack_m, &ack_msg); + if (index >= 0) { + ack_info = ack_m->ack_info + index; + write_seqlock_bh(&ack_info->seqlock); + ack_info->psh_flag = 1; + ack_info->psh_seq = ack_msg.seq; + write_sequnlock_bh(&ack_info->seqlock); + } +} + +/* return val: 0 for not filter, 1 for filter */ +int filter_send_tcp_ack(struct rwnx_hw *priv, + struct msg_buf *msgbuf, + unsigned char *buf, unsigned int plen) +{ +// printk("%s \n",__func__); + int ret = 0; + int index, drop; + unsigned short win_scale = 0; + unsigned int win = 0; + struct tcp_ack_msg ack_msg; + struct tcp_ack_msg *ack; + struct tcp_ack_info *ack_info; + struct tcp_ack_manage *ack_m = &priv->ack_m; + + tcp_ack_update(ack_m); + drop = tcp_check_ack(buf, &ack_msg, &win_scale); +// printk("drop:%d win_scale:%d",drop,win_scale); + if (!drop && (0 == win_scale)) + return 0; + + index = tcp_ack_match(ack_m, &ack_msg); + if (index >= 0) { + ack_info = ack_m->ack_info + index; + if ((0 != win_scale) && + (ack_info->win_scale != win_scale)) { + write_seqlock_bh(&ack_info->seqlock); + ack_info->win_scale = win_scale; + write_sequnlock_bh(&ack_info->seqlock); + } + + if (drop > 0 && atomic_read(&ack_m->enable)) { + win = ack_info->win_scale * ack_msg.win; + if ((win_scale!=0) && (win < (ack_m->ack_winsize * SIZE_KB))) + { + drop = 2; + printk("%d %d %d",win_scale,win,(ack_m->ack_winsize * SIZE_KB)); + } + ret = tcp_ack_handle_new(msgbuf, ack_m, ack_info, + &ack_msg, drop); + } + + goto out; + } + + index = tcp_ack_alloc_index(ack_m); + if (index >= 0) { + write_seqlock_bh(&ack_m->ack_info[index].seqlock); + ack_m->ack_info[index].busy = 1; + ack_m->ack_info[index].psh_flag = 0; + ack_m->ack_info[index].last_time = jiffies; + 
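+		/* start the new stream at the drop threshold so its first ACK is forwarded without delay */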
ack_m->ack_info[index].drop_cnt = + atomic_read(&ack_m->max_drop_cnt); + ack_m->ack_info[index].win_scale = + (win_scale != 0) ? win_scale : 1; + + //ack_m->ack_info[index].msgbuf = NULL; + //ack_m->ack_info[index].in_send_msg = NULL; + ack = &ack_m->ack_info[index].ack_msg; + ack->dest = ack_msg.dest; + ack->source = ack_msg.source; + ack->saddr = ack_msg.saddr; + ack->daddr = ack_msg.daddr; + ack->seq = ack_msg.seq; + write_sequnlock_bh(&ack_m->ack_info[index].seqlock); + } + +out: + return ret; +} + +void move_tcpack_msg(struct rwnx_hw *priv, + struct msg_buf *msg) +{ + struct tcp_ack_info *ack_info; + struct tcp_ack_manage *ack_m = &priv->ack_m; + int i = 0; + + if (!atomic_read(&ack_m->enable)) + return; + + //if (msg->len > MAX_TCP_ACK) + // return; + + for (i = 0; i < TCP_ACK_NUM; i++) { + ack_info = &ack_m->ack_info[i]; + write_seqlock_bh(&ack_info->seqlock); + if (ack_info->busy && (ack_info->in_send_msg == msg)) + ack_info->in_send_msg = NULL; + write_sequnlock_bh(&ack_info->seqlock); + } +} + diff --git a/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/aicwf_tcp_ack.h b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/aicwf_tcp_ack.h new file mode 100644 index 0000000..542023f --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/aicwf_tcp_ack.h @@ -0,0 +1,92 @@ +#ifndef _AICWF_TCP_ACK_H_ +#define _AICWF_TCP_ACK_H_ + +#include +#include +#include +#include +#include +#include +#include + + +#define TCP_ACK_NUM 32 +#define TCP_ACK_EXIT_VAL 0x800 +#define TCP_ACK_DROP_CNT 10 + +#define ACK_OLD_TIME 4000 +#define U32_BEFORE(a, b) ((__s32)((__u32)a - (__u32)b) <= 0) + +#define MAX_TCP_ACK 200 +/*min window size in KB, it's 256KB*/ +#define MIN_WIN 256 +#define SIZE_KB 1024 + + +struct msg_buf { + //struct list_head list; + struct sk_buff *skb; + struct rwnx_vif *rwnx_vif; + /* data just tx cmd use,not include the head */ +}; + +struct tcp_ack_msg { + u16 source; + u16 dest; + s32 saddr; + s32 daddr; + u32 seq; + u16 win; +}; + + +struct tcp_ack_info { + int ack_info_num; + int busy; + int drop_cnt; + int psh_flag; + u32 psh_seq; + u16 win_scale; + /* seqlock for ack info */ + seqlock_t seqlock; + unsigned long last_time; + unsigned long timeout; + struct timer_list timer; + struct msg_buf *msgbuf; + struct msg_buf *in_send_msg; + struct tcp_ack_msg ack_msg; +}; + +struct tcp_ack_manage { + /* 1 filter */ + atomic_t enable; + int max_num; + int free_index; + unsigned long last_time; + unsigned long timeout; + atomic_t max_drop_cnt; + /* lock for tcp ack alloc and free */ + spinlock_t lock; + struct rwnx_hw *priv; + struct tcp_ack_info ack_info[TCP_ACK_NUM]; + /*size in KB*/ + unsigned int ack_winsize; +}; + +struct msg_buf *intf_tcp_alloc_msg(struct msg_buf *msg); + +void tcp_ack_init(struct rwnx_hw *priv); + +void tcp_ack_deinit(struct rwnx_hw *priv); + + +int is_drop_tcp_ack(struct tcphdr *tcphdr, int tcp_tot_len, unsigned short *win_scale); + +int is_tcp_ack(struct sk_buff *skb, unsigned short *win_scale); + +int filter_send_tcp_ack(struct rwnx_hw *priv, struct msg_buf *msgbuf,unsigned char *buf, unsigned int plen); + +void filter_rx_tcp_ack(struct rwnx_hw *priv,unsigned char *buf, unsigned plen); + +void move_tcpack_msg(struct rwnx_hw *priv, struct msg_buf * msg); +#endif diff --git a/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/aicwf_txrxif.c b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/aicwf_txrxif.c new file mode 100755 index 0000000..b848421 --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/aicwf_txrxif.c @@ -0,0 +1,1358 @@ +/** + * aicwf_bus.c + * + * 
bus function declarations + * + * Copyright (C) AICSemi 2018-2020 + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "lmac_msg.h" +#include "aicwf_txrxif.h" +#include "rwnx_platform.h" +#include "rwnx_defs.h" +#include "rwnx_msg_rx.h" +#include "rwnx_rx.h" +#include "aicwf_rx_prealloc.h" +#ifdef AICWF_SDIO_SUPPORT +#include "sdio_host.h" +#endif + +#ifdef CONFIG_RX_TASKLET +int aicwf_tasklet_rxframes(struct aicwf_rx_priv *rx_priv); +#endif +#ifdef CONFIG_TX_TASKLET +void aicwf_tasklet_tx_process(struct aic_usb_dev *usb_dev); +#endif +extern bool aicwf_usb_rx_aggr; + +#ifdef CONFIG_PREALLOC_RX_SKB +void aicwf_rxframe_queue_init_2(struct rx_frame_queue *pq, int max_len) +{ + //int prio; + + memset(pq, 0, offsetof(struct rx_frame_queue, queuelist) + (sizeof(struct list_head))); + pq->qmax = (u16)max_len; + INIT_LIST_HEAD(&pq->queuelist); +#if 0 + memset(pq, 0, offsetof(struct rx_frame_queue, queuelist) + (sizeof(struct list_head) * num_prio)); + pq->num_prio = (u16)num_prio; + pq->qmax = (u16)max_len; + + for (prio = 0; prio < num_prio; prio++) { + INIT_LIST_HEAD(&pq->queuelist[prio]); + } +#endif +} + +//extern struct aic_sdio_dev *g_sdiodev; +void rxbuff_queue_flush(struct aicwf_rx_priv* rx_priv) +{ + + //int prio; + struct rx_frame_queue *pq = &rx_priv->rxq; + struct list_head *pos; + struct list_head *n; + struct list_head *head; + struct rx_buff *tempbuf = NULL; + + head = &pq->queuelist; + list_for_each_safe(pos, n, head) { + tempbuf = list_entry(pos, struct rx_buff, queue); + list_del_init(&tempbuf->queue); +#if 0 + rxbuff_free(tempbuf); +#else + aicwf_prealloc_rxbuff_free(tempbuf, &rx_priv->rxbuff_lock); +#endif + pq->qcnt--; + } +} +#endif + +int aicwf_bus_init(uint bus_hdrlen, struct device *dev) +{ + int ret = 0; + struct aicwf_bus *bus_if; + + if (!dev) { + AICWFDBG(LOGERROR, "device not found\n"); + return -1; + } + bus_if = dev_get_drvdata(dev); + #if defined CONFIG_USB_SUPPORT && defined CONFIG_USB_NO_TRANS_DMA_MAP + #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)) + bus_if->cmd_buf = usb_alloc_coherent(bus_if->bus_priv.usb->udev, CMD_BUF_MAX, (in_interrupt() ? GFP_ATOMIC : GFP_KERNEL), &bus_if->bus_priv.usb->cmd_dma_trans_addr); + #else + bus_if->cmd_buf = usb_buffer_alloc(bus_if->bus_priv.usb->udev, CMD_BUF_MAX, (in_interrupt() ? 
GFP_ATOMIC : GFP_KERNEL), &bus_if->bus_priv.usb->cmd_dma_trans_addr); + #endif + #else + bus_if->cmd_buf = kzalloc(CMD_BUF_MAX, GFP_KERNEL); + #endif + if(!bus_if->cmd_buf) { + ret = -ENOMEM; + AICWFDBG(LOGERROR, "proto_attach failed\n"); + goto fail; + } + memset(bus_if->cmd_buf, '\0', CMD_BUF_MAX); + + init_completion(&bus_if->bustx_trgg); + init_completion(&bus_if->busrx_trgg); +#ifdef CONFIG_USB_MSG_IN_EP + if(bus_if->bus_priv.usb->msg_in_pipe) { + init_completion(&bus_if->msg_busrx_trgg); + } +#endif + +#ifdef AICWF_SDIO_SUPPORT + bus_if->bustx_thread = kthread_run(sdio_bustx_thread, (void *)bus_if, "aicwf_bustx_thread"); + bus_if->busrx_thread = kthread_run(sdio_busrx_thread, (void *)bus_if->bus_priv.sdio->rx_priv, "aicwf_busrx_thread"); +#endif +#ifdef AICWF_USB_SUPPORT + bus_if->busrx_thread = kthread_run(usb_busrx_thread, (void *)bus_if->bus_priv.usb->rx_priv, "aicwf_busrx_thread"); + bus_if->bustx_thread = kthread_run(usb_bustx_thread, (void *)bus_if, "aicwf_bustx_thread"); + +#if 1 + //waiting for rx/tx thread init finish +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 14, 0) + while(bus_if->busrx_thread->__state != TASK_INTERRUPTIBLE || + bus_if->bustx_thread->__state != TASK_INTERRUPTIBLE) +#else + while(bus_if->busrx_thread->state != TASK_INTERRUPTIBLE || + bus_if->bustx_thread->state != TASK_INTERRUPTIBLE) +#endif + { + AICWFDBG(LOGINFO, "%s waiting for rx/tx thread init finish \r\n", __func__); + msleep(100); + } + //waiting for rx/tx thread Initialization finish +#endif + +#ifdef CONFIG_USB_MSG_IN_EP + if(bus_if->bus_priv.usb->msg_in_pipe) + { + bus_if->msg_busrx_thread = kthread_run(usb_msg_busrx_thread, (void *)bus_if->bus_priv.usb->rx_priv, "aicwf_msg_busrx_thread"); + } +#endif + +#ifdef CONFIG_RX_TASKLET//AIDEN tasklet + AICWFDBG(LOGINFO, "%s use tasklet for rx \r\n", __func__); + tasklet_init(&((bus_if->bus_priv.usb)->recv_tasklet), + (void(*)(unsigned long))aicwf_tasklet_rxframes, + (unsigned long)(bus_if->bus_priv.usb)->rx_priv); +#endif + +#ifdef CONFIG_TX_TASKLET//AIDEN tasklet + AICWFDBG(LOGINFO, "%s use tasklet for tx \r\n", __func__); + tasklet_init(&((bus_if->bus_priv.usb)->xmit_tasklet), + (void(*)(unsigned long))aicwf_tasklet_tx_process, + (unsigned long)(bus_if->bus_priv.usb)); +#endif + + +#endif + + if (IS_ERR(bus_if->bustx_thread)) { + bus_if->bustx_thread = NULL; + AICWFDBG(LOGERROR, "aicwf_bustx_thread run fail\n"); + goto fail; + } + + if (IS_ERR(bus_if->busrx_thread)) { + bus_if->busrx_thread = NULL; + AICWFDBG(LOGERROR, "aicwf_bustx_thread run fail\n"); + goto fail; + } + +#ifdef CONFIG_USB_MSG_IN_EP + if(bus_if->bus_priv.usb->msg_in_pipe){ + if (IS_ERR(bus_if->msg_busrx_thread)) { + bus_if->msg_busrx_thread = NULL; + txrx_err("aicwf_msg_busrx_thread run fail\n"); + goto fail; + } + } +#endif + + + return ret; +fail: + aicwf_bus_deinit(dev); + + return ret; +} + +void aicwf_usb_cancel_all_urbs(struct aic_usb_dev *usb_dev); + +void aicwf_bus_deinit(struct device *dev) +{ + struct aicwf_bus *bus_if; +#ifdef AICWF_USB_SUPPORT + struct aic_usb_dev *usb; +#endif +#ifdef AICWF_SDIO_SUPPORT + struct aic_sdio_dev *sdiodev; +#endif + + if (!dev) { + txrx_err("device not found\n"); + return; + } + AICWFDBG(LOGINFO, "%s Enter\r\n", __func__); + + bus_if = dev_get_drvdata(dev); + aicwf_bus_stop(bus_if); + +#ifdef AICWF_USB_SUPPORT + usb = bus_if->bus_priv.usb; + aicwf_usb_cancel_all_urbs(usb);//AIDEN + + if(g_rwnx_plat && g_rwnx_plat->enabled){ + rwnx_platform_deinit(usb->rwnx_hw); + }else{ + AICWFDBG(LOGINFO, "%s g_rwnx_plat->enabled is false\r\n", __func__); + 
} + +#endif +#ifdef AICWF_SDIO_SUPPORT + sdiodev =bus_if->bus_priv.sdio; + if(g_rwnx_plat && g_rwnx_plat->enabled) + rwnx_platform_deinit(sdiodev->rwnx_hw); +#endif + + if (bus_if->cmd_buf) { + #if defined CONFIG_USB_SUPPORT && defined CONFIG_USB_NO_TRANS_DMA_MAP + #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)) + usb_free_coherent(bus_if->bus_priv.usb->udev, CMD_BUF_MAX, bus_if->cmd_buf, bus_if->bus_priv.usb->cmd_dma_trans_addr); + #else + usb_buffer_free(bus_if->bus_priv.usb->udev, CMD_BUF_MAX, bus_if->cmd_buf, bus_if->bus_priv.usb->cmd_dma_trans_addr); + #endif + #else + kfree(bus_if->cmd_buf); + #endif + bus_if->cmd_buf = NULL; + } + + if (bus_if->bustx_thread) { + AICWFDBG(LOGINFO, "%s stop to bustx_thread!! \r\n", __func__); + + + complete_all(&bus_if->bustx_trgg); + kthread_stop(bus_if->bustx_thread); + bus_if->bustx_thread = NULL; + } + + if (usb->rx_priv) + aicwf_rx_deinit(usb->rx_priv); + + rwnx_cmd_mgr_deinit(&usb->cmd_mgr); + +#ifdef CONFIG_TX_TASKLET//AIDEN tasklet + tasklet_kill(&usb->xmit_tasklet); +#endif + + AICWFDBG(LOGINFO, "%s Exit \n", __func__); +} + +void aicwf_frame_tx(void *dev, struct sk_buff *skb) +{ +#ifdef AICWF_SDIO_SUPPORT + struct aic_sdio_dev *sdiodev = (struct aic_sdio_dev *)dev; + aicwf_bus_txdata(sdiodev->bus_if, skb); +#else + struct aic_usb_dev *usbdev = (struct aic_usb_dev *)dev; + struct rwnx_txhdr *txhdr = NULL; + unsigned long flags; + + if (!usbdev->state) { + txrx_err("down\n"); + spin_lock_irqsave(&usbdev->tx_flow_lock, flags); + if(!usbdev->tbusy) + aicwf_usb_tx_flowctrl(usbdev->rwnx_hw, true); + spin_unlock_irqrestore(&usbdev->tx_flow_lock, flags); + txhdr = (struct rwnx_txhdr *)skb->data; + kmem_cache_free(usbdev->rwnx_hw->sw_txhdr_cache, txhdr->sw_hdr); + dev_kfree_skb(skb); + return; + } + aicwf_bus_txdata(usbdev->bus_if, skb); +#endif +} + +struct aicwf_tx_priv* aicwf_tx_init(void *arg) +{ + struct aicwf_tx_priv* tx_priv; + + tx_priv = kzalloc(sizeof(struct aicwf_tx_priv), GFP_KERNEL); + if (!tx_priv) + return NULL; + +#ifdef AICWF_SDIO_SUPPORT + tx_priv->sdiodev = (struct aic_sdio_dev *)arg; +#else + tx_priv->usbdev = (struct aic_usb_dev *)arg; +#endif + + atomic_set(&tx_priv->aggr_count, 0); + tx_priv->aggr_buf = dev_alloc_skb(MAX_AGGR_TXPKT_LEN); + if(!tx_priv->aggr_buf) { + txrx_err("Alloc bus->txdata_buf failed!\n"); + kfree(tx_priv); + return NULL; + } + tx_priv->head = tx_priv->aggr_buf->data; + tx_priv->tail = tx_priv->aggr_buf->data; + + return tx_priv; +} + +void aicwf_tx_deinit(struct aicwf_tx_priv* tx_priv) +{ + if (tx_priv && tx_priv->aggr_buf) + dev_kfree_skb(tx_priv->aggr_buf); + + kfree(tx_priv); +} + +#ifdef CONFIG_PREALLOC_RX_SKB +static bool aicwf_another_ptk_1(struct rx_buff *buffer) +{ + u8 *read = buffer->read; + u16 aggr_len = 0; + + if(read == NULL || read >= (buffer->end - 2)) { + return false; + } + + if(buffer->len < 2) { + return false; + } + + aggr_len = (*read | (*(read + 1) << 8)); + if(aggr_len == 0) { + return false; + } + + return true; +} +#else +static bool aicwf_another_ptk(struct sk_buff *skb) +{ + u8 *data; + u16 aggr_len = 0; + + if (skb->data == NULL || skb->len < 2) { + return false; + } + data = skb->data; + aggr_len = (*skb->data | (*(skb->data + 1) << 8)); + if (aggr_len == 0) { + return false; + } + + return true; +} +#endif + + +#if 0//AIDEN +void rwnx_frame_parser(char* tag, char* data, unsigned long len); +#endif + +#ifdef CONFIG_RX_TASKLET +int aicwf_tasklet_rxframes(struct aicwf_rx_priv *rx_priv) +{ + int ret = 0; + unsigned long flags = 0; + struct sk_buff *skb = NULL; /* Packet for 
event or data frames */ + u16 pkt_len = 0; + //struct sk_buff *skb_inblock = NULL; + u16 aggr_len = 0, adjust_len = 0; + u8 *data = NULL; + u8_l *msg = NULL; + + while (1) { + //aicwf_usb_rx_submit_all_urb_(rx_priv->usbdev); + spin_lock_irqsave(&rx_priv->rxqlock, flags); + if(aicwf_is_framequeue_empty(&rx_priv->rxq)) { + usb_info("no more rxdata\n"); + spin_unlock_irqrestore(&rx_priv->rxqlock,flags); + break; + } + skb = aicwf_frame_dequeue(&rx_priv->rxq); + spin_unlock_irqrestore(&rx_priv->rxqlock, flags); + if (skb == NULL) { + txrx_err("skb_error\r\n"); + break; + } + data = skb->data; + pkt_len = (*skb->data | (*(skb->data + 1) << 8)); + //printk("p:%d, s:%d , %x\n", pkt_len, skb->len, data[2]); +#ifndef CONFIG_USB_RX_REASSEMBLE + if (pkt_len > skb->len) { + AICWFDBG(LOGERROR, "%s pkt_len:%d skb->len:%d\r\n", __func__, pkt_len, skb->len); + dev_kfree_skb(skb); + atomic_dec(&rx_priv->rx_cnt); + continue; + } +#endif + if((skb->data[2] & USB_TYPE_CFG) != USB_TYPE_CFG) { // type : data +#if 0 + aggr_len = pkt_len + RX_HWHRD_LEN; + if (aggr_len & (RX_ALIGNMENT - 1)) + adjust_len = roundup(aggr_len, RX_ALIGNMENT); + else + adjust_len = aggr_len; + + skb_inblock = __dev_alloc_skb(aggr_len + CCMP_OR_WEP_INFO, GFP_ATOMIC);//8 is for ccmp mic or wep icv + if(skb_inblock == NULL){ + txrx_err("no more space! skip!\n"); + skb_pull(skb, adjust_len); + continue; + } + + skb_put(skb_inblock, aggr_len); + memcpy(skb_inblock->data, data, aggr_len); +#if 0//AIDEN + rwnx_frame_parser((char*)__func__, skb_inblock->data + 60, aggr_len - 60); +#endif +#endif + rwnx_rxdataind_aicwf(rx_priv->usbdev->rwnx_hw, skb, (void *)rx_priv); + ///TODO: here need to add rx data process + //skb_pull(skb, adjust_len); + } + else { // type : config + aggr_len = pkt_len; + if (aggr_len & (RX_ALIGNMENT - 1)) + adjust_len = roundup(aggr_len, RX_ALIGNMENT); + else + adjust_len = aggr_len; + + msg = kmalloc(aggr_len+4, GFP_ATOMIC); + if(msg == NULL){ + txrx_err("no more space for msg!\n"); + aicwf_dev_skb_free(skb); + return -EBADE; + } + memcpy(msg, data, aggr_len + 4); + if((*(msg + 2) & 0x7f) == USB_TYPE_CFG_CMD_RSP) + rwnx_rx_handle_msg(rx_priv->usbdev->rwnx_hw, (struct ipc_e2a_msg *)(msg + 4)); + + if((*(msg + 2) & 0x7f) == USB_TYPE_CFG_DATA_CFM) + aicwf_usb_host_tx_cfm_handler(&(rx_priv->usbdev->rwnx_hw->usb_env), (u32 *)(msg + 4)); + skb_pull(skb, adjust_len+4); + kfree(msg); + dev_kfree_skb(skb); + } + + //dev_kfree_skb(skb); + atomic_dec(&rx_priv->rx_cnt); + } + + return ret; +} +#endif + +extern bool rx_urb_sched; +int aicwf_process_rxframes(struct aicwf_rx_priv *rx_priv) +{ +#ifdef AICWF_SDIO_SUPPORT + int ret = 0; + unsigned long flags = 0; + struct sk_buff *skb = NULL; + u16 pkt_len = 0; + struct sk_buff *skb_inblock = NULL; + u16 aggr_len = 0, adjust_len = 0; + u8 *data = NULL; + u8_l *msg = NULL; + + while (1) { + spin_lock_irqsave(&rx_priv->rxqlock, flags); + if(aicwf_is_framequeue_empty(&rx_priv->rxq)) { + spin_unlock_irqrestore(&rx_priv->rxqlock,flags); + break; + } + skb = aicwf_frame_dequeue(&rx_priv->rxq); + spin_unlock_irqrestore(&rx_priv->rxqlock,flags); + if (skb == NULL) { + txrx_err("skb_error\r\n"); + break; + } + while(aicwf_another_ptk(skb)) { + data = skb->data; + pkt_len = (*skb->data | (*(skb->data + 1) << 8)); + + if((skb->data[2] & SDIO_TYPE_CFG) != SDIO_TYPE_CFG) { // type : data + aggr_len = pkt_len + RX_HWHRD_LEN; + + if (aggr_len & (RX_ALIGNMENT - 1)) + adjust_len = roundup(aggr_len, RX_ALIGNMENT); + else + adjust_len = aggr_len; + + skb_inblock = __dev_alloc_skb(aggr_len + CCMP_OR_WEP_INFO, 
GFP_KERNEL); + if(skb_inblock == NULL){ + txrx_err("no more space! skip\n"); + skb_pull(skb, adjust_len); + continue; + } + + skb_put(skb_inblock, aggr_len); + memcpy(skb_inblock->data, data, aggr_len); + rwnx_rxdataind_aicwf(rx_priv->sdiodev->rwnx_hw, skb_inblock, (void *)rx_priv); + skb_pull(skb, adjust_len); + } + else { // type : config + aggr_len = pkt_len; + + if (aggr_len & (RX_ALIGNMENT - 1)) + adjust_len = roundup(aggr_len, RX_ALIGNMENT); + else + adjust_len = aggr_len; + + msg = kmalloc(aggr_len+4, GFP_KERNEL); + if(msg == NULL){ + txrx_err("no more space for msg!\n"); + aicwf_dev_skb_free(skb); + return -EBADE; + } + + memcpy(msg, data, aggr_len + 4); + if((*(msg + 2) & 0x7f) == SDIO_TYPE_CFG_CMD_RSP) + rwnx_rx_handle_msg(rx_priv->sdiodev->rwnx_hw, (struct ipc_e2a_msg *)(msg + 4)); + + if((*(msg + 2) & 0x7f) == SDIO_TYPE_CFG_DATA_CFM) + aicwf_sdio_host_tx_cfm_handler(&(rx_priv->sdiodev->rwnx_hw->sdio_env), (u32 *)(msg + 4)); + skb_pull(skb, adjust_len+4); + kfree(msg); + } + } + + dev_kfree_skb(skb); + atomic_dec(&rx_priv->rx_cnt); + } + + aicwf_sdio_pwr_stctl(rx_priv->sdiodev, SDIO_ACTIVE_ST); + + return ret; +#else //AICWF_USB_SUPPORT + int ret = 0; + unsigned long flags = 0; +#ifndef CONFIG_PREALLOC_RX_SKB + struct sk_buff *skb = NULL;/* Packet for event or data frames */ +#endif + struct sk_buff *skb_inblock = NULL; + + u16 pkt_len = 0; + u16 aggr_len = 0, adjust_len = 0; + u8 *data = NULL; + u8_l *msg = NULL; +//#ifdef CONFIG_USB_RX_AGGR +// struct sk_buff *skb_inblock = NULL; + u8 cnt = 0 ; +//#endif +#ifdef CONFIG_PREALLOC_RX_SKB + struct rx_buff *buffer = NULL; + if(aicwf_usb_rx_aggr){ + while (1) { + spin_lock_irqsave(&rx_priv->rxqlock, flags); + if (!rx_priv->rxq.qcnt) { + usb_info("no more rxdata\n"); + spin_unlock_irqrestore(&rx_priv->rxqlock,flags); + break; + } + buffer = rxbuff_dequeue(&rx_priv->rxq); + spin_unlock_irqrestore(&rx_priv->rxqlock, flags); + + if (buffer == NULL) { + txrx_err("skb_error\r\n"); + ASSERT_ERR(1); + break; + } + + while(aicwf_another_ptk_1(buffer)) { + cnt++; + data = buffer->read; + pkt_len = (*data | (*(data + 1) << 8)); + //printk("%s cnt:%d pkt_len:%d \r\n", __func__, cnt, pkt_len); +#ifndef CONFIG_USB_RX_REASSEMBLE + if (pkt_len > buffer->len) { + AICWFDBG(LOGERROR, "%s pkt_len:%d buffer->len:%d\r\n", __func__, pkt_len, buffer->len); + aicwf_prealloc_rxbuff_free(buffer, &rx_priv->rxbuff_lock); + atomic_dec(&rx_priv->rx_cnt); + return -EBADE; + } +#endif + + if((data[2] & USB_TYPE_CFG) != USB_TYPE_CFG) { // type : data + aggr_len = pkt_len + RX_HWHRD_LEN; + + skb_inblock = __dev_alloc_skb(aggr_len + CCMP_OR_WEP_INFO, GFP_KERNEL); + if (skb_inblock == NULL) { + txrx_err("no more space! 
skip\n"); + aicwf_prealloc_rxbuff_free(buffer, &rx_priv->rxbuff_lock); + atomic_dec(&rx_priv->rx_cnt); + return -EBADE; + } + + skb_put(skb_inblock, aggr_len); + memcpy(skb_inblock->data, data, aggr_len); + rwnx_rxdataind_aicwf(rx_priv->usbdev->rwnx_hw, skb_inblock, (void *)rx_priv); + buffer->read = buffer->read + aggr_len; + buffer->len -= aggr_len; + } + else { // type : config + aggr_len = pkt_len; + if (aggr_len & (RX_ALIGNMENT - 1)) + adjust_len = roundup(aggr_len, RX_ALIGNMENT); + else + adjust_len = aggr_len; + + msg = kmalloc(aggr_len+4, GFP_KERNEL); + if(msg == NULL){ + txrx_err("no more space for msg!\n"); + aicwf_prealloc_rxbuff_free(buffer, &rx_priv->rxbuff_lock); + return -EBADE; + } + memcpy(msg, data, aggr_len + 4); + + if(((*(msg + 2) & 0x7f) == USB_TYPE_CFG_CMD_RSP) && (rx_priv->usbdev->bus_if->state != (int)USB_DOWN_ST)) + rwnx_rx_handle_msg(rx_priv->usbdev->rwnx_hw, (struct ipc_e2a_msg *)(msg + 4)); + + if((*(msg + 2) & 0x7f) == USB_TYPE_CFG_DATA_CFM) + aicwf_usb_host_tx_cfm_handler(&(rx_priv->usbdev->rwnx_hw->usb_env), (u32 *)(msg + 4)); + + if ((*(msg + 2) & 0x7f) == USB_TYPE_CFG_PRINT) + rwnx_rx_handle_print(rx_priv->usbdev->rwnx_hw, msg + 4, aggr_len); + + buffer->read = buffer->read + (adjust_len + 4); + buffer->len -= (adjust_len + 4); + kfree(msg); + } + } + aicwf_prealloc_rxbuff_free(buffer, &rx_priv->rxbuff_lock); + if (rx_urb_sched) { + schedule_work(&rx_priv->usbdev->rx_urb_work); + rx_urb_sched = false; + } + atomic_dec(&rx_priv->rx_cnt); + } + }else{ + while (1) { + spin_lock_irqsave(&rx_priv->rxqlock, flags); + if (!rx_priv->rxq.qcnt) { + usb_info("no more rxdata\n"); + spin_unlock_irqrestore(&rx_priv->rxqlock,flags); + break; + } + buffer = rxbuff_dequeue(&rx_priv->rxq); + spin_unlock_irqrestore(&rx_priv->rxqlock, flags); + + if (buffer == NULL) { + txrx_err("skb_error\r\n"); + ASSERT_ERR(1); + break; + } + data = buffer->data; + pkt_len = (*data | (*(data + 1) << 8)); + //printk("p:%d, s:%d , %x\n", pkt_len, skb->len, data[2]); +#ifndef CONFIG_USB_RX_REASSEMBLE + if (pkt_len > buffer->len) { + AICWFDBG(LOGERROR, "%s pkt_len:%d buffer->len:%d\r\n", __func__, pkt_len, buffer->len); + aicwf_prealloc_rxbuff_free(buffer, &rx_priv->rxbuff_lock); + atomic_dec(&rx_priv->rx_cnt); + continue; + } +#endif + if((data[2] & USB_TYPE_CFG) != USB_TYPE_CFG) { // type : data + skb_inblock = __dev_alloc_skb(pkt_len + RX_HWHRD_LEN + CCMP_OR_WEP_INFO, GFP_KERNEL); + if (skb_inblock == NULL) { + txrx_err("no more space! 
skip\n"); + aicwf_prealloc_rxbuff_free(buffer, &rx_priv->rxbuff_lock); + atomic_dec(&rx_priv->rx_cnt); + continue; + } + + skb_put(skb_inblock, pkt_len + RX_HWHRD_LEN); + memcpy(skb_inblock->data, data, pkt_len + RX_HWHRD_LEN); + rwnx_rxdataind_aicwf(rx_priv->usbdev->rwnx_hw, skb_inblock, (void *)rx_priv); + } + else { // type : config + aggr_len = pkt_len; + if (aggr_len & (RX_ALIGNMENT - 1)) + adjust_len = roundup(aggr_len, RX_ALIGNMENT); + else + adjust_len = aggr_len; + + msg = kmalloc(aggr_len+4, GFP_KERNEL); + if(msg == NULL){ + txrx_err("no more space for msg!\n"); + aicwf_prealloc_rxbuff_free(buffer, &rx_priv->rxbuff_lock); + return -EBADE; + } + memcpy(msg, data, aggr_len + 4); + + if(((*(msg + 2) & 0x7f) == USB_TYPE_CFG_CMD_RSP) && (rx_priv->usbdev->bus_if->state != (int)USB_DOWN_ST)) + rwnx_rx_handle_msg(rx_priv->usbdev->rwnx_hw, (struct ipc_e2a_msg *)(msg + 4)); + + if((*(msg + 2) & 0x7f) == USB_TYPE_CFG_DATA_CFM) + aicwf_usb_host_tx_cfm_handler(&(rx_priv->usbdev->rwnx_hw->usb_env), (u32 *)(msg + 4)); + + if ((*(msg + 2) & 0x7f) == USB_TYPE_CFG_PRINT) + rwnx_rx_handle_print(rx_priv->usbdev->rwnx_hw, msg + 4, aggr_len); + + kfree(msg); + } + + aicwf_prealloc_rxbuff_free(buffer, &rx_priv->rxbuff_lock); + if (rx_urb_sched) { + schedule_work(&rx_priv->usbdev->rx_urb_work); + rx_urb_sched = false; + } + atomic_dec(&rx_priv->rx_cnt); + } + } +#else + if(aicwf_usb_rx_aggr){ + while (1) { + spin_lock_irqsave(&rx_priv->rxqlock, flags); + if(aicwf_is_framequeue_empty(&rx_priv->rxq)) { + usb_info("no more rxdata\n"); + spin_unlock_irqrestore(&rx_priv->rxqlock,flags); + break; + } + skb = aicwf_frame_dequeue(&rx_priv->rxq); + spin_unlock_irqrestore(&rx_priv->rxqlock, flags); + + if (skb == NULL) { + txrx_err("skb_error\r\n"); + ASSERT_ERR(1); + break; + } + while(aicwf_another_ptk(skb)) { + cnt++; + if (cnt > 30) { + //printk("%s err, break %d\n", __func__, cnt); + //break; + } + data = skb->data; + pkt_len = (*skb->data | (*(skb->data + 1) << 8)); + //printk("p:%d, s:%d , %x\n", pkt_len, skb->len, data[2]); +#ifndef CONFIG_USB_RX_REASSEMBLE + if (pkt_len > skb->len) { + AICWFDBG(LOGERROR, "%s pkt_len:%d skb->len:%d\r\n", __func__, pkt_len, skb->len); + //dev_kfree_skb(skb); + //atomic_dec(&rx_priv->rx_cnt); + //continue; + break; + } +#endif + if((skb->data[2] & USB_TYPE_CFG) != USB_TYPE_CFG) { // type : data + aggr_len = pkt_len + RX_HWHRD_LEN; + adjust_len = aggr_len; + skb_inblock = __dev_alloc_skb(aggr_len + CCMP_OR_WEP_INFO, GFP_KERNEL);//8 is for ccmp mic or wep icv + if(skb_inblock == NULL){ + txrx_err("no more space! 
skip!\n"); + skb_pull(skb, adjust_len); + continue; + } + + skb_put(skb_inblock, aggr_len); + memcpy(skb_inblock->data, data, aggr_len); + rwnx_rxdataind_aicwf(rx_priv->usbdev->rwnx_hw, skb_inblock, (void *)rx_priv); + + ///TODO: here need to add rx data process + + skb_pull(skb, adjust_len); + + } + else { // type : config + aggr_len = pkt_len; + if (aggr_len & (RX_ALIGNMENT - 1)) + adjust_len = roundup(aggr_len, RX_ALIGNMENT); + else + adjust_len = aggr_len; + + msg = kmalloc(aggr_len+4, GFP_KERNEL);//hdr(4)+config(aggrlen) + if(msg == NULL){ + txrx_err("no more space for msg!\n"); + aicwf_dev_skb_free(skb); + return -EBADE; + } + memcpy(msg, data, aggr_len + 4); + + if(((*(msg + 2) & 0x7f) == USB_TYPE_CFG_CMD_RSP) && (rx_priv->usbdev->bus_if->state != (int)USB_DOWN_ST)) + rwnx_rx_handle_msg(rx_priv->usbdev->rwnx_hw, (struct ipc_e2a_msg *)(msg + 4)); + + if((*(msg + 2) & 0x7f) == USB_TYPE_CFG_DATA_CFM) + aicwf_usb_host_tx_cfm_handler(&(rx_priv->usbdev->rwnx_hw->usb_env), (u32 *)(msg + 4)); + + if ((*(msg + 2) & 0x7f) == USB_TYPE_CFG_PRINT) + rwnx_rx_handle_print(rx_priv->usbdev->rwnx_hw, msg + 4, aggr_len); + + + skb_pull(skb, adjust_len+4); + kfree(msg); + + } + } + dev_kfree_skb(skb); + atomic_dec(&rx_priv->rx_cnt); + } + }else{ + while (1) { + spin_lock_irqsave(&rx_priv->rxqlock, flags); + if(aicwf_is_framequeue_empty(&rx_priv->rxq)) { + usb_info("no more rxdata\n"); + spin_unlock_irqrestore(&rx_priv->rxqlock,flags); + break; + } + skb = aicwf_frame_dequeue(&rx_priv->rxq); + spin_unlock_irqrestore(&rx_priv->rxqlock, flags); + + if (skb == NULL) { + txrx_err("skb_error\r\n"); + ASSERT_ERR(1); + break; + } + data = skb->data; + pkt_len = (*skb->data | (*(skb->data + 1) << 8)); + //printk("p:%d, s:%d , %x\n", pkt_len, skb->len, data[2]); +#ifndef CONFIG_USB_RX_REASSEMBLE + if (pkt_len > skb->len) { + AICWFDBG(LOGERROR, "%s pkt_len:%d skb->len:%d\r\n", __func__, pkt_len, skb->len); + dev_kfree_skb(skb); + atomic_dec(&rx_priv->rx_cnt); + continue; + } +#endif + + if((skb->data[2] & USB_TYPE_CFG) != USB_TYPE_CFG) { // type : data + rwnx_rxdataind_aicwf(rx_priv->usbdev->rwnx_hw, skb, (void *)rx_priv); + } + else { // type : config + aggr_len = pkt_len; + if (aggr_len & (RX_ALIGNMENT - 1)) + adjust_len = roundup(aggr_len, RX_ALIGNMENT); + else + adjust_len = aggr_len; + + msg = kmalloc(aggr_len+4, GFP_KERNEL); + if(msg == NULL){ + txrx_err("no more space for msg!\n"); + aicwf_dev_skb_free(skb); + return -EBADE; + } + memcpy(msg, data, aggr_len + 4); + + if(((*(msg + 2) & 0x7f) == USB_TYPE_CFG_CMD_RSP) && (rx_priv->usbdev->bus_if->state != (int)USB_DOWN_ST)) + rwnx_rx_handle_msg(rx_priv->usbdev->rwnx_hw, (struct ipc_e2a_msg *)(msg + 4)); + + if((*(msg + 2) & 0x7f) == USB_TYPE_CFG_DATA_CFM) + aicwf_usb_host_tx_cfm_handler(&(rx_priv->usbdev->rwnx_hw->usb_env), (u32 *)(msg + 4)); + + if ((*(msg + 2) & 0x7f) == USB_TYPE_CFG_PRINT) + rwnx_rx_handle_print(rx_priv->usbdev->rwnx_hw, msg + 4, aggr_len); + + skb_pull(skb, adjust_len+4); + kfree(msg); + dev_kfree_skb(skb); + } + atomic_dec(&rx_priv->rx_cnt); + } + } +#endif + + return ret; +#endif //AICWF_SDIO_SUPPORT +} + + +#ifdef CONFIG_USB_MSG_IN_EP +int aicwf_process_msg_rxframes(struct aicwf_rx_priv *rx_priv) +{ + int ret = 0; + unsigned long flags = 0; + struct sk_buff *skb = NULL; /* Packet for event or data frames */ + u16 pkt_len = 0; + struct sk_buff *skb_inblock = NULL; + u16 aggr_len = 0, adjust_len = 0; + u8 *data = NULL; + u8_l *msg = NULL; + + while (1) { + spin_lock_irqsave(&rx_priv->msg_rxqlock, flags); + 
if(aicwf_is_framequeue_empty(&rx_priv->msg_rxq)) { + usb_info("no more rxmsg\n"); + spin_unlock_irqrestore(&rx_priv->msg_rxqlock,flags); + break; + } + skb = aicwf_frame_dequeue(&rx_priv->msg_rxq); + spin_unlock_irqrestore(&rx_priv->msg_rxqlock, flags); + if (skb == NULL) { + txrx_err("skb_error\r\n"); + break; + } + data = skb->data; + pkt_len = (*skb->data | (*(skb->data + 1) << 8)); + //printk("p:%d, s:%d , %x\n", pkt_len, skb->len, data[2]); +#if 0 //amsdu > 1600 + if (pkt_len > 1600) { + dev_kfree_skb(skb); + atomic_dec(&rx_priv->rx_cnt); + continue; + } +#endif + + if((skb->data[2] & USB_TYPE_CFG) != USB_TYPE_CFG) { // type : data + aggr_len = pkt_len + RX_HWHRD_LEN; + if (aggr_len & (RX_ALIGNMENT - 1)) + adjust_len = roundup(aggr_len, RX_ALIGNMENT); + else + adjust_len = aggr_len; + + skb_inblock = __dev_alloc_skb(aggr_len + CCMP_OR_WEP_INFO, GFP_KERNEL);//8 is for ccmp mic or wep icv + if(skb_inblock == NULL){ + txrx_err("no more space! skip!\n"); + skb_pull(skb, adjust_len); + continue; + } + + skb_put(skb_inblock, aggr_len); + memcpy(skb_inblock->data, data, aggr_len); +#if 0//AIDEN + rwnx_frame_parser((char*)__func__, skb_inblock->data + 60, aggr_len - 60); +#endif + rwnx_rxdataind_aicwf(rx_priv->usbdev->rwnx_hw, skb_inblock, (void *)rx_priv); + ///TODO: here need to add rx data process + + skb_pull(skb, adjust_len); + } + else { // type : config + aggr_len = pkt_len; + if (aggr_len & (RX_ALIGNMENT - 1)) + adjust_len = roundup(aggr_len, RX_ALIGNMENT); + else + adjust_len = aggr_len; + + msg = kmalloc(aggr_len+4, GFP_KERNEL); + if(msg == NULL){ + txrx_err("no more space for msg!\n"); + aicwf_dev_skb_free(skb); + return -EBADE; + } + memcpy(msg, data, aggr_len + 4); + + if(((*(msg + 2) & 0x7f) == USB_TYPE_CFG_CMD_RSP) && (rx_priv->usbdev->bus_if->state != (int)USB_DOWN_ST)) + rwnx_rx_handle_msg(rx_priv->usbdev->rwnx_hw, (struct ipc_e2a_msg *)(msg + 4)); + + if((*(msg + 2) & 0x7f) == USB_TYPE_CFG_DATA_CFM) + aicwf_usb_host_tx_cfm_handler(&(rx_priv->usbdev->rwnx_hw->usb_env), (u32 *)(msg + 4)); + + if ((*(msg + 2) & 0x7f) == USB_TYPE_CFG_PRINT) + rwnx_rx_handle_print(rx_priv->usbdev->rwnx_hw, msg + 4, aggr_len); + + + skb_pull(skb, adjust_len+4); + kfree(msg); + } + + dev_kfree_skb(skb); + atomic_dec(&rx_priv->msg_rx_cnt); + } + + return ret; +} +#endif + + +#ifdef AICWF_RX_REORDER +static struct recv_msdu *aicwf_rxframe_queue_init(struct list_head *q, int qsize) +{ + int i; + struct recv_msdu *req, *reqs; + + reqs = vmalloc(qsize*sizeof(struct recv_msdu)); + if (reqs == NULL) + return NULL; + + req = reqs; + for (i = 0; i < qsize; i++) { + INIT_LIST_HEAD(&req->rxframe_list); + list_add(&req->rxframe_list, q); + //req->len = 0; + req++; + } + + return reqs; +} +#endif +struct aicwf_rx_priv *aicwf_rx_init(void *arg) +{ + struct aicwf_rx_priv* rx_priv; + rx_priv = kzalloc(sizeof(struct aicwf_rx_priv), GFP_KERNEL); + if (!rx_priv) + return NULL; + +#ifdef AICWF_SDIO_SUPPORT + rx_priv->sdiodev = (struct aic_sdio_dev *)arg; +#else + rx_priv->usbdev = (struct aic_usb_dev *)arg; +#endif + +#ifdef CONFIG_PREALLOC_RX_SKB + aicwf_rxframe_queue_init_2(&rx_priv->rxq, MAX_RXQLEN); +#else + aicwf_frame_queue_init(&rx_priv->rxq, 1, MAX_RXQLEN); +#endif + spin_lock_init(&rx_priv->rxqlock); +#ifdef CONFIG_PREALLOC_RX_SKB + spin_lock_init(&rx_priv->rxbuff_lock); + //aicwf_prealloc_init(); +#endif + atomic_set(&rx_priv->rx_cnt, 0); + +#ifdef CONFIG_USB_MSG_IN_EP + if(rx_priv->usbdev->msg_in_pipe){ + aicwf_frame_queue_init(&rx_priv->msg_rxq, 1, MAX_RXQLEN); + spin_lock_init(&rx_priv->msg_rxqlock); + 
atomic_set(&rx_priv->msg_rx_cnt, 0); + } +#endif + + +#ifdef AICWF_RX_REORDER + INIT_LIST_HEAD(&rx_priv->rxframes_freequeue); + spin_lock_init(&rx_priv->freeq_lock); + rx_priv->recv_frames = aicwf_rxframe_queue_init(&rx_priv->rxframes_freequeue, MAX_REORD_RXFRAME); + if (!rx_priv->recv_frames) { + txrx_err("no enough buffer for free recv frame queue!\n"); + kfree(rx_priv); + return NULL; + } + spin_lock_init(&rx_priv->stas_reord_lock); + INIT_LIST_HEAD(&rx_priv->stas_reord_list); +#endif + + return rx_priv; +} + +#ifdef AICWF_RX_REORDER +static void aicwf_recvframe_queue_deinit(struct list_head *q) +{ + struct recv_msdu *req, *next; + + list_for_each_entry_safe(req, next, q, rxframe_list) { + list_del_init(&req->rxframe_list); + } +} +#endif +void aicwf_rx_deinit(struct aicwf_rx_priv* rx_priv) +{ +#ifdef AICWF_RX_REORDER + struct reord_ctrl_info *reord_info, *tmp; + + AICWFDBG(LOGINFO, "%s Enter\n", __func__); + + list_for_each_entry_safe(reord_info, tmp, + &rx_priv->stas_reord_list, list) { + reord_deinit_sta(rx_priv, reord_info); + } + +#endif + AICWFDBG(LOGINFO, "stio rx thread\n"); +#ifdef AICWF_SDIO_SUPPORT + if (rx_priv->sdiodev->bus_if->busrx_thread) { + complete_all(&rx_priv->sdiodev->bus_if->busrx_trgg); + kthread_stop(rx_priv->sdiodev->bus_if->busrx_thread); + rx_priv->sdiodev->bus_if->busrx_thread = NULL; + } +#endif +#ifdef AICWF_USB_SUPPORT + +#ifdef CONFIG_RX_TASKLET//AIDEN tasklet + tasklet_kill(&rx_priv->usbdev->recv_tasklet); +#endif + if (rx_priv->usbdev->bus_if->busrx_thread) { + complete_all(&rx_priv->usbdev->bus_if->busrx_trgg); + kthread_stop(rx_priv->usbdev->bus_if->busrx_thread); + rx_priv->usbdev->bus_if->busrx_thread = NULL; + } +#ifdef CONFIG_USB_MSG_IN_EP + if(rx_priv->usbdev->msg_in_pipe){ + if (rx_priv->usbdev->bus_if->msg_busrx_thread) { + complete_all(&rx_priv->usbdev->bus_if->msg_busrx_trgg); + kthread_stop(rx_priv->usbdev->bus_if->msg_busrx_thread); + rx_priv->usbdev->bus_if->msg_busrx_thread = NULL; + } + } +#endif +#ifdef CONFIG_USB_RX_REASSEMBLE + if (rx_priv->rx_reassemble_skb) { + aicwf_dev_skb_free(rx_priv->rx_reassemble_skb); + } + if (rx_priv->rx_msg_reassemble_skb) { + aicwf_dev_skb_free(rx_priv->rx_msg_reassemble_skb); + } +#endif + +#endif + +#ifdef CONFIG_USB_MSG_IN_EP + if(rx_priv->usbdev->msg_in_pipe){ + aicwf_frame_queue_flush(&rx_priv->msg_rxq); + } +#endif + +#ifdef CONFIG_PREALLOC_RX_SKB + rxbuff_queue_flush(rx_priv); +#else + aicwf_frame_queue_flush(&rx_priv->rxq); +#endif + +#ifdef AICWF_RX_REORDER + aicwf_recvframe_queue_deinit(&rx_priv->rxframes_freequeue); + if (rx_priv->recv_frames) + vfree(rx_priv->recv_frames); +#endif + +#ifdef CONFIG_PREALLOC_RX_SKB + //aicwf_prealloc_exit(); +#endif + kfree(rx_priv); + AICWFDBG(LOGINFO, "%s Exit\n", __func__); + +} + +bool aicwf_rxframe_enqueue(struct device *dev, struct frame_queue *q, struct sk_buff *pkt) +{ + return aicwf_frame_enq(dev, q, pkt, 0); +} + + +void aicwf_dev_skb_free(struct sk_buff *skb) +{ + if (!skb) + return; + + dev_kfree_skb_any(skb); +} + +static struct sk_buff *aicwf_frame_queue_penq(struct frame_queue *pq, int prio, struct sk_buff *p) +{ + struct sk_buff_head *q; + + if (pq->queuelist[prio].qlen >= pq->qmax) + return NULL; + + q = &pq->queuelist[prio]; + __skb_queue_tail(q, p); + pq->qcnt++; + if (pq->hi_prio < prio) + pq->hi_prio = (u16)prio; + + return p; +} + +void aicwf_frame_queue_flush(struct frame_queue *pq) +{ + int prio; + struct sk_buff_head *q; + struct sk_buff *p, *next; + + for (prio = 0; prio < pq->num_prio; prio++) + { + q = &pq->queuelist[prio]; + 
skb_queue_walk_safe(q, p, next) { + skb_unlink(p, q); + aicwf_dev_skb_free(p); + pq->qcnt--; + } + } +} + +void aicwf_frame_queue_init(struct frame_queue *pq, int num_prio, int max_len) +{ + int prio; + + memset(pq, 0, offsetof(struct frame_queue, queuelist) + (sizeof(struct sk_buff_head) * num_prio)); + pq->num_prio = (u16)num_prio; + pq->qmax = (u16)max_len; + + for (prio = 0; prio < num_prio; prio++) { + skb_queue_head_init(&pq->queuelist[prio]); + } +} + +struct sk_buff *aicwf_frame_queue_peek_tail(struct frame_queue *pq, int *prio_out) +{ + int prio; + + if (pq->qcnt == 0) + return NULL; + + for (prio = 0; prio < pq->hi_prio; prio++) + if (!skb_queue_empty(&pq->queuelist[prio])) + break; + + if (prio_out) + *prio_out = prio; + + return skb_peek_tail(&pq->queuelist[prio]); +} + +bool aicwf_is_framequeue_empty(struct frame_queue *pq) +{ + int prio, len = 0; + + for (prio = 0; prio <= pq->hi_prio; prio++) + len += pq->queuelist[prio].qlen; + + if(len > 0) + return false; + else + return true; +} + +struct sk_buff *aicwf_frame_dequeue(struct frame_queue *pq) +{ + struct sk_buff_head *q; + struct sk_buff *p; + int prio; + + if (pq->qcnt == 0) + return NULL; + + while ((prio = pq->hi_prio) > 0 && skb_queue_empty(&pq->queuelist[prio])) + pq->hi_prio--; + + q = &pq->queuelist[prio]; + p = __skb_dequeue(q); + if (p == NULL) + return NULL; + + pq->qcnt--; + + return p; +} + +static struct sk_buff *aicwf_skb_dequeue_tail(struct frame_queue *pq, int prio) +{ + struct sk_buff_head *q = &pq->queuelist[prio]; + struct sk_buff *p = skb_dequeue_tail(q); + + if (!p) + return NULL; + + pq->qcnt--; + + return p; +} + +bool aicwf_frame_enq(struct device *dev, struct frame_queue *q, struct sk_buff *pkt, int prio) +{ + struct sk_buff *p = NULL; + int prio_modified = -1; + + if (q->queuelist[prio].qlen < q->qmax && q->qcnt < q->qmax) { + aicwf_frame_queue_penq(q, prio, pkt); + return true; + } + if (q->queuelist[prio].qlen >= q->qmax) { + prio_modified = prio; + } else if (q->qcnt >= q->qmax) { + p = aicwf_frame_queue_peek_tail(q, &prio_modified); + if (prio_modified > prio) + return false; + } + + if (prio_modified >= 0) { + if (prio_modified == prio) + return false; + + p = aicwf_skb_dequeue_tail(q, prio_modified); + aicwf_dev_skb_free(p); + + p = aicwf_frame_queue_penq(q, prio_modified, pkt); + if (p == NULL) + txrx_err("failed\n"); + } + + return p != NULL; +} + +#ifdef CONFIG_PREALLOC_RX_SKB +void rxbuff_free(struct rx_buff *rxbuff) +{ + kfree(rxbuff->data); + kfree(rxbuff); +} + +struct rx_buff *rxbuff_queue_penq(struct rx_frame_queue *pq, struct rx_buff *p) +{ + + struct list_head *q; + if (pq->qcnt >= pq->qmax) + return NULL; + + q = &pq->queuelist; + list_add_tail(&p->queue,q); + + pq->qcnt++; + + return p; +} + +struct rx_buff *rxbuff_dequeue(struct rx_frame_queue *pq) +{ + struct rx_buff *p = NULL; + + if (pq->qcnt == 0) { + printk("%s %d, rxq is empty\n", __func__, __LINE__); + return NULL; + } + + if(list_empty(&pq->queuelist)) { + printk("%s %d, rxq is empty\n", __func__, __LINE__); + return NULL; + } else { + p = list_first_entry(&pq->queuelist, struct rx_buff, queue); + list_del_init(&p->queue); + pq->qcnt--; + } + + return p; +} + +bool aicwf_rxbuff_enqueue(struct device *dev, struct rx_frame_queue *rxq, struct rx_buff *pkt) +{ +// struct rx_buff *p = NULL; + + if ((rxq == NULL) || (pkt == NULL)) { + printk("%s %d, rxq or pkt is NULL\n", __func__, __LINE__); + return false; + } + + if (rxq->qcnt < rxq->qmax) { + if (rxbuff_queue_penq(rxq, pkt)) { + return true; + } else { + printk("%s %d, 
rxbuff enqueue fail\n", __func__, __LINE__); + return false; + } + } else { + printk("%s %d, rxq or pkt is full\n", __func__, __LINE__); + return false; + } +} +#endif + diff --git a/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/aicwf_txrxif.h b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/aicwf_txrxif.h new file mode 100755 index 0000000..3e40e38 --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/aicwf_txrxif.h @@ -0,0 +1,313 @@ +/** + * aicwf_txrxif.h + * + * bus function declarations + * + * Copyright (C) AICSemi 2018-2020 + */ + +#ifndef _AICWF_TXRXIF_H_ +#define _AICWF_TXRXIF_H_ + +#include +#include +#include "ipc_shared.h" +#include "aicwf_rx_prealloc.h" +#ifdef AICWF_SDIO_SUPPORT +#include "aicwf_sdio.h" +#else +#include "aicwf_usb.h" +#endif + +#define CMD_BUF_MAX 1536 +#define DATA_BUF_MAX 2048 +#define TXPKT_BLOCKSIZE 512 +#define MAX_AGGR_TXPKT_LEN (1536*64) +#define CMD_TX_TIMEOUT 5000 +#define TX_ALIGNMENT 4 + +#ifdef CONFIG_USB_TX_AGGR +#define MAX_USB_AGGR_TXPKT_LEN (1536*15) +#endif + +#define RX_HWHRD_LEN 60 //58->60 word allined +#define CCMP_OR_WEP_INFO 8 +#define MAX_RXQLEN 2000 +#define RX_ALIGNMENT 4 + +#define DEBUG_ERROR_LEVEL 0 +#define DEBUG_DEBUG_LEVEL 1 +#define DEBUG_INFO_LEVEL 2 + +#define DBG_LEVEL DEBUG_DEBUG_LEVEL + +#define txrx_err(fmt, ...) pr_err("txrx_err:<%s,%d>: " fmt, __func__, __LINE__, ##__VA_ARGS__) +#define sdio_err(fmt, ...) pr_err("sdio_err:<%s,%d>: " fmt, __func__, __LINE__, ##__VA_ARGS__) +#define usb_err(fmt, ...) pr_err("usb_err:<%s,%d>: " fmt, __func__, __LINE__, ##__VA_ARGS__) +#if DBG_LEVEL >= DEBUG_DEBUG_LEVEL +#define txrx_dbg(fmt, ...) printk("txrx: " fmt, ##__VA_ARGS__) +#define sdio_dbg(fmt, ...) printk("aicsdio: " fmt, ##__VA_ARGS__) +#define usb_dbg(fmt, ...) printk("aicusb: " fmt, ##__VA_ARGS__) +#else +#define txrx_dbg(fmt, ...) +#define sdio_dbg(fmt, ...) +#define usb_dbg(fmt, ...) +#endif +#if DBG_LEVEL >= DEBUG_INFO_LEVEL +#define txrx_info(fmt, ...) printk("aicsdio: " fmt, ##__VA_ARGS__) +#define sdio_info(fmt, ...) printk("aicsdio: " fmt, ##__VA_ARGS__) +#define usb_info(fmt, ...) printk("aicusb: " fmt, ##__VA_ARGS__) +#else +#define txrx_info(fmt, ...) +#define sdio_info(fmt, ...) +#define usb_info(fmt, ...) 
+#endif + +enum aicwf_bus_state { + BUS_DOWN_ST, + BUS_UP_ST +}; + +struct aicwf_bus_ops { + int (*start) (struct device * dev); + void (*stop) (struct device * dev); + int (*txdata) (struct device * dev, struct sk_buff * skb); + int (*txmsg) (struct device * dev, u8 * msg, uint len); +}; + +struct frame_queue { + u16 num_prio; + u16 hi_prio; + u16 qmax; /* max number of queued frames */ + u16 qcnt; + struct sk_buff_head queuelist[8]; +}; + +#ifdef CONFIG_PREALLOC_RX_SKB +struct rx_frame_queue { + u16 qmax; /* max number of queued frames */ + u16 qcnt; + struct list_head queuelist; +}; +#endif + +struct aicwf_bus { + union { + struct aic_sdio_dev *sdio; + struct aic_usb_dev *usb; + } bus_priv; + struct device *dev; + struct aicwf_bus_ops *ops; + enum aicwf_bus_state state; + u8 *cmd_buf; + struct completion bustx_trgg; + struct completion busrx_trgg; +#ifdef CONFIG_USB_MSG_IN_EP + struct completion msg_busrx_trgg; +#endif + + struct task_struct *bustx_thread; + struct task_struct *busrx_thread; +#ifdef CONFIG_USB_MSG_IN_EP + struct task_struct *msg_busrx_thread; +#endif + +}; + +struct aicwf_tx_priv { +#ifdef AICWF_SDIO_SUPPORT + struct aic_sdio_dev *sdiodev; + int fw_avail_bufcnt; + //for cmd tx + u8 *cmd_buf; + uint cmd_len; + bool cmd_txstate; + bool cmd_tx_succ; + struct semaphore cmd_txsema; + wait_queue_head_t cmd_txdone_wait; + //for data tx + atomic_t tx_pktcnt; + + struct frame_queue txq; + spinlock_t txqlock; + struct semaphore txctl_sema; +#endif +#ifdef AICWF_USB_SUPPORT + struct aic_usb_dev *usbdev; +#ifdef CONFIG_USB_TX_AGGR + int fw_avail_bufcnt; + + //for data tx + atomic_t tx_pktcnt; + + struct frame_queue txq; + spinlock_t txqlock; + spinlock_t txdlock; +#endif +#endif//AICWF_USB_SUPPORT + struct sk_buff *aggr_buf; + atomic_t aggr_count; + u8 *head; + u8 *tail; +}; + + +#define DEFRAG_MAX_WAIT 40 //100 +#ifdef AICWF_RX_REORDER +#define MAX_REORD_RXFRAME 250 +#define REORDER_UPDATE_TIME 500//50 +#define AICWF_REORDER_WINSIZE 64 +//SN_LESS(a, b) a-b<0 is ture +#define SN_LESS(a, b) (((a-b)&0x800)!=0) +#define SN_EQUAL(a, b) (a == b) + +struct reord_ctrl { + struct aicwf_rx_priv *rx_priv; + u8 enable; + u16 ind_sn; + u8 wsize_b; + spinlock_t reord_list_lock; + struct list_head reord_list; + struct timer_list reord_timer; + struct work_struct reord_timer_work; +}; + +struct reord_ctrl_info { + u8 mac_addr[6]; + struct reord_ctrl preorder_ctrl[8]; + struct list_head list; +}; + +struct recv_msdu { + struct sk_buff *pkt; + u8 tid; + u8 forward; + u16 seq_num; + //uint len; + u8 is_amsdu; + u8 is_ap_reord; + u8 ap_fwd_cnt; + u8 ap_resend_cnt; + u8 *rx_data; + struct sk_buff *first_fwd_skb; + struct sk_buff *last_fwd_skb; + struct sk_buff *first_resend_skb; + struct sk_buff *last_resend_skb; + //for pending rx reorder list + struct list_head reord_pending_list; + //for total frame list, when rxframe from busif, dequeue, when submit frame to net, enqueue + struct list_head rxframe_list; + struct reord_ctrl *preorder_ctrl; +}; +#endif + +struct aicwf_rx_priv { +#ifdef AICWF_SDIO_SUPPORT + struct aic_sdio_dev *sdiodev; +#endif +#ifdef AICWF_USB_SUPPORT + struct aic_usb_dev *usbdev; +#endif + + void *rwnx_vif; + atomic_t rx_cnt; + u32 data_len; + spinlock_t rxqlock; +#ifdef CONFIG_PREALLOC_RX_SKB + struct rx_frame_queue rxq; +#else + struct frame_queue rxq; +#endif +#ifdef CONFIG_USB_RX_REASSEMBLE + struct sk_buff *rx_reassemble_skb; + u32 rx_reassemble_total_len; + u32 rx_reassemble_cur_len; + u32 rx_reassemble_total_frags; + u32 rx_reassemble_cur_frags; + + struct sk_buff 
*rx_msg_reassemble_skb; + u32 rx_msg_reassemble_total_len; + u32 rx_msg_reassemble_cur_len; + u32 rx_msg_reassemble_total_frags; + u32 rx_msg_reassemble_cur_frags; +#endif + +#ifdef CONFIG_USB_MSG_IN_EP + atomic_t msg_rx_cnt; + spinlock_t msg_rxqlock; + struct frame_queue msg_rxq; +#endif + + +#ifdef AICWF_RX_REORDER + spinlock_t freeq_lock; + struct list_head rxframes_freequeue; + struct list_head stas_reord_list; + spinlock_t stas_reord_lock; + struct recv_msdu *recv_frames; +#endif + +#ifdef CONFIG_PREALLOC_RX_SKB + spinlock_t rxbuff_lock; +#endif + +}; + +static inline int aicwf_bus_start(struct aicwf_bus *bus) +{ + return bus->ops->start(bus->dev); +} + +static inline void aicwf_bus_stop(struct aicwf_bus *bus) +{ + bus->ops->stop(bus->dev); +} + +static inline int aicwf_bus_txdata(struct aicwf_bus *bus, struct sk_buff *skb) +{ + return bus->ops->txdata(bus->dev, skb); +} + +static inline int aicwf_bus_txmsg(struct aicwf_bus *bus, u8 *msg, uint len) +{ + return bus->ops->txmsg(bus->dev, msg, len); +} + +static inline void aicwf_sched_timeout(u32 millisec) +{ + ulong timeout = 0, expires = 0; + expires = jiffies + msecs_to_jiffies(millisec); + timeout = millisec; + + while (timeout) { + timeout = schedule_timeout(timeout); + if (time_after(jiffies, expires)) + break; + } +} + +int aicwf_bus_init(uint bus_hdrlen, struct device *dev); +void aicwf_bus_deinit(struct device *dev); +void aicwf_tx_deinit(struct aicwf_tx_priv* tx_priv); +void aicwf_rx_deinit(struct aicwf_rx_priv* rx_priv); +struct aicwf_tx_priv* aicwf_tx_init(void *arg); +struct aicwf_rx_priv* aicwf_rx_init(void *arg); +void aicwf_frame_queue_init(struct frame_queue *pq, int num_prio, int max_len); +void aicwf_frame_queue_flush(struct frame_queue *pq); +bool aicwf_frame_enq(struct device *dev, struct frame_queue *q, struct sk_buff *pkt, int prio); +bool aicwf_rxframe_enqueue(struct device *dev, struct frame_queue *q, struct sk_buff *pkt); +bool aicwf_is_framequeue_empty(struct frame_queue *pq); +void aicwf_frame_tx(void *dev, struct sk_buff *skb); +void aicwf_dev_skb_free(struct sk_buff *skb); +struct sk_buff *aicwf_frame_dequeue(struct frame_queue *pq); +struct sk_buff *aicwf_frame_queue_peek_tail(struct frame_queue *pq, int *prio_out); +#ifdef CONFIG_PREALLOC_RX_SKB +void rxbuff_queue_flush(struct aicwf_rx_priv* rx_priv); +void aicwf_rxframe_queue_init_2(struct rx_frame_queue *pq, int max_len); +void rxbuff_free(struct rx_buff *rxbuff); +struct rx_buff *rxbuff_dequeue(struct rx_frame_queue *pq); +bool aicwf_rxbuff_enqueue(struct device *dev, struct rx_frame_queue *rxq, struct rx_buff *pkt); +extern struct aicwf_rx_buff_list aic_rx_buff_list; +#endif + +#endif /* _AICWF_TXRXIF_H_ */ diff --git a/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/aicwf_usb.c b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/aicwf_usb.c new file mode 100644 index 0000000..ad5ae0a --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/aicwf_usb.c @@ -0,0 +1,2734 @@ +/** + * aicwf_usb.c + * + * USB function declarations + * + * Copyright (C) AICSemi 2018-2020 + */ + +#include +#include +#include +#include "aicwf_txrxif.h" +#include "aicwf_usb.h" +#include "rwnx_tx.h" +#include "rwnx_defs.h" +#include "usb_host.h" +#include "rwnx_platform.h" +#include "rwnx_msg_tx.h" + +#ifdef CONFIG_GPIO_WAKEUP +#ifdef CONFIG_PLATFORM_ROCKCHIP +#include +#endif +static int wakeup_enable; +static u32 hostwake_irq_num; +atomic_t irq_count; +spinlock_t irq_lock; +#endif + +#include +extern struct semaphore aicwf_deinit_sem; +extern atomic_t aicwf_deinit_atomic; 
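+/* Buffer layout as parsed by the RX paths in this driver (inferred from the
+ * parsing code itself, not from a separate hardware spec): bytes 0-1 hold the
+ * payload length in little-endian order; byte 2 holds the type flags, where
+ * USB_TYPE_CFG marks config/message buffers and anything else is treated as a
+ * data frame; config buffers carry a 4-byte header ahead of the ipc_e2a_msg
+ * payload. */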
+#define SEM_TIMOUT 2000 + + +#ifdef CONFIG_TXRX_THREAD_PRIO + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0)) +#include "uapi/linux/sched/types.h" +#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)) +#include "linux/sched/types.h" +#else +#include "linux/sched/rt.h" +#endif + +int bustx_thread_prio = 1; +module_param(bustx_thread_prio, int, 0); +int busrx_thread_prio = 1; +module_param(busrx_thread_prio, int, 0); +#endif + +#ifdef CONFIG_USB_RX_AGGR +bool aicwf_usb_rx_aggr = true; +#else +bool aicwf_usb_rx_aggr = false; +#endif +atomic_t rx_urb_cnt; +bool rx_urb_sched = false; +u32 aicwf_usb_max_pkt_size = AICWF_USB_MAX_PKT_SIZE; + +void aicwf_usb_tx_flowctrl(struct rwnx_hw *rwnx_hw, bool state) +{ + struct rwnx_vif *rwnx_vif; + + list_for_each_entry(rwnx_vif, &rwnx_hw->vifs, list) { + if (!rwnx_vif || !rwnx_vif->ndev || !rwnx_vif->up) + continue; + if (state) + netif_tx_stop_all_queues(rwnx_vif->ndev);//netif_stop_queue(rwnx_vif->ndev); + else + netif_tx_wake_all_queues(rwnx_vif->ndev);//netif_wake_queue(rwnx_vif->ndev); + } +} + +static struct aicwf_usb_buf *aicwf_usb_tx_dequeue(struct aic_usb_dev *usb_dev, + struct list_head *q, int *counter, spinlock_t *qlock) +{ + unsigned long flags; + struct aicwf_usb_buf *usb_buf; + + spin_lock_irqsave(qlock, flags); + if (list_empty(q)) { + usb_buf = NULL; + } else { + usb_buf = list_first_entry(q, struct aicwf_usb_buf, list); + list_del_init(&usb_buf->list); + if (counter) + (*counter)--; + } + spin_unlock_irqrestore(qlock, flags); + return usb_buf; +} + +static void aicwf_usb_tx_queue(struct aic_usb_dev *usb_dev, + struct list_head *q, struct aicwf_usb_buf *usb_buf, int *counter, + spinlock_t *qlock) +{ + unsigned long flags; + + spin_lock_irqsave(qlock, flags); + list_add_tail(&usb_buf->list, q); + (*counter)++; + spin_unlock_irqrestore(qlock, flags); +} + +static struct aicwf_usb_buf *aicwf_usb_rx_buf_get(struct aic_usb_dev *usb_dev) +{ + unsigned long flags; + struct aicwf_usb_buf *usb_buf; + + spin_lock_irqsave(&usb_dev->rx_free_lock, flags); + if (list_empty(&usb_dev->rx_free_list)) { + usb_buf = NULL; + } else { + usb_buf = list_first_entry(&usb_dev->rx_free_list, struct aicwf_usb_buf, list); + list_del_init(&usb_buf->list); + } + spin_unlock_irqrestore(&usb_dev->rx_free_lock, flags); + return usb_buf; +} + +static void aicwf_usb_rx_buf_put(struct aic_usb_dev *usb_dev, struct aicwf_usb_buf *usb_buf) +{ + unsigned long flags; + + spin_lock_irqsave(&usb_dev->rx_free_lock, flags); + list_add_tail(&usb_buf->list, &usb_dev->rx_free_list); + spin_unlock_irqrestore(&usb_dev->rx_free_lock, flags); +} + +#ifdef CONFIG_USB_MSG_IN_EP +static struct aicwf_usb_buf *aicwf_usb_msg_rx_buf_get(struct aic_usb_dev *usb_dev) +{ + unsigned long flags; + struct aicwf_usb_buf *usb_buf; + + spin_lock_irqsave(&usb_dev->msg_rx_free_lock, flags); + if (list_empty(&usb_dev->msg_rx_free_list)) { + usb_buf = NULL; + } else { + usb_buf = list_first_entry(&usb_dev->msg_rx_free_list, struct aicwf_usb_buf, list); + list_del_init(&usb_buf->list); + } + spin_unlock_irqrestore(&usb_dev->msg_rx_free_lock, flags); + return usb_buf; +} + +static void aicwf_usb_msg_rx_buf_put(struct aic_usb_dev *usb_dev, struct aicwf_usb_buf *usb_buf) +{ + unsigned long flags; + + spin_lock_irqsave(&usb_dev->msg_rx_free_lock, flags); + list_add_tail(&usb_buf->list, &usb_dev->msg_rx_free_list); + spin_unlock_irqrestore(&usb_dev->msg_rx_free_lock, flags); +} +#endif + +void rwnx_stop_sta_all_queues(struct rwnx_sta *sta, struct rwnx_hw *rwnx_hw) +{ + u8 tid; + struct rwnx_txq *txq; + 
for(tid=0; tid<8; tid++) { + txq = rwnx_txq_sta_get(sta, tid, rwnx_hw); + netif_stop_subqueue(txq->ndev, txq->ndev_idx); + } +} + +void rwnx_wake_sta_all_queues(struct rwnx_sta *sta, struct rwnx_hw *rwnx_hw) +{ + u8 tid; + struct rwnx_txq *txq; + for(tid=0; tid<8; tid++) { + txq = rwnx_txq_sta_get(sta, tid, rwnx_hw); + netif_wake_subqueue(txq->ndev, txq->ndev_idx); + } +} + +static void usb_txc_sta_flowctrl(struct aicwf_usb_buf *usb_buf, struct aic_usb_dev *usb_dev) +{ +#ifdef CONFIG_PER_STA_FC + unsigned long flags; + struct rwnx_sta *sta; + struct txdesc_api *hostdesc; + u8 sta_idx; + if(usb_buf->cfm) + hostdesc = (struct txdesc_api *)((u8 *)usb_buf->skb + 4); + else + hostdesc = (struct txdesc_api *)((u8 *)usb_buf->skb->data + 4); + //printk("txcpl: sta %d\n", hostdesc->host.staid); + sta_idx = hostdesc->host.staid; + if(sta_idx < NX_REMOTE_STA_MAX && !(hostdesc->host.flags & TXU_CNTRL_MGMT)) { + struct rwnx_vif *vif = NULL; + sta = &usb_dev->rwnx_hw->sta_table[sta_idx]; + vif = usb_dev->rwnx_hw->vif_table[sta->vif_idx]; + spin_lock_irqsave(&usb_dev->tx_flow_lock, flags); + atomic_dec(&usb_dev->rwnx_hw->sta_flowctrl[sta_idx].tx_pending_cnt); + //printk("sta:%d, pending:%d, flowctrl=%d\n", sta->sta_idx, sta->tx_pending_cnt, sta->flowctrl); + if(RWNX_VIF_TYPE(vif) == NL80211_IFTYPE_AP) { + if(atomic_read(&usb_dev->rwnx_hw->sta_flowctrl[sta_idx].tx_pending_cnt) < AICWF_USB_FC_PERSTA_LOW_WATER && + usb_dev->rwnx_hw->sta_flowctrl[sta_idx].flowctrl) { + //AICWFDBG(LOGDEBUG, "sta 0x%x:0x%x, %d pending %d, wake\n", sta->mac_addr[4], sta->mac_addr[5], sta->sta_idx, atomic_read(&usb_dev->rwnx_hw->sta_flowctrl[sta_idx].tx_pending_cnt)); + if(!usb_dev->tbusy) + rwnx_wake_sta_all_queues(sta, usb_dev->rwnx_hw); + usb_dev->rwnx_hw->sta_flowctrl[sta_idx].flowctrl = 0; + } + } + spin_unlock_irqrestore(&usb_dev->tx_flow_lock, flags); + } +#endif +} + +static void aicwf_usb_tx_complete(struct urb *urb) +{ + unsigned long flags; + struct aicwf_usb_buf *usb_buf = (struct aicwf_usb_buf *) urb->context; + struct aic_usb_dev *usb_dev = usb_buf->usbdev; + #ifndef CONFIG_USB_TX_AGGR + struct sk_buff *skb; + #endif + + usb_txc_sta_flowctrl(usb_buf, usb_dev); + +#ifdef CONFIG_USB_ALIGN_DATA + if(usb_buf->usb_align_data) { + kfree(usb_buf->usb_align_data); + usb_buf->usb_align_data = NULL; + } +#endif +#ifndef CONFIG_USB_TX_AGGR + if (usb_buf->cfm == false) { + skb = usb_buf->skb; + dev_kfree_skb_any(skb); + } + #if !defined CONFIG_USB_NO_TRANS_DMA_MAP + else { + u8 *buf; + buf = (u8 *)usb_buf->skb; + kfree(buf); + } + #endif + usb_buf->skb = NULL; +#else + AICWFDBG(LOGDEBUG,"tx com %d\n", usb_buf->aggr_cnt); + usb_buf->aggr_cnt = 0; +#endif//CONFIG_USB_TX_AGGR + aicwf_usb_tx_queue(usb_dev, &usb_dev->tx_free_list, usb_buf, + &usb_dev->tx_free_count, &usb_dev->tx_free_lock); + + spin_lock_irqsave(&usb_dev->tx_flow_lock, flags); + if (usb_dev->tx_free_count > AICWF_USB_TX_HIGH_WATER) { + if (usb_dev->tbusy) { + usb_dev->tbusy = false; + aicwf_usb_tx_flowctrl(usb_dev->rwnx_hw, false); + } + } + spin_unlock_irqrestore(&usb_dev->tx_flow_lock, flags); +} + +void aicwf_usb_rx_submit_all_urb_(struct aic_usb_dev *usb_dev); + +#ifdef CONFIG_PREALLOC_RX_SKB +static void aicwf_usb_rx_complete(struct urb *urb) +{ + struct aicwf_usb_buf *usb_buf = (struct aicwf_usb_buf *) urb->context; + struct aic_usb_dev *usb_dev = usb_buf->usbdev; + struct aicwf_rx_priv* rx_priv = usb_dev->rx_priv; + struct rx_buff *rx_buff = NULL; + unsigned long flags = 0; + + rx_buff = usb_buf->rx_buff; + usb_buf->rx_buff = NULL; + + 
atomic_dec(&rx_urb_cnt); + if(atomic_read(&rx_urb_cnt) < 10){ + AICWFDBG(LOGDEBUG, "%s %d \r\n", __func__, atomic_read(&rx_urb_cnt)); + //printk("%s %d \r\n", __func__, atomic_read(&rx_urb_cnt)); + } + + if(!usb_dev->rwnx_hw){ + aicwf_prealloc_rxbuff_free(rx_buff, &rx_priv->rxbuff_lock); + aicwf_usb_rx_buf_put(usb_dev, usb_buf); + AICWFDBG(LOGERROR, "usb_dev->rwnx_hw is not ready \r\n"); + return; + } + + if (urb->actual_length > urb->transfer_buffer_length) { + aicwf_prealloc_rxbuff_free(rx_buff, &rx_priv->rxbuff_lock); + aicwf_usb_rx_buf_put(usb_dev, usb_buf); + aicwf_usb_rx_submit_all_urb_(usb_dev); + return; + } + + if (urb->status != 0 || !urb->actual_length) { + aicwf_prealloc_rxbuff_free(rx_buff, &rx_priv->rxbuff_lock); + aicwf_usb_rx_buf_put(usb_dev, usb_buf); + if(urb->status < 0){ + AICWFDBG(LOGDEBUG, "%s urb->status:%d \r\n", __func__, urb->status); + + if(g_rwnx_plat->wait_disconnect_cb == false){ + g_rwnx_plat->wait_disconnect_cb = true; + if(atomic_read(&aicwf_deinit_atomic) > 0){ + atomic_set(&aicwf_deinit_atomic, 0); + down(&aicwf_deinit_sem); + AICWFDBG(LOGINFO, "%s need to wait for disconnect callback \r\n", __func__); + }else{ + g_rwnx_plat->wait_disconnect_cb = false; + } + } + + return; + }else{ + //schedule_work(&usb_dev->rx_urb_work); + aicwf_usb_rx_submit_all_urb_(usb_dev); + return; + } + } + + if (usb_dev->state == USB_UP_ST) { + spin_lock_irqsave(&rx_priv->rxqlock, flags); + //if (aicwf_usb_rx_aggr) { + rx_buff->len = urb->actual_length; + //printk("%s rx_buff->len:%d \r\n", __func__, rx_buff->len); + //} + + if(!aicwf_rxbuff_enqueue(usb_dev->dev, &rx_priv->rxq, rx_buff)){ + spin_unlock_irqrestore(&rx_priv->rxqlock, flags); + usb_err("rx_priv->rxq is over flow!!!\n"); + aicwf_prealloc_rxbuff_free(rx_buff, &rx_priv->rxbuff_lock); + aicwf_usb_rx_buf_put(usb_dev, usb_buf); + aicwf_usb_rx_submit_all_urb_(usb_dev); + return; + } + spin_unlock_irqrestore(&rx_priv->rxqlock, flags); + atomic_inc(&rx_priv->rx_cnt); + + if(atomic_read(&rx_priv->rx_cnt) == 1){ + complete(&rx_priv->usbdev->bus_if->busrx_trgg); + } + + aicwf_usb_rx_buf_put(usb_dev, usb_buf); + aicwf_usb_rx_submit_all_urb_(usb_dev); + } else { + aicwf_prealloc_rxbuff_free(rx_buff, &rx_priv->rxbuff_lock); + aicwf_usb_rx_buf_put(usb_dev, usb_buf); + } +} + +#else +static void aicwf_usb_rx_complete(struct urb *urb) +{ + struct aicwf_usb_buf *usb_buf = (struct aicwf_usb_buf *) urb->context; + struct aic_usb_dev *usb_dev = usb_buf->usbdev; + struct aicwf_rx_priv* rx_priv = usb_dev->rx_priv; + struct sk_buff *skb = NULL; + unsigned long flags = 0; + + skb = usb_buf->skb; + usb_buf->skb = NULL; + + atomic_dec(&rx_urb_cnt); + if(atomic_read(&rx_urb_cnt) < 10){ + AICWFDBG(LOGDEBUG, "%s %d \r\n", __func__, atomic_read(&rx_urb_cnt)); + //printk("%s %d \r\n", __func__, atomic_read(&rx_urb_cnt)); + } + + if(!usb_dev->rwnx_hw){ + aicwf_dev_skb_free(skb); + aicwf_usb_rx_buf_put(usb_dev, usb_buf); + AICWFDBG(LOGERROR, "usb_dev->rwnx_hw is not ready \r\n"); + return; + } + + if (urb->actual_length > urb->transfer_buffer_length) { + usb_err("urb_rx len error %u/%u\n", urb->actual_length, urb->transfer_buffer_length); + aicwf_dev_skb_free(skb); + aicwf_usb_rx_buf_put(usb_dev, usb_buf); + aicwf_usb_rx_submit_all_urb_(usb_dev); + return; + } + + if (urb->status != 0 || !urb->actual_length) { + aicwf_dev_skb_free(skb); + aicwf_usb_rx_buf_put(usb_dev, usb_buf); + if(urb->status < 0){ + AICWFDBG(LOGDEBUG, "%s urb->status:%d \r\n", __func__, urb->status); + + if(g_rwnx_plat->wait_disconnect_cb == false){ + 
g_rwnx_plat->wait_disconnect_cb = true; + if(atomic_read(&aicwf_deinit_atomic) > 0){ + atomic_set(&aicwf_deinit_atomic, 0); + down(&aicwf_deinit_sem); + AICWFDBG(LOGINFO, "%s need to wait for disconnect callback \r\n", __func__); + }else{ + g_rwnx_plat->wait_disconnect_cb = false; + } + } + + return; + }else{ + //schedule_work(&usb_dev->rx_urb_work); + aicwf_usb_rx_submit_all_urb_(usb_dev); + return; + } + } + +#if 0 + if ((urb->actual_length > 1600 * 30) && (aicwf_usb_rx_aggr)) { + printk("r%d\n", urb->actual_length); + } +#endif + + if (usb_dev->state == USB_UP_ST) { + + skb_put(skb, urb->actual_length); + + if (aicwf_usb_rx_aggr) { + skb->len = urb->actual_length; + } else { +#ifdef CONFIG_USB_RX_REASSEMBLE + bool pkt_check = false; + if (rx_priv->rx_reassemble_skb) { + u32 frag_len = skb->len; + struct sk_buff *reassemble_skb = rx_priv->rx_reassemble_skb; + bool reassemble_valid = false; + bool reassemble_done = false; + if ((rx_priv->rx_reassemble_cur_frags + 1) == rx_priv->rx_reassemble_total_frags) { + if ((rx_priv->rx_reassemble_cur_len + frag_len) == rx_priv->rx_reassemble_total_len) { + reassemble_valid = true; + reassemble_done = true; + } + } else { + if (frag_len == aicwf_usb_max_pkt_size) { + reassemble_valid = true; + } + } + + if (reassemble_valid) { + memcpy((reassemble_skb->data + reassemble_skb->len), skb->data, frag_len); + skb_put(reassemble_skb, skb->len); + rx_priv->rx_reassemble_cur_len += frag_len; + rx_priv->rx_reassemble_cur_frags++; + aicwf_dev_skb_free(skb); + if (reassemble_done) { + skb = reassemble_skb; + rx_priv->rx_reassemble_skb = NULL; + rx_priv->rx_reassemble_total_len = 0; + rx_priv->rx_reassemble_cur_len = 0; + rx_priv->rx_reassemble_total_frags = 0; + rx_priv->rx_reassemble_cur_frags = 0; + } else { + aicwf_usb_rx_buf_put(usb_dev, usb_buf); + aicwf_usb_rx_submit_all_urb_(usb_dev); + return; + } + } else { + usb_err("invalid frag pkt, len=%u/%u/%u, frags=%u/%u\n", frag_len, + rx_priv->rx_reassemble_cur_len, rx_priv->rx_reassemble_cur_len, + rx_priv->rx_reassemble_cur_frags, rx_priv->rx_reassemble_total_frags); + aicwf_dev_skb_free(reassemble_skb); + rx_priv->rx_reassemble_skb = NULL; + rx_priv->rx_reassemble_total_len = 0; + rx_priv->rx_reassemble_cur_len = 0; + rx_priv->rx_reassemble_total_frags = 0; + rx_priv->rx_reassemble_cur_frags = 0; + pkt_check = true; + } + } else { + pkt_check = true; + } + + if (pkt_check) { + bool pkt_drop = false; + u8 type = skb->data[2]; + u32 pkt_len = skb->data[0] | (skb->data[1] << 8); + if ((type & USB_TYPE_CFG) != USB_TYPE_CFG) { + u32 pkt_total_len = pkt_len + RX_HWHRD_LEN; + if ((pkt_total_len > aicwf_usb_max_pkt_size) && (skb->len == aicwf_usb_max_pkt_size)) { + AICWFDBG(LOGINFO, "reassemble pkt, len=%u\n", pkt_total_len); + struct sk_buff *reassemble_skb = __dev_alloc_skb(pkt_total_len, GFP_ATOMIC/*GFP_KERNEL*/); + if (reassemble_skb) { + memcpy(reassemble_skb->data, skb->data, skb->len); + skb_put(reassemble_skb, skb->len); + rx_priv->rx_reassemble_skb = reassemble_skb; + rx_priv->rx_reassemble_total_len = pkt_total_len; + rx_priv->rx_reassemble_cur_len = skb->len; + rx_priv->rx_reassemble_total_frags = ALIGN(pkt_total_len, aicwf_usb_max_pkt_size) / aicwf_usb_max_pkt_size; + rx_priv->rx_reassemble_cur_frags = 1; + } else { + usb_err("reassemble pkt alloc fail, len=%u\n", pkt_total_len); + } + aicwf_dev_skb_free(skb); + aicwf_usb_rx_buf_put(usb_dev, usb_buf); + aicwf_usb_rx_submit_all_urb_(usb_dev); + return; + } else if (pkt_total_len != skb->len) { + usb_err("invalid DATA, len=%u/%u\n", pkt_len, skb->len); + 
pkt_drop = true; + } + } else { + if (type == USB_TYPE_CFG_CMD_RSP) { + u32 pkt_total_len = ALIGN((pkt_len + 4), 4); + if ((pkt_total_len > aicwf_usb_max_pkt_size) && (skb->len == aicwf_usb_max_pkt_size)) { + AICWFDBG(LOGINFO, "reassemble pkt, len=%u\n", pkt_total_len); + struct sk_buff *reassemble_skb = __dev_alloc_skb(pkt_total_len, GFP_ATOMIC/*GFP_KERNEL*/); + if (reassemble_skb) { + memcpy(reassemble_skb->data, skb->data, skb->len); + skb_put(reassemble_skb, skb->len); + rx_priv->rx_reassemble_skb = reassemble_skb; + rx_priv->rx_reassemble_total_len = pkt_total_len; + rx_priv->rx_reassemble_cur_len = skb->len; + rx_priv->rx_reassemble_total_frags = ALIGN(pkt_total_len, aicwf_usb_max_pkt_size) / aicwf_usb_max_pkt_size; + rx_priv->rx_reassemble_cur_frags = 1; + } else { + usb_err("reassemble pkt alloc fail, len=%u\n", pkt_total_len); + } + aicwf_dev_skb_free(skb); + aicwf_usb_rx_buf_put(usb_dev, usb_buf); + aicwf_usb_rx_submit_all_urb_(usb_dev); + return; + } else if (pkt_total_len != skb->len) { + usb_err("invalid CMD_RSP, len=%u/%u\n", pkt_len, skb->len); + pkt_drop = true; + } + } else if (type == USB_TYPE_CFG_DATA_CFM) { + if (!((pkt_len == 8) && (skb->len == 12))) { + usb_err("invalid DATA_CFM, len=%u/%u\n", pkt_len, skb->len); + pkt_drop = true; + } + } else { + usb_err("invalid pkt, type=0x%x, len=%u/%u\n", type, pkt_len, skb->len); + pkt_drop = true; + } + } + + if (pkt_drop) { + aicwf_dev_skb_free(skb); + aicwf_usb_rx_buf_put(usb_dev, usb_buf); + aicwf_usb_rx_submit_all_urb_(usb_dev); + return; + } + } +#endif + } + + spin_lock_irqsave(&rx_priv->rxqlock, flags); + if(!aicwf_rxframe_enqueue(usb_dev->dev, &rx_priv->rxq, skb)){ + spin_unlock_irqrestore(&rx_priv->rxqlock, flags); + usb_err("rx_priv->rxq is over flow!!!\n"); + aicwf_dev_skb_free(skb); + aicwf_usb_rx_buf_put(usb_dev, usb_buf); + aicwf_usb_rx_submit_all_urb_(usb_dev); + return; + } + spin_unlock_irqrestore(&rx_priv->rxqlock, flags); + atomic_inc(&rx_priv->rx_cnt); + +#ifndef CONFIG_RX_TASKLET + //if(!rx_priv->rx_thread_working && (atomic_read(&rx_priv->rx_cnt)>0)){ + if(atomic_read(&rx_priv->rx_cnt) == 1){ + complete(&rx_priv->usbdev->bus_if->busrx_trgg); + } +#else + tasklet_schedule(&rx_priv->usbdev->recv_tasklet); +#endif + + aicwf_usb_rx_buf_put(usb_dev, usb_buf); + aicwf_usb_rx_submit_all_urb_(usb_dev); + //schedule_work(&usb_dev->rx_urb_work); + } else { + aicwf_dev_skb_free(skb); + aicwf_usb_rx_buf_put(usb_dev, usb_buf); + } +} +#endif + +#ifdef CONFIG_USB_MSG_IN_EP +void aicwf_usb_msg_rx_submit_all_urb_(struct aic_usb_dev *usb_dev); + +static void aicwf_usb_msg_rx_complete(struct urb *urb) +{ + struct aicwf_usb_buf *usb_buf = (struct aicwf_usb_buf *) urb->context; + struct aic_usb_dev *usb_dev = usb_buf->usbdev; + struct aicwf_rx_priv* rx_priv = usb_dev->rx_priv; + struct sk_buff *skb = NULL; + unsigned long flags = 0; + + skb = usb_buf->skb; + usb_buf->skb = NULL; + + if (urb->actual_length > urb->transfer_buffer_length) { + usb_err("usb_msg_rx len error %u/%u\n", urb->actual_length, urb->transfer_buffer_length); + aicwf_dev_skb_free(skb); + aicwf_usb_msg_rx_buf_put(usb_dev, usb_buf); + aicwf_usb_msg_rx_submit_all_urb_(usb_dev); + //schedule_work(&usb_dev->msg_rx_urb_work); + return; + } + + if (urb->status != 0 || !urb->actual_length) { + aicwf_dev_skb_free(skb); + aicwf_usb_msg_rx_buf_put(usb_dev, usb_buf); + + if(urb->status < 0){ + AICWFDBG(LOGDEBUG, "%s urb->status:%d \r\n", __func__, urb->status); + return; + }else{ + aicwf_usb_msg_rx_submit_all_urb_(usb_dev); + 
//schedule_work(&usb_dev->msg_rx_urb_work); + return; + } + } + + if (usb_dev->state == USB_UP_ST) { + skb_put(skb, urb->actual_length); + +#ifdef CONFIG_USB_RX_REASSEMBLE + bool pkt_check = false; + if (rx_priv->rx_msg_reassemble_skb) { + u32 frag_len = skb->len; + struct sk_buff *reassemble_skb = rx_priv->rx_msg_reassemble_skb; + bool reassemble_valid = false; + bool reassemble_done = false; + if ((rx_priv->rx_msg_reassemble_cur_frags + 1) == rx_priv->rx_msg_reassemble_total_frags) { + if ((rx_priv->rx_msg_reassemble_cur_len + frag_len) == rx_priv->rx_msg_reassemble_total_len) { + reassemble_valid = true; + reassemble_done = true; + } + } else { + if (frag_len == AICWF_USB_MSG_MAX_PKT_SIZE) { + reassemble_valid = true; + } + } + + if (reassemble_valid) { + memcpy((reassemble_skb->data + reassemble_skb->len), skb->data, frag_len); + skb_put(reassemble_skb, skb->len); + rx_priv->rx_msg_reassemble_cur_len += frag_len; + rx_priv->rx_msg_reassemble_cur_frags++; + aicwf_dev_skb_free(skb); + if (reassemble_done) { + skb = reassemble_skb; + rx_priv->rx_msg_reassemble_skb = NULL; + rx_priv->rx_msg_reassemble_total_len = 0; + rx_priv->rx_msg_reassemble_cur_len = 0; + rx_priv->rx_msg_reassemble_total_frags = 0; + rx_priv->rx_msg_reassemble_cur_frags = 0; + } else { + aicwf_usb_msg_rx_buf_put(usb_dev, usb_buf); + aicwf_usb_msg_rx_submit_all_urb_(usb_dev); + return; + } + } else { + usb_err("invalid frag msg pkt, len=%u/%u/%u, frags=%u/%u\n", frag_len, + rx_priv->rx_msg_reassemble_cur_len, rx_priv->rx_msg_reassemble_cur_len, + rx_priv->rx_msg_reassemble_cur_frags, rx_priv->rx_msg_reassemble_total_frags); + aicwf_dev_skb_free(reassemble_skb); + rx_priv->rx_msg_reassemble_skb = NULL; + rx_priv->rx_msg_reassemble_total_len = 0; + rx_priv->rx_msg_reassemble_cur_len = 0; + rx_priv->rx_msg_reassemble_total_frags = 0; + rx_priv->rx_msg_reassemble_cur_frags = 0; + pkt_check = true; + } + } else { + pkt_check = true; + } + + if (pkt_check) { + bool pkt_drop = false; + u8 type = skb->data[2]; + u32 pkt_len = skb->data[0] | (skb->data[1] << 8); + if ((type & USB_TYPE_CFG) != USB_TYPE_CFG) { + usb_err("invalid msg pkt, type=0x%x, len=%u/%u\n", type, pkt_len, skb->len);; + pkt_drop = true; + } else { + if (type == USB_TYPE_CFG_CMD_RSP) { + u32 pkt_total_len = ALIGN((pkt_len + 4), 4); + if ((pkt_total_len > AICWF_USB_MSG_MAX_PKT_SIZE) && (skb->len == AICWF_USB_MSG_MAX_PKT_SIZE)) { + AICWFDBG(LOGINFO, "reassemble msg pkt, len=%u\n", pkt_total_len); + struct sk_buff *reassemble_skb = __dev_alloc_skb(pkt_total_len, GFP_ATOMIC/*GFP_KERNEL*/); + if (reassemble_skb) { + memcpy(reassemble_skb->data, skb->data, skb->len); + skb_put(reassemble_skb, skb->len); + rx_priv->rx_msg_reassemble_skb = reassemble_skb; + rx_priv->rx_msg_reassemble_total_len = pkt_total_len; + rx_priv->rx_msg_reassemble_cur_len = skb->len; + rx_priv->rx_msg_reassemble_total_frags = ALIGN(pkt_total_len, AICWF_USB_MSG_MAX_PKT_SIZE) / AICWF_USB_MSG_MAX_PKT_SIZE; + rx_priv->rx_msg_reassemble_cur_frags = 1; + } else { + usb_err("reassemble msg pkt alloc fail, len=%u\n", pkt_total_len); + } + aicwf_dev_skb_free(skb); + aicwf_usb_msg_rx_buf_put(usb_dev, usb_buf); + aicwf_usb_msg_rx_submit_all_urb_(usb_dev); + return; + } else if (pkt_total_len != skb->len) { + usb_err("invalid CMD_RSP, len=%u/%u\n", pkt_len, skb->len); + pkt_drop = true; + } + } else if (type == USB_TYPE_CFG_DATA_CFM) { + if (!((pkt_len == 8) && (skb->len == 12))) { + usb_err("invalid DATA_CFM, len=%u/%u\n", pkt_len, skb->len); + pkt_drop = true; + } + } else { + usb_err("invalid msg pkt, 
type=0x%x, len=%u/%u\n", type, pkt_len, skb->len); + pkt_drop = true; + } + } + if (pkt_drop) { + aicwf_dev_skb_free(skb); + aicwf_usb_msg_rx_buf_put(usb_dev, usb_buf); + aicwf_usb_msg_rx_submit_all_urb_(usb_dev); + return; + } + } +#endif + + spin_lock_irqsave(&rx_priv->msg_rxqlock, flags); + if(!aicwf_rxframe_enqueue(usb_dev->dev, &rx_priv->msg_rxq, skb)){ + spin_unlock_irqrestore(&rx_priv->msg_rxqlock, flags); + usb_err("rx_priv->rxq is over flow!!!\n"); + aicwf_dev_skb_free(skb); + return; + } + spin_unlock_irqrestore(&rx_priv->msg_rxqlock, flags); + atomic_inc(&rx_priv->msg_rx_cnt); + complete(&rx_priv->usbdev->bus_if->msg_busrx_trgg); + aicwf_usb_msg_rx_buf_put(usb_dev, usb_buf); + aicwf_usb_msg_rx_submit_all_urb_(usb_dev); + //schedule_work(&usb_dev->msg_rx_urb_work); + } else { + aicwf_dev_skb_free(skb); + aicwf_usb_msg_rx_buf_put(usb_dev, usb_buf); + } +} +#endif + +#ifdef CONFIG_PREALLOC_RX_SKB +//extern int aic_rxbuff_size; +static int aicwf_usb_submit_rx_urb(struct aic_usb_dev *usb_dev, + struct aicwf_usb_buf *usb_buf) +{ + int ret; + struct rx_buff *rx_buff; + + if (!usb_buf || !usb_dev) + return -1; + + if (usb_dev->state != USB_UP_ST) { + usb_err("usb state is not up!\r\n"); + aicwf_usb_rx_buf_put(usb_dev, usb_buf); + return -1; + } + rx_buff = aicwf_prealloc_rxbuff_alloc(&usb_dev->rx_priv->rxbuff_lock); + if (rx_buff == NULL) { + AICWFDBG(LOGERROR, "failed to alloc rxbuff\r\n"); + aicwf_usb_rx_buf_put(usb_dev, usb_buf); + rx_urb_sched = true; + return -1; + } + rx_buff->len = 0; + rx_buff->start = rx_buff->data; + rx_buff->read = rx_buff->start; + rx_buff->end = rx_buff->data + aicwf_rxbuff_size_get(); + + usb_buf->rx_buff = rx_buff; + + usb_fill_bulk_urb(usb_buf->urb, + usb_dev->udev, + usb_dev->bulk_in_pipe, + rx_buff->data, aicwf_rxbuff_size_get(), aicwf_usb_rx_complete, usb_buf); + + usb_buf->usbdev = usb_dev; + + usb_anchor_urb(usb_buf->urb, &usb_dev->rx_submitted); + ret = usb_submit_urb(usb_buf->urb, GFP_ATOMIC); + if (ret) { + usb_err("usb submit rx urb fail:%d\n", ret); + usb_unanchor_urb(usb_buf->urb); + aicwf_prealloc_rxbuff_free(rx_buff, &usb_dev->rx_priv->rxbuff_lock); + aicwf_usb_rx_buf_put(usb_dev, usb_buf); + + msleep(100); + return -1; + }else{ + atomic_inc(&rx_urb_cnt); + } + return 0; +} + +#else +static int aicwf_usb_submit_rx_urb(struct aic_usb_dev *usb_dev, + struct aicwf_usb_buf *usb_buf) +{ + struct sk_buff *skb; + int ret; + + if (!usb_buf || !usb_dev) + return -1; + + if (usb_dev->state != USB_UP_ST) { + usb_err("usb state is not up!\n"); + aicwf_usb_rx_buf_put(usb_dev, usb_buf); + return -1; + } + + if(aicwf_usb_rx_aggr){ + skb = __dev_alloc_skb(AICWF_USB_AGGR_MAX_PKT_SIZE, GFP_ATOMIC/*GFP_KERNEL*/); + } else { + skb = __dev_alloc_skb(aicwf_usb_max_pkt_size, GFP_ATOMIC/*GFP_KERNEL*/); + } + if (!skb) { + aicwf_usb_rx_buf_put(usb_dev, usb_buf); + return -1; + } + + usb_buf->skb = skb; + + if (aicwf_usb_rx_aggr) { + usb_fill_bulk_urb(usb_buf->urb, + usb_dev->udev, + usb_dev->bulk_in_pipe, + skb->data, AICWF_USB_AGGR_MAX_PKT_SIZE, aicwf_usb_rx_complete, usb_buf); + } else { + usb_fill_bulk_urb(usb_buf->urb, + usb_dev->udev, + usb_dev->bulk_in_pipe, + skb->data, aicwf_usb_max_pkt_size, aicwf_usb_rx_complete, usb_buf); + } + + usb_buf->usbdev = usb_dev; + + usb_anchor_urb(usb_buf->urb, &usb_dev->rx_submitted); + ret = usb_submit_urb(usb_buf->urb, GFP_ATOMIC); + if (ret) { + usb_err("usb submit rx urb fail:%d\n", ret); + usb_unanchor_urb(usb_buf->urb); + aicwf_dev_skb_free(usb_buf->skb); + usb_buf->skb = NULL; + aicwf_usb_rx_buf_put(usb_dev, usb_buf); + 
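+        /* Short back-off after a failed usb_submit_urb() before returning the
+         * error; the preallocated-rxbuff variant above does the same. */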
+ msleep(100); + return -1; + }else{ + atomic_inc(&rx_urb_cnt); + } + return 0; +} +#endif + +static void aicwf_usb_rx_submit_all_urb(struct aic_usb_dev *usb_dev) +{ + struct aicwf_usb_buf *usb_buf; +// int i = 0; + + if (usb_dev->state != USB_UP_ST) { + AICWFDBG(LOGERROR, "bus is not up=%d\n", usb_dev->state); + return; + } + + while((usb_buf = aicwf_usb_rx_buf_get(usb_dev)) != NULL) { + if (aicwf_usb_submit_rx_urb(usb_dev, usb_buf)) { + AICWFDBG(LOGERROR, "sub rx fail\n"); + return; + #if 0 + AICWFDBG(LOGERROR, "usb rx refill fail\n"); + if (usb_dev->state != USB_UP_ST) + return; + #endif + } + } + usb_dev->rx_prepare_ready = true; +} + +#ifdef CONFIG_USB_MSG_IN_EP +static int aicwf_usb_submit_msg_rx_urb(struct aic_usb_dev *usb_dev, + struct aicwf_usb_buf *usb_buf) +{ + struct sk_buff *skb; + int ret; + + if (!usb_buf || !usb_dev) + return -1; + + if (usb_dev->state != USB_UP_ST) { + AICWFDBG(LOGERROR, "usb state is not up!\n"); + aicwf_usb_msg_rx_buf_put(usb_dev, usb_buf); + return -1; + } + + skb = __dev_alloc_skb(AICWF_USB_MSG_MAX_PKT_SIZE, GFP_ATOMIC); + if (!skb) { + aicwf_usb_msg_rx_buf_put(usb_dev, usb_buf); + return -1; + } + + usb_buf->skb = skb; + + usb_fill_bulk_urb(usb_buf->urb, + usb_dev->udev, + usb_dev->msg_in_pipe, + skb->data, AICWF_USB_MSG_MAX_PKT_SIZE, aicwf_usb_msg_rx_complete, usb_buf); + + usb_buf->usbdev = usb_dev; + + usb_anchor_urb(usb_buf->urb, &usb_dev->msg_rx_submitted); + ret = usb_submit_urb(usb_buf->urb, GFP_ATOMIC); + if (ret) { + AICWFDBG(LOGERROR, "usb submit msg rx urb fail:%d\n", ret); + usb_unanchor_urb(usb_buf->urb); + aicwf_dev_skb_free(usb_buf->skb); + usb_buf->skb = NULL; + aicwf_usb_msg_rx_buf_put(usb_dev, usb_buf); + + msleep(100); + } + return 0; +} + + +static void aicwf_usb_msg_rx_submit_all_urb(struct aic_usb_dev *usb_dev) +{ + struct aicwf_usb_buf *usb_buf; + + if (usb_dev->state != USB_UP_ST) { + AICWFDBG(LOGERROR, "bus is not up=%d\n", usb_dev->state); + return; + } + + while((usb_buf = aicwf_usb_msg_rx_buf_get(usb_dev)) != NULL) { + if (aicwf_usb_submit_msg_rx_urb(usb_dev, usb_buf)) { + AICWFDBG(LOGERROR, "usb msg rx refill fail\n"); + if (usb_dev->state != USB_UP_ST) + return; + } + } +} +#endif + +#ifdef CONFIG_USB_MSG_IN_EP +void aicwf_usb_msg_rx_submit_all_urb_(struct aic_usb_dev *usb_dev){ + aicwf_usb_msg_rx_submit_all_urb(usb_dev); +} +#endif + +void aicwf_usb_rx_submit_all_urb_(struct aic_usb_dev *usb_dev){ + aicwf_usb_rx_submit_all_urb(usb_dev); +} + + +static void aicwf_usb_rx_prepare(struct aic_usb_dev *usb_dev) +{ + aicwf_usb_rx_submit_all_urb(usb_dev); +} + +#ifdef CONFIG_USB_MSG_IN_EP +static void aicwf_usb_msg_rx_prepare(struct aic_usb_dev *usb_dev) +{ + aicwf_usb_msg_rx_submit_all_urb(usb_dev); +} +#endif + + +static void aicwf_usb_tx_prepare(struct aic_usb_dev *usb_dev) +{ + struct aicwf_usb_buf *usb_buf; + + while(!list_empty(&usb_dev->tx_post_list)){ + usb_buf = aicwf_usb_tx_dequeue(usb_dev, &usb_dev->tx_post_list, + &usb_dev->tx_post_count, &usb_dev->tx_post_lock); + #ifndef CONFIG_USB_TX_AGGR + if(usb_buf->skb) { + dev_kfree_skb(usb_buf->skb); + usb_buf->skb = NULL; + } + #endif + aicwf_usb_tx_queue(usb_dev, &usb_dev->tx_free_list, usb_buf, + &usb_dev->tx_free_count, &usb_dev->tx_free_lock); + } +} + +#ifdef CONFIG_USB_TX_AGGR +int aicwf_usb_send_pkt(struct aic_usb_dev *usb_dev, u8 *buf, uint buf_len) +{ + int ret = 0; + struct aicwf_usb_buf *usb_buf; + unsigned long flags; + bool need_cfm = false; + + if (usb_dev->state != USB_UP_ST) { + AICWFDBG(LOGERROR, "usb state is not up!\n"); + return -EIO; + } + + usb_buf = 
aicwf_usb_tx_dequeue(usb_dev, &usb_dev->tx_free_list, + &usb_dev->tx_free_count, &usb_dev->tx_free_lock); + if (!usb_buf) { + AICWFDBG(LOGERROR, "free:%d, post:%d\n", usb_dev->tx_free_count, usb_dev->tx_post_count); + ret = -ENOMEM; + goto flow_ctrl; + } + + usb_buf->skb = (struct sk_buff *)buf; + usb_buf->usbdev = usb_dev; + if (need_cfm) + usb_buf->cfm = true; + else + usb_buf->cfm = false; + AICWFDBG(LOGERROR, "%s len %d\n", __func__, buf_len); + print_hex_dump(KERN_ERR, "buf ", DUMP_PREFIX_NONE, 16, 1, &buf[0], 32, false); + usb_fill_bulk_urb(usb_buf->urb, usb_dev->udev, usb_dev->bulk_out_pipe, + buf, buf_len, aicwf_usb_tx_complete, usb_buf); + usb_buf->urb->transfer_flags |= URB_ZERO_PACKET; + + aicwf_usb_tx_queue(usb_dev, &usb_dev->tx_post_list, usb_buf, + &usb_dev->tx_post_count, &usb_dev->tx_post_lock); + ret = 0; + + flow_ctrl: + spin_lock_irqsave(&usb_dev->tx_flow_lock, flags); + if (usb_dev->tx_free_count < AICWF_USB_TX_LOW_WATER) { + usb_dev->tbusy = true; + aicwf_usb_tx_flowctrl(usb_dev->rwnx_hw, true); + } + spin_unlock_irqrestore(&usb_dev->tx_flow_lock, flags); + + return ret; +} + +int aicwf_usb_aggr(struct aicwf_tx_priv *tx_priv, struct sk_buff *pkt) +{ + struct rwnx_txhdr *txhdr = (struct rwnx_txhdr *)pkt->data; + u8 usb_header[8]; + u8 adjust_str[4] = {0, 0, 0, 0}; + u32 curr_len = 0; + int allign_len = 0; + u32 data_len = (pkt->len - sizeof(struct rwnx_txhdr) + sizeof(struct txdesc_api)) + 4; + + usb_header[0] =(data_len & 0xff); + usb_header[1] =((data_len >> 8)&0x0f); + usb_header[2] =(data_len & 0xff); + usb_header[3] =((data_len >> 8)&0x0f); + + usb_header[4] =(data_len & 0xff); + usb_header[5] =((data_len >> 8)&0x0f); + usb_header[6] = 0x01; //data + usb_header[7] = 0; //reserved + + memcpy(tx_priv->tail, (u8 *)&usb_header, sizeof(usb_header)); + tx_priv->tail += sizeof(usb_header); + //payload + memcpy(tx_priv->tail, (u8 *)(long)&txhdr->sw_hdr->desc, sizeof(struct txdesc_api)); + tx_priv->tail += sizeof(struct txdesc_api); //hostdesc + memcpy(tx_priv->tail, (u8 *)((u8 *)txhdr + txhdr->sw_hdr->headroom), pkt->len-txhdr->sw_hdr->headroom); + tx_priv->tail += (pkt->len - txhdr->sw_hdr->headroom); + + //word alignment + curr_len = tx_priv->tail - tx_priv->head; + if (curr_len & (TX_ALIGNMENT - 1)) { + allign_len = roundup(curr_len, TX_ALIGNMENT)-curr_len; + memcpy(tx_priv->tail, adjust_str, allign_len); + tx_priv->tail += allign_len; + } + + tx_priv->aggr_buf->dev = pkt->dev; + + if(!txhdr->sw_hdr->need_cfm) { + kmem_cache_free(txhdr->sw_hdr->rwnx_vif->rwnx_hw->sw_txhdr_cache, txhdr->sw_hdr); + skb_pull(pkt, txhdr->sw_hdr->headroom); + consume_skb(pkt); + } + + atomic_inc(&tx_priv->aggr_count); + return 0; +} + +int aicwf_usb_send(struct aicwf_tx_priv *tx_priv) +{ + struct sk_buff *pkt; + struct sk_buff *tx_buf; + struct aic_usb_dev *usbdev = tx_priv->usbdev; + struct aicwf_usb_buf *usb_buf; + u8* buf; + int ret = 0; + int curr_len = 0; + unsigned long flags; + + if (aicwf_is_framequeue_empty(&tx_priv->txq)) { + ret = -1; + AICWFDBG(LOGERROR, "no buf to send\n"); + return ret; + } + + if (usbdev->state != USB_UP_ST) { + AICWFDBG(LOGERROR, "usb state is not up!\n"); + ret = -ENODEV; + return ret; + } + usb_buf = aicwf_usb_tx_dequeue(usbdev, &usbdev->tx_free_list, + &usbdev->tx_free_count, &usbdev->tx_free_lock); + if (!usb_buf) { + AICWFDBG(LOGERROR, "free:%d, post:%d\n", usbdev->tx_free_count, usbdev->tx_post_count); + ret = -ENOMEM; + return ret; + } + + usb_buf->aggr_cnt = 0; + spin_lock_bh(&usbdev->tx_priv->txdlock); + tx_priv->head = usb_buf->skb->data; + 
tx_priv->tail = usb_buf->skb->data; + + while (!aicwf_is_framequeue_empty(&usbdev->tx_priv->txq)) { + if (usbdev->state != USB_UP_ST) { + AICWFDBG(LOGERROR, "usb state is not up, break!\n"); + ret = -ENODEV; + break; + } + + if (usb_buf->aggr_cnt == 10) { + break; + } + spin_lock_bh(&usbdev->tx_priv->txqlock); + pkt = aicwf_frame_dequeue(&usbdev->tx_priv->txq); + if (pkt == NULL) { + AICWFDBG(LOGERROR, "txq no pkt\n"); + spin_unlock_bh(&usbdev->tx_priv->txqlock); + ret = -1; + return ret; + } + atomic_dec(&usbdev->tx_priv->tx_pktcnt); + spin_unlock_bh(&usbdev->tx_priv->txqlock); + if(tx_priv==NULL || tx_priv->tail==NULL || pkt==NULL) { + AICWFDBG(LOGERROR, "null error\n"); + } + aicwf_usb_aggr(tx_priv, pkt); + usb_buf->aggr_cnt++; + } + + tx_buf = usb_buf->skb; + buf = tx_buf->data; + + curr_len = tx_priv->tail - tx_priv->head; + + AICWFDBG(LOGTRACE, "%s len %d, cnt %d\n", __func__,curr_len, usb_buf->aggr_cnt); + tx_buf->len = tx_priv->tail - tx_priv->head; + spin_unlock_bh(&usbdev->tx_priv->txdlock); + usb_fill_bulk_urb(usb_buf->urb, usbdev->udev, usbdev->bulk_out_pipe, + buf, curr_len, aicwf_usb_tx_complete, usb_buf); + usb_buf->urb->transfer_flags |= URB_ZERO_PACKET; + + aicwf_usb_tx_queue(usbdev, &usbdev->tx_post_list, usb_buf, + &usbdev->tx_post_count, &usbdev->tx_post_lock); + + flow_ctrl: + spin_lock_irqsave(&usbdev->tx_flow_lock, flags); + if (usbdev->tx_free_count < AICWF_USB_TX_LOW_WATER) { + usbdev->tbusy = true; + aicwf_usb_tx_flowctrl(usbdev->rwnx_hw, true); + } + spin_unlock_irqrestore(&usbdev->tx_flow_lock, flags); + + return ret; +} + +#endif + +static void aicwf_usb_tx_process(struct aic_usb_dev *usb_dev) +{ + struct aicwf_usb_buf *usb_buf; + int ret = 0; + u8* data = NULL; + +#ifdef CONFIG_USB_TX_AGGR + if (!aicwf_is_framequeue_empty(&usb_dev->tx_priv->txq)) { + if (aicwf_usb_send(usb_dev->tx_priv)) { + AICWFDBG(LOGERROR, "%s no buf send\n", __func__); + } + } +#endif + + while(!list_empty(&usb_dev->tx_post_list)) { + + if (usb_dev->state != USB_UP_ST) { + usb_err("usb state is not up!\n"); + return; + } + + usb_buf = aicwf_usb_tx_dequeue(usb_dev, &usb_dev->tx_post_list, + &usb_dev->tx_post_count, &usb_dev->tx_post_lock); + if(!usb_buf) { + usb_err("can not get usb_buf from tx_post_list!\n"); + return; + } + data = usb_buf->skb->data; + + ret = usb_submit_urb(usb_buf->urb, GFP_KERNEL); + if (ret) { + AICWFDBG(LOGERROR, "aicwf_usb_bus_tx usb_submit_urb FAILED err:%d\n", ret); + #ifdef CONFIG_USB_TX_AGGR + aicwf_usb_tx_queue(usb_dev, &usb_dev->tx_post_list, usb_buf, + &usb_dev->tx_post_count, &usb_dev->tx_post_lock); + break; + #else + goto fail; + #endif + } + + continue; +#ifndef CONFIG_USB_TX_AGGR +fail: + usb_txc_sta_flowctrl(usb_buf, usb_dev); + dev_kfree_skb(usb_buf->skb); + usb_buf->skb = NULL; + aicwf_usb_tx_queue(usb_dev, &usb_dev->tx_free_list, usb_buf, + &usb_dev->tx_free_count, &usb_dev->tx_free_lock); +#endif + } +} + +#ifdef CONFIG_TX_TASKLET +void aicwf_tasklet_tx_process(struct aic_usb_dev *usb_dev){ + aicwf_usb_tx_process(usb_dev); +} +#endif + +static inline void aic_thread_wait_stop(void) +{ +#if 1// PLATFORM_LINUX + #if 0 + while (!kthread_should_stop()) + rtw_msleep_os(10); + #else + set_current_state(TASK_INTERRUPTIBLE); + while (!kthread_should_stop()) { + schedule(); + set_current_state(TASK_INTERRUPTIBLE); + } + __set_current_state(TASK_RUNNING); + #endif +#endif +} + + +int usb_bustx_thread(void *data) +{ + struct aicwf_bus *bus = (struct aicwf_bus *)data; + struct aic_usb_dev *usbdev = bus->bus_priv.usb; + int set_cpu_ret = 0; + +#ifdef 
CONFIG_THREAD_INFO_IN_TASK
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 16, 0))
+ AICWFDBG(LOGINFO, "%s the cpu is:%d\n", __func__, current->thread_info.cpu);
+#else
+ AICWFDBG(LOGINFO, "%s the cpu is:%d\n", __func__, current->cpu);
+#endif
+#endif
+ set_cpu_ret = set_cpus_allowed_ptr(current, cpumask_of(1));
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+ AICWFDBG(LOGINFO, "%s set_cpu_ret is:%d\n", __func__, set_cpu_ret);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 16, 0))
+ AICWFDBG(LOGINFO, "%s change cpu to:%d\n", __func__, current->thread_info.cpu);
+#else
+ AICWFDBG(LOGINFO, "%s change cpu to:%d\n", __func__, current->cpu);
+#endif
+#endif
+
+
+#ifdef CONFIG_TXRX_THREAD_PRIO
+ if (bustx_thread_prio > 0) {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 9, 0))
+ sched_set_fifo_low(current);
+#else
+ struct sched_param param;
+ param.sched_priority = (bustx_thread_prio < MAX_RT_PRIO)?bustx_thread_prio:(MAX_RT_PRIO-1);
+ sched_setscheduler(current, SCHED_FIFO, &param);
+#endif
+ }
+#endif
+ AICWFDBG(LOGINFO, "%s the policy of current thread is:%d\n", __func__, current->policy);
+ AICWFDBG(LOGINFO, "%s the rt_priority of current thread is:%d\n", __func__, current->rt_priority);
+ AICWFDBG(LOGINFO, "%s the current pid is:%d\n", __func__, current->pid);
+
+
+ while (1) {
+ #if 0
+ if(kthread_should_stop()) {
+ usb_err("usb bustx thread stop 2\n");
+ break;
+ }
+ #endif
+ if (!wait_for_completion_interruptible(&bus->bustx_trgg)) {
+ if(usbdev->bus_if->state == BUS_DOWN_ST){
+ AICWFDBG(LOGINFO, "usb bustx thread will stop\n");
+ break;
+ }
+ #ifdef CONFIG_USB_TX_AGGR
+ if ((usbdev->tx_post_count > 0) || !aicwf_is_framequeue_empty(&usbdev->tx_priv->txq))
+ #else
+ if (usbdev->tx_post_count > 0)
+ #endif
+ aicwf_usb_tx_process(usbdev);
+ }
+ }
+
+ aic_thread_wait_stop();
+ AICWFDBG(LOGINFO, "usb bustx thread stop\n");
+
+ return 0;
+}
+
+int usb_busrx_thread(void *data)
+{
+ struct aicwf_rx_priv *rx_priv = (struct aicwf_rx_priv *)data;
+ struct aicwf_bus *bus_if = rx_priv->usbdev->bus_if;
+ int set_cpu_ret = 0;
+
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 16, 0))
+ AICWFDBG(LOGINFO, "%s the cpu is:%d\n", __func__, current->thread_info.cpu);
+#else
+ AICWFDBG(LOGINFO, "%s the cpu is:%d\n", __func__, current->cpu);
+#endif
+#endif
+ set_cpu_ret = set_cpus_allowed_ptr(current, cpumask_of(1));
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+ AICWFDBG(LOGINFO, "%s set_cpu_ret is:%d\n", __func__, set_cpu_ret);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 16, 0))
+ AICWFDBG(LOGINFO, "%s change cpu to:%d\n", __func__, current->thread_info.cpu);
+#else
+ AICWFDBG(LOGINFO, "%s change cpu to:%d\n", __func__, current->cpu);
+#endif
+#endif
+
+#ifdef CONFIG_TXRX_THREAD_PRIO
+ if (busrx_thread_prio > 0) {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 9, 0))
+ sched_set_fifo_low(current);
+#else
+ struct sched_param param;
+ param.sched_priority = (busrx_thread_prio < MAX_RT_PRIO)?busrx_thread_prio:(MAX_RT_PRIO-1);
+ sched_setscheduler(current, SCHED_FIFO, &param);
+#endif
+ }
+#endif
+ AICWFDBG(LOGINFO, "%s the policy of current thread is:%d\n", __func__, current->policy);
+ AICWFDBG(LOGINFO, "%s the rt_priority of current thread is:%d\n", __func__, current->rt_priority);
+ AICWFDBG(LOGINFO, "%s the current pid is:%d\n", __func__, current->pid);
+
+ while (1) {
+#if 0
+ if(kthread_should_stop()) {
+ usb_err("usb busrx thread stop 2\n");
+ break;
+ }
+#endif
+ //rx_priv->rx_thread_working = 0;//AIDEN
+ if (!wait_for_completion_interruptible(&bus_if->busrx_trgg)) {
+ if(bus_if->state ==
BUS_DOWN_ST){
+ AICWFDBG(LOGINFO, "usb busrx thread will stop\n");
+ break;
+ }
+ //rx_priv->rx_thread_working = 1;//AIDEN
+ aicwf_process_rxframes(rx_priv);
+ }
+ }
+
+ aic_thread_wait_stop();
+ AICWFDBG(LOGINFO, "usb busrx thread stop\n");
+
+ return 0;
+}
+
+#ifdef CONFIG_USB_MSG_IN_EP
+int usb_msg_busrx_thread(void *data)
+{
+ struct aicwf_rx_priv *rx_priv = (struct aicwf_rx_priv *)data;
+ struct aicwf_bus *bus_if = rx_priv->usbdev->bus_if;
+
+#ifdef CONFIG_TXRX_THREAD_PRIO
+ if (busrx_thread_prio > 0) {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 9, 0))
+ sched_set_fifo_low(current);
+#else
+ struct sched_param param;
+ param.sched_priority = (busrx_thread_prio < MAX_RT_PRIO)?busrx_thread_prio:(MAX_RT_PRIO-1);
+ sched_setscheduler(current, SCHED_FIFO, &param);
+#endif
+ }
+#endif
+ AICWFDBG(LOGINFO, "%s the policy of current thread is:%d\n", __func__, current->policy);
+ AICWFDBG(LOGINFO, "%s the rt_priority of current thread is:%d\n", __func__, current->rt_priority);
+ AICWFDBG(LOGINFO, "%s the current pid is:%d\n", __func__, current->pid);
+
+
+
+ while (1) {
+#if 0
+ if(kthread_should_stop()) {
+ usb_err("usb msg busrx thread stop\n");
+ break;
+ }
+#endif
+ if (!wait_for_completion_interruptible(&bus_if->msg_busrx_trgg)) {
+ if(bus_if->state == BUS_DOWN_ST){
+ AICWFDBG(LOGINFO, "usb msg busrx thread will stop\n");
+ break;
+ }
+ aicwf_process_msg_rxframes(rx_priv);
+ }
+ }
+
+ aic_thread_wait_stop();
+ AICWFDBG(LOGINFO, "usb msg busrx thread stop\n");
+
+ return 0;
+}
+#endif
+
+
+static void aicwf_usb_send_msg_complete(struct urb *urb)
+{
+ struct aic_usb_dev *usb_dev = (struct aic_usb_dev *) urb->context;
+
+ usb_dev->msg_finished = true;
+ if (waitqueue_active(&usb_dev->msg_wait))
+ wake_up(&usb_dev->msg_wait);
+}
+
+static int aicwf_usb_bus_txmsg(struct device *dev, u8 *buf, u32 len)
+{
+ int ret = 0;
+ struct aicwf_bus *bus_if = dev_get_drvdata(dev);
+ struct aic_usb_dev *usb_dev = bus_if->bus_priv.usb;
+
+ if (usb_dev->state != USB_UP_ST)
+ return -EIO;
+
+ if (buf == NULL || len == 0 || usb_dev->msg_out_urb == NULL)
+ return -EINVAL;
+
+#if 0
+ if (test_and_set_bit(0, &usb_dev->msg_busy)) {
+ usb_err("In a control frame option, can't tx!\n");
+ return -EIO;
+ }
+#endif
+
+ usb_dev->msg_finished = false;
+
+#ifdef CONFIG_USB_MSG_OUT_EP
+ if (usb_dev->msg_out_pipe) {
+ usb_fill_bulk_urb(usb_dev->msg_out_urb,
+ usb_dev->udev,
+ usb_dev->msg_out_pipe,
+ buf, len, (usb_complete_t) aicwf_usb_send_msg_complete, usb_dev);
+ } else {
+ usb_fill_bulk_urb(usb_dev->msg_out_urb,
+ usb_dev->udev,
+ usb_dev->bulk_out_pipe,
+ buf, len, (usb_complete_t) aicwf_usb_send_msg_complete, usb_dev);
+ }
+#else
+ usb_fill_bulk_urb(usb_dev->msg_out_urb,
+ usb_dev->udev,
+ usb_dev->bulk_out_pipe,
+ buf, len, (usb_complete_t) aicwf_usb_send_msg_complete, usb_dev);
+#endif
+ #if defined CONFIG_USB_NO_TRANS_DMA_MAP
+ usb_dev->msg_out_urb->transfer_dma = usb_dev->cmd_dma_trans_addr;
+ usb_dev->msg_out_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+ #endif
+#ifdef CONFIG_USE_USB_ZERO_PACKET
+ usb_dev->msg_out_urb->transfer_flags |= URB_ZERO_PACKET;
+#endif
+ ret = usb_submit_urb(usb_dev->msg_out_urb, GFP_ATOMIC);
+ if (ret) {
+ usb_err("usb_submit_urb failed %d\n", ret);
+ goto exit;
+ }
+
+ ret = wait_event_timeout(usb_dev->msg_wait,
+ usb_dev->msg_finished, msecs_to_jiffies(CMD_TX_TIMEOUT));
+ if (!ret) {
+ if (usb_dev->msg_out_urb)
+ usb_kill_urb(usb_dev->msg_out_urb);
+ usb_err("Txmsg wait timed out\n");
+ ret = -EIO;
+ goto exit;
+ }
+
+ if (usb_dev->msg_finished == false) {
+ usb_err("Txmsg
timed out\n"); + ret = -ETIMEDOUT; + goto exit; + } +exit: +#if 0 + clear_bit(0, &usb_dev->msg_busy); +#endif + return ret; +} + + +static void aicwf_usb_free_urb(struct list_head *q, spinlock_t *qlock) +{ + struct aicwf_usb_buf *usb_buf, *tmp; + unsigned long flags; + + spin_lock_irqsave(qlock, flags); + list_for_each_entry_safe(usb_buf, tmp, q, list) { + spin_unlock_irqrestore(qlock, flags); + if (!usb_buf->urb) { + usb_err("bad usb_buf\n"); + spin_lock_irqsave(qlock, flags); + break; + } + #ifdef CONFIG_USB_TX_AGGR + if (usb_buf->skb) { + dev_kfree_skb(usb_buf->skb); + } + #endif + usb_free_urb(usb_buf->urb); + #if defined CONFIG_USB_NO_TRANS_DMA_MAP + // free dma buf if needed + if (usb_buf->data_buf) { + #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)) + usb_free_coherent(usb_buf->usbdev->udev, DATA_BUF_MAX, usb_buf->data_buf, usb_buf->data_dma_trans_addr); + #else + usb_buffer_free(usb_buf->usbdev->udev, DATA_BUF_MAX, usb_buf->data_buf, usb_buf->data_dma_trans_addr); + #endif + usb_buf->data_buf = NULL; + usb_buf->data_dma_trans_addr = 0x0; + } + #endif + list_del_init(&usb_buf->list); + spin_lock_irqsave(qlock, flags); + } + spin_unlock_irqrestore(qlock, flags); +} + +static int aicwf_usb_alloc_rx_urb(struct aic_usb_dev *usb_dev) +{ + int i; + + AICWFDBG(LOGINFO, "%s AICWF_USB_RX_URBS:%d \r\n", __func__, AICWF_USB_RX_URBS); + for (i = 0; i < AICWF_USB_RX_URBS; i++) { + struct aicwf_usb_buf *usb_buf = &usb_dev->usb_rx_buf[i]; + + usb_buf->usbdev = usb_dev; + usb_buf->urb = usb_alloc_urb(0, GFP_KERNEL); + if (!usb_buf->urb) { + usb_err("could not allocate rx data urb\n"); + goto err; + } + #if defined CONFIG_USB_NO_TRANS_DMA_MAP + // dma buf unused + usb_buf->data_buf = NULL; + usb_buf->data_dma_trans_addr = 0x0; + #endif + list_add_tail(&usb_buf->list, &usb_dev->rx_free_list); + } + return 0; + +err: + aicwf_usb_free_urb(&usb_dev->rx_free_list, &usb_dev->rx_free_lock); + return -ENOMEM; +} + +static int aicwf_usb_alloc_tx_urb(struct aic_usb_dev *usb_dev) +{ + int i; + + AICWFDBG(LOGINFO, "%s AICWF_USB_TX_URBS:%d \r\n", __func__, AICWF_USB_TX_URBS); + for (i = 0; i < AICWF_USB_TX_URBS; i++) { + struct aicwf_usb_buf *usb_buf = &usb_dev->usb_tx_buf[i]; + + usb_buf->usbdev = usb_dev; + usb_buf->urb = usb_alloc_urb(0, GFP_KERNEL); + if (!usb_buf->urb) { + usb_err("could not allocate tx data urb\n"); + goto err; + } + #ifdef CONFIG_USB_TX_AGGR + usb_buf->skb = dev_alloc_skb(MAX_USB_AGGR_TXPKT_LEN); + #endif + #if defined CONFIG_USB_NO_TRANS_DMA_MAP + // alloc dma buf + #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)) + usb_buf->data_buf = usb_alloc_coherent(usb_dev->udev, DATA_BUF_MAX, (in_interrupt() ? GFP_ATOMIC : GFP_KERNEL), &usb_buf->data_dma_trans_addr); + #else + usb_buf->data_buf = usb_buffer_alloc(usb_dev->udev, DATA_BUF_MAX, (in_interrupt() ? 
GFP_ATOMIC : GFP_KERNEL), &usb_buf->data_dma_trans_addr); + #endif + if (usb_buf->data_buf == NULL) { + usb_err("could not allocate tx data dma buf\n"); + goto err; + } + #endif + list_add_tail(&usb_buf->list, &usb_dev->tx_free_list); + (usb_dev->tx_free_count)++; + } + return 0; + +err: + aicwf_usb_free_urb(&usb_dev->tx_free_list, &usb_dev->tx_free_lock); + return -ENOMEM; +} + +#ifdef CONFIG_USB_MSG_IN_EP +static int aicwf_usb_alloc_msg_rx_urb(struct aic_usb_dev *usb_dev) +{ + int i; + + AICWFDBG(LOGINFO, "%s AICWF_USB_MSG_RX_URBS:%d \r\n", __func__, AICWF_USB_MSG_RX_URBS); + + for (i = 0; i < AICWF_USB_MSG_RX_URBS; i++) { + struct aicwf_usb_buf *usb_buf = &usb_dev->usb_msg_rx_buf[i]; + + usb_buf->usbdev = usb_dev; + usb_buf->urb = usb_alloc_urb(0, GFP_KERNEL); + if (!usb_buf->urb) { + usb_err("could not allocate rx data urb\n"); + goto err; + } + list_add_tail(&usb_buf->list, &usb_dev->msg_rx_free_list); + } + return 0; + +err: + aicwf_usb_free_urb(&usb_dev->msg_rx_free_list, &usb_dev->msg_rx_free_lock); + return -ENOMEM; +} +#endif + +static void aicwf_usb_state_change(struct aic_usb_dev *usb_dev, int state) +{ + int old_state; + + if (usb_dev->state == state) + return; + + old_state = usb_dev->state; + usb_dev->state = state; + + if (state == USB_DOWN_ST) { + usb_dev->bus_if->state = BUS_DOWN_ST; + } + if (state == USB_UP_ST) { + usb_dev->bus_if->state = BUS_UP_ST; + } +} + +int align_param = 8; +module_param(align_param, int, 0660); + +static void usb_tx_flow_ctrl(struct rwnx_txhdr *txhdr, struct aic_usb_dev *usb_dev, struct rwnx_hw *rwnx_hw) +{ +#ifdef CONFIG_PER_STA_FC + struct rwnx_sta *sta; + u8 sta_idx; + unsigned long flags; + + //printk("txdata: sta %d\n", txhdr->sw_hdr->desc.host.staid); + sta_idx = txhdr->sw_hdr->desc.host.staid; + if(sta_idx < NX_REMOTE_STA_MAX && !(txhdr->sw_hdr->desc.host.flags & TXU_CNTRL_MGMT)) { + struct rwnx_vif *vif = NULL; + sta = &rwnx_hw->sta_table[sta_idx]; + vif = rwnx_hw->vif_table[sta->vif_idx]; + spin_lock_irqsave(&usb_dev->tx_flow_lock, flags); + atomic_inc(&rwnx_hw->sta_flowctrl[sta_idx].tx_pending_cnt); + //printk("sta %d pending %d >= 64, flowctrl=%d\n", sta->sta_idx, sta->tx_pending_cnt, sta->flowctrl); + if(RWNX_VIF_TYPE(vif) == NL80211_IFTYPE_AP) { + if((atomic_read(&rwnx_hw->sta_flowctrl[sta_idx].tx_pending_cnt) >= AICWF_USB_FC_PERSTA_HIGH_WATER && rwnx_hw->sta_flowctrl[sta_idx].flowctrl == 0) || + rwnx_hw->sta_flowctrl[sta_idx].flowctrl) { + //AICWFDBG(LOGDEBUG, "sta 0x%x:0x%x, %d pending %d, stop\n", sta->mac_addr[4], sta->mac_addr[5], sta->sta_idx, atomic_read(&rwnx_hw->sta_flowctrl[sta_idx].tx_pending_cnt)); + if(!usb_dev->tbusy) + rwnx_stop_sta_all_queues(sta, usb_dev->rwnx_hw); + rwnx_hw->sta_flowctrl[sta_idx].flowctrl = 1; + } + } + spin_unlock_irqrestore(&usb_dev->tx_flow_lock, flags); + } +#endif +} + +#ifdef CONFIG_USB_TX_AGGR +static int aicwf_usb_bus_txdata(struct device *dev, struct sk_buff *pkt) +{ + uint prio; + int ret = -EBADE; + struct aicwf_bus *bus_if = dev_get_drvdata(dev); + struct aic_usb_dev *usbdev = bus_if->bus_priv.usb; + + //printk("%s\n", __func__); + prio = (pkt->priority & 0x7); + spin_lock_bh(&usbdev->tx_priv->txqlock); + if (!aicwf_frame_enq(usbdev->dev, &usbdev->tx_priv->txq, pkt, prio)) { + aicwf_dev_skb_free(pkt); + spin_unlock_bh(&usbdev->tx_priv->txqlock); + return -ENOSR; + } else { + ret = 0; + } + + if (bus_if->state != BUS_UP_ST) { + usb_err("bus_if stopped\n"); + spin_unlock_bh(&usbdev->tx_priv->txqlock); + return -1; + } + + atomic_inc(&usbdev->tx_priv->tx_pktcnt); + 
spin_unlock_bh(&usbdev->tx_priv->txqlock); + complete(&bus_if->bustx_trgg); + + return ret; +} + +#else +static int aicwf_usb_bus_txdata(struct device *dev, struct sk_buff *skb) +{ + u8 *buf; + u16 buf_len = 0; + u16 adjust_len = 0; + struct aicwf_usb_buf *usb_buf; + int ret = 0; + unsigned long flags; + struct aicwf_bus *bus_if = dev_get_drvdata(dev); + struct aic_usb_dev *usb_dev = bus_if->bus_priv.usb; + struct rwnx_txhdr *txhdr = (struct rwnx_txhdr *)skb->data; + struct rwnx_hw *rwnx_hw = usb_dev->rwnx_hw; + u8 usb_header[4]; + u8 adj_buf[4] = {0}; + u16 index = 0; + bool need_cfm = false; +#ifdef CONFIG_USB_ALIGN_DATA//AIDEN + u8 *buf_align; + int align; +#endif + + if (usb_dev->state != USB_UP_ST) { + usb_err("usb state is not up!\n"); + kmem_cache_free(rwnx_hw->sw_txhdr_cache, txhdr->sw_hdr); + dev_kfree_skb_any(skb); + return -EIO; + } + + usb_buf = aicwf_usb_tx_dequeue(usb_dev, &usb_dev->tx_free_list, + &usb_dev->tx_free_count, &usb_dev->tx_free_lock); + if (!usb_buf) { + usb_err("free:%d, post:%d\n", usb_dev->tx_free_count, usb_dev->tx_post_count); + kmem_cache_free(rwnx_hw->sw_txhdr_cache, txhdr->sw_hdr); + dev_kfree_skb_any(skb); + ret = -ENOMEM; + goto flow_ctrl; + } + + usb_tx_flow_ctrl(txhdr, usb_dev, rwnx_hw); + + if (txhdr->sw_hdr->need_cfm) { + need_cfm = true; + #if defined CONFIG_USB_NO_TRANS_DMA_MAP + buf = usb_buf->data_buf; + #else + buf = kmalloc(skb->len + 1, GFP_ATOMIC/*GFP_KERNEL*/); + #endif + index += sizeof(usb_header); + memcpy(&buf[index], (u8 *)(long)&txhdr->sw_hdr->desc, sizeof(struct txdesc_api)); + index += sizeof(struct txdesc_api); + memcpy(&buf[index], &skb->data[txhdr->sw_hdr->headroom], skb->len - txhdr->sw_hdr->headroom); + index += skb->len - txhdr->sw_hdr->headroom; + buf_len = index; + if (buf_len & (TX_ALIGNMENT - 1)) { + adjust_len = roundup(buf_len, TX_ALIGNMENT)-buf_len; + memcpy(&buf[buf_len], adj_buf, adjust_len); + buf_len += adjust_len; + } + usb_header[0] =((buf_len) & 0xff); + usb_header[1] =(((buf_len) >> 8)&0x0f); + usb_header[2] = 0x01; //data + usb_header[3] = 0; //reserved + memcpy(&buf[0], usb_header, sizeof(usb_header)); + usb_buf->skb = (struct sk_buff *)buf; + } else { + skb_pull(skb, txhdr->sw_hdr->headroom); + skb_push(skb, sizeof(struct txdesc_api)); + memcpy(&skb->data[0], (u8 *)(long)&txhdr->sw_hdr->desc, sizeof(struct txdesc_api)); + kmem_cache_free(rwnx_hw->sw_txhdr_cache, txhdr->sw_hdr); + + skb_push(skb, sizeof(usb_header)); + usb_header[0] =((skb->len) & 0xff); + usb_header[1] =(((skb->len) >> 8)&0x0f); + usb_header[2] = 0x01; //data + usb_header[3] = 0; //reserved + memcpy(&skb->data[0], usb_header, sizeof(usb_header)); + + #if defined CONFIG_USB_NO_TRANS_DMA_MAP + buf = usb_buf->data_buf; + memcpy(&buf[0], skb->data, skb->len); + #else + buf = skb->data; + #endif + buf_len = skb->len; + + usb_buf->skb = skb; + } + usb_buf->usbdev = usb_dev; + if (need_cfm) + usb_buf->cfm = true; + else + usb_buf->cfm = false; + + +#ifndef CONFIG_USE_USB_ZERO_PACKET + if((buf_len % 512) == 0){ + printk("%s send zero package buf_len: %d\r\n", __func__, buf_len); + if(txhdr->sw_hdr->need_cfm){ + buf[buf_len] = 0x00; + buf_len = buf_len + 1; + }else{ + skb_put(skb, 1); + skb->data[buf_len] = 0x00; + buf = skb->data; + buf_len = skb->len; + } + } +#endif + +#ifdef CONFIG_USB_ALIGN_DATA + #if defined CONFIG_USB_NO_TRANS_DMA_MAP + #error "CONFIG_USB_NO_TRANS_DMA_MAP not supported" + #endif +#if 0 + usb_buf->usb_align_data = (u8*)kmalloc(sizeof(u8) * buf_len + align_param, GFP_ATOMIC); + + align = ((unsigned long)(usb_buf->usb_align_data)) 
& (align_param - 1); + memcpy(usb_buf->usb_align_data + (align_param - align), buf, buf_len); + + usb_fill_bulk_urb(usb_buf->urb, usb_dev->udev, usb_dev->bulk_out_pipe, + usb_buf->usb_align_data + (align_param - align), buf_len, aicwf_usb_tx_complete, usb_buf); +#else + if (!IS_ALIGNED((unsigned long)buf, align_param)) { + usb_buf->usb_align_data = (u8*)kmalloc(sizeof(u8) * buf_len + align_param, GFP_ATOMIC); + if (usb_buf->usb_align_data) { + align = ((unsigned long)(usb_buf->usb_align_data)) & (align_param - 1); + buf_align = usb_buf->usb_align_data + (align_param - align); + memcpy(buf_align, buf, buf_len); + } + } else { + buf_align = buf; + } + + usb_fill_bulk_urb(usb_buf->urb, usb_dev->udev, usb_dev->bulk_out_pipe, + buf_align, buf_len, aicwf_usb_tx_complete, usb_buf); +#endif +#else + usb_fill_bulk_urb(usb_buf->urb, usb_dev->udev, usb_dev->bulk_out_pipe, + buf, buf_len, aicwf_usb_tx_complete, usb_buf); +#endif + + #if defined CONFIG_USB_NO_TRANS_DMA_MAP + usb_buf->urb->transfer_dma = usb_buf->data_dma_trans_addr; + usb_buf->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; + #endif +#ifdef CONFIG_USE_USB_ZERO_PACKET + usb_buf->urb->transfer_flags |= URB_ZERO_PACKET; +#endif + + aicwf_usb_tx_queue(usb_dev, &usb_dev->tx_post_list, usb_buf, + &usb_dev->tx_post_count, &usb_dev->tx_post_lock); + +#ifdef CONFIG_TX_TASKLET + tasklet_schedule(&usb_dev->xmit_tasklet); +#else + complete(&bus_if->bustx_trgg); +#endif + + ret = 0; + + flow_ctrl: + spin_lock_irqsave(&usb_dev->tx_flow_lock, flags); + if (usb_dev->tx_free_count < AICWF_USB_TX_LOW_WATER) { + AICWFDBG(LOGDEBUG, "usb_dev->tx_free_count < AICWF_USB_TX_LOW_WATER:%d\r\n", + usb_dev->tx_free_count); + usb_dev->tbusy = true; + aicwf_usb_tx_flowctrl(usb_dev->rwnx_hw, true); + } + spin_unlock_irqrestore(&usb_dev->tx_flow_lock, flags); + + return ret; +} +#endif +static int aicwf_usb_bus_start(struct device *dev) +{ + struct aicwf_bus *bus_if = dev_get_drvdata(dev); + struct aic_usb_dev *usb_dev = bus_if->bus_priv.usb; + + if (usb_dev->state == USB_UP_ST) + return 0; + + aicwf_usb_state_change(usb_dev, USB_UP_ST); + + usb_dev->rx_prepare_ready = false; + aicwf_usb_rx_prepare(usb_dev); + aicwf_usb_tx_prepare(usb_dev); +#ifdef CONFIG_USB_MSG_IN_EP + if(usb_dev->msg_in_pipe){ + aicwf_usb_msg_rx_prepare(usb_dev); + } +#endif + if(!usb_dev->rx_prepare_ready){ + AICWFDBG(LOGERROR, "%s rx prepare fail\r\n", __func__); + return -1; + }else{ + return 0; + } +} + +static void aicwf_usb_cancel_all_urbs_(struct aic_usb_dev *usb_dev) +{ + struct aicwf_usb_buf *usb_buf, *tmp; + unsigned long flags; + + if (usb_dev->msg_out_urb) + usb_kill_urb(usb_dev->msg_out_urb); + + spin_lock_irqsave(&usb_dev->tx_post_lock, flags); + list_for_each_entry_safe(usb_buf, tmp, &usb_dev->tx_post_list, list) { + spin_unlock_irqrestore(&usb_dev->tx_post_lock, flags); + if (!usb_buf->urb) { + usb_err("bad usb_buf\n"); + spin_lock_irqsave(&usb_dev->tx_post_lock, flags); + break; + } + usb_kill_urb(usb_buf->urb); + #if defined CONFIG_USB_NO_TRANS_DMA_MAP + // free dma buf if needed + if (usb_buf->data_buf) { + #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)) + usb_free_coherent(usb_buf->usbdev->udev, DATA_BUF_MAX, usb_buf->data_buf, usb_buf->data_dma_trans_addr); + #else + usb_buffer_free(usb_buf->usbdev->udev, DATA_BUF_MAX, usb_buf->data_buf, usb_buf->data_dma_trans_addr); + #endif + usb_buf->data_buf = NULL; + usb_buf->data_dma_trans_addr = 0x0; + } else { + usb_err("bad usb dma buf\n"); + spin_lock_irqsave(&usb_dev->tx_post_lock, flags); + break; + } + #endif + 
spin_lock_irqsave(&usb_dev->tx_post_lock, flags); + } + spin_unlock_irqrestore(&usb_dev->tx_post_lock, flags); + + usb_kill_anchored_urbs(&usb_dev->rx_submitted); +#ifdef CONFIG_USB_MSG_IN_EP + if(usb_dev->msg_in_pipe){ + usb_kill_anchored_urbs(&usb_dev->msg_rx_submitted); + } +#endif +} + +void aicwf_usb_cancel_all_urbs(struct aic_usb_dev *usb_dev){ + aicwf_usb_cancel_all_urbs_(usb_dev); +} + + +static void aicwf_usb_bus_stop(struct device *dev) +{ + struct aicwf_bus *bus_if = dev_get_drvdata(dev); + struct aic_usb_dev *usb_dev = bus_if->bus_priv.usb; + + AICWFDBG(LOGINFO, "%s\r\n", __func__); + if (usb_dev == NULL) + return; + + if (usb_dev->state == USB_DOWN_ST) + return; + + if(g_rwnx_plat && g_rwnx_plat->wait_disconnect_cb == true){ + atomic_set(&aicwf_deinit_atomic, 1); + up(&aicwf_deinit_sem); + } + aicwf_usb_state_change(usb_dev, USB_DOWN_ST); + //aicwf_usb_cancel_all_urbs(usb_dev);//AIDEN +} + +static void aicwf_usb_deinit(struct aic_usb_dev *usbdev) +{ + cancel_work_sync(&usbdev->rx_urb_work); + aicwf_usb_free_urb(&usbdev->rx_free_list, &usbdev->rx_free_lock); + aicwf_usb_free_urb(&usbdev->tx_free_list, &usbdev->tx_free_lock); +#ifdef CONFIG_USB_MSG_IN_EP + if(usbdev->msg_in_pipe){ + cancel_work_sync(&usbdev->msg_rx_urb_work); + aicwf_usb_free_urb(&usbdev->msg_rx_free_list, &usbdev->msg_rx_free_lock); + } +#endif + + usb_free_urb(usbdev->msg_out_urb); +} + +static void aicwf_usb_rx_urb_work(struct work_struct *work) +{ + struct aic_usb_dev *usb_dev = container_of(work, struct aic_usb_dev, rx_urb_work); + + aicwf_usb_rx_submit_all_urb(usb_dev); +} + +#ifdef CONFIG_USB_MSG_IN_EP +static void aicwf_usb_msg_rx_urb_work(struct work_struct *work) +{ + struct aic_usb_dev *usb_dev = container_of(work, struct aic_usb_dev, msg_rx_urb_work); + + aicwf_usb_msg_rx_submit_all_urb(usb_dev); +} +#endif + +static int aicwf_usb_init(struct aic_usb_dev *usb_dev) +{ + int ret = 0; + + usb_dev->tbusy = false; + usb_dev->state = USB_DOWN_ST; + + init_waitqueue_head(&usb_dev->msg_wait); + init_usb_anchor(&usb_dev->rx_submitted); +#ifdef CONFIG_USB_MSG_IN_EP + if(usb_dev->msg_in_pipe){ + init_usb_anchor(&usb_dev->msg_rx_submitted); + } +#endif + + spin_lock_init(&usb_dev->tx_free_lock); + spin_lock_init(&usb_dev->tx_post_lock); + spin_lock_init(&usb_dev->rx_free_lock); + spin_lock_init(&usb_dev->tx_flow_lock); +#ifdef CONFIG_USB_MSG_IN_EP + if(usb_dev->msg_in_pipe){ + spin_lock_init(&usb_dev->msg_rx_free_lock); + } +#endif + + INIT_LIST_HEAD(&usb_dev->rx_free_list); + INIT_LIST_HEAD(&usb_dev->tx_free_list); + INIT_LIST_HEAD(&usb_dev->tx_post_list); +#ifdef CONFIG_USB_MSG_IN_EP + if(usb_dev->msg_in_pipe){ + INIT_LIST_HEAD(&usb_dev->msg_rx_free_list); + } +#endif + + atomic_set(&rx_urb_cnt, 0); + + usb_dev->tx_free_count = 0; + usb_dev->tx_post_count = 0; + + ret = aicwf_usb_alloc_rx_urb(usb_dev); + if (ret) { + goto error; + } + ret = aicwf_usb_alloc_tx_urb(usb_dev); + if (ret) { + goto error; + } +#ifdef CONFIG_USB_MSG_IN_EP + if(usb_dev->msg_in_pipe){ + ret = aicwf_usb_alloc_msg_rx_urb(usb_dev); + if (ret) { + goto error; + } + } +#endif + + + usb_dev->msg_out_urb = usb_alloc_urb(0, GFP_ATOMIC); + if (!usb_dev->msg_out_urb) { + usb_err("usb_alloc_urb (msg out) failed\n"); + ret = ENOMEM; + goto error; + } + + INIT_WORK(&usb_dev->rx_urb_work, aicwf_usb_rx_urb_work); +#ifdef CONFIG_USB_MSG_IN_EP + if(usb_dev->msg_in_pipe){ + INIT_WORK(&usb_dev->msg_rx_urb_work, aicwf_usb_msg_rx_urb_work); + } +#endif + + return ret; + error: + usb_err("failed!\n"); + aicwf_usb_deinit(usb_dev); + return ret; +} + + 
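+/*
+ * Descriptive note on aicwf_parse_usb() below: it sanity-checks the USB
+ * device (single configuration, vendor-specific WLAN interface class
+ * 0xff/0xff/0xff) and walks the interface endpoint descriptors to discover
+ * the bulk pipes. The first bulk IN/OUT pair is used for data; when
+ * CONFIG_USB_MSG_IN_EP / CONFIG_USB_MSG_OUT_EP are enabled, the next bulk
+ * IN (on chips other than the AIC8801) and OUT endpoints are taken as
+ * dedicated message pipes. Missing data bulk IN or OUT endpoints are fatal
+ * (-ENODEV); missing message pipes are only logged. Finally the detected
+ * USB link speed is reported.
+ */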
+static int aicwf_parse_usb(struct aic_usb_dev *usb_dev, struct usb_interface *interface) +{ + struct usb_interface_descriptor *interface_desc; + struct usb_host_interface *host_interface; + struct usb_endpoint_descriptor *endpoint; + struct usb_device *usb = usb_dev->udev; + int i, endpoints; + u8 endpoint_num; + int ret = 0; + + usb_dev->bulk_in_pipe = 0; + usb_dev->bulk_out_pipe = 0; +#ifdef CONFIG_USB_MSG_OUT_EP + usb_dev->msg_out_pipe = 0; +#endif +#ifdef CONFIG_USB_MSG_IN_EP + usb_dev->msg_in_pipe = 0; +#endif + + host_interface = &interface->altsetting[0]; + interface_desc = &host_interface->desc; + endpoints = interface_desc->bNumEndpoints; + AICWFDBG(LOGINFO, "%s endpoints = %d\n", __func__, endpoints); + + /* Check device configuration */ + if (usb->descriptor.bNumConfigurations != 1) { + usb_err("Number of configurations: %d not supported\n", + usb->descriptor.bNumConfigurations); + ret = -ENODEV; + goto exit; + } + + /* Check deviceclass */ +#ifndef CONFIG_USB_BT + if (usb->descriptor.bDeviceClass != 0x00) { + usb_err("DeviceClass %d not supported\n", + usb->descriptor.bDeviceClass); + ret = -ENODEV; + goto exit; + } +#endif + + /* Check interface number */ +#ifdef CONFIG_USB_BT + if (usb->actconfig->desc.bNumInterfaces != 3) +#else + if (usb->actconfig->desc.bNumInterfaces != 1) +#endif + { + AICWFDBG(LOGERROR, "Number of interfaces: %d not supported\n", + usb->actconfig->desc.bNumInterfaces); + if(usb_dev->chipid == PRODUCT_ID_AIC8800DC){ + AICWFDBG(LOGERROR, "AIC8800DC change to AIC8800DW\n"); + usb_dev->chipid = PRODUCT_ID_AIC8800DW; + } else if (usb_dev->chipid == PRODUCT_ID_AIC8800D81) { + AICWFDBG(LOGERROR, "AIC8800D80\n"); + } else if(usb_dev->chipid == PRODUCT_ID_AIC8800D81X2 || + usb_dev->chipid == PRODUCT_ID_AIC8800D89X2){ + //TODO + }else{ + ret = -ENODEV; + goto exit; + } + } + + if ((interface_desc->bInterfaceClass != USB_CLASS_VENDOR_SPEC) || + (interface_desc->bInterfaceSubClass != 0xff) || + (interface_desc->bInterfaceProtocol != 0xff)) { + usb_err("non WLAN interface %d: 0x%x:0x%x:0x%x\n", + interface_desc->bInterfaceNumber, interface_desc->bInterfaceClass, + interface_desc->bInterfaceSubClass, interface_desc->bInterfaceProtocol); + ret = -ENODEV; + goto exit; + } + + for (i = 0; i < endpoints; i++) { + endpoint = &host_interface->endpoint[i].desc; + endpoint_num = usb_endpoint_num(endpoint); + + if (usb_endpoint_dir_in(endpoint) && + usb_endpoint_xfer_bulk(endpoint)) { + if (!usb_dev->bulk_in_pipe) { + usb_dev->bulk_in_pipe = usb_rcvbulkpipe(usb, endpoint_num); + } +#ifdef CONFIG_USB_MSG_IN_EP + else if (!usb_dev->msg_in_pipe) { + if(usb_dev->chipid != PRODUCT_ID_AIC8801){ + usb_dev->msg_in_pipe = usb_rcvbulkpipe(usb, endpoint_num); + } + } +#endif + } + + if (usb_endpoint_dir_out(endpoint) && + usb_endpoint_xfer_bulk(endpoint)) { + if (!usb_dev->bulk_out_pipe) + { + usb_dev->bulk_out_pipe = usb_sndbulkpipe(usb, endpoint_num); + } +#ifdef CONFIG_USB_MSG_OUT_EP + else if (!usb_dev->msg_out_pipe) { + usb_dev->msg_out_pipe = usb_sndbulkpipe(usb, endpoint_num); + } +#endif + + } + } + + if (usb_dev->bulk_in_pipe == 0) { + usb_err("No RX (in) Bulk EP found\n"); + ret = -ENODEV; + goto exit; + } + if (usb_dev->bulk_out_pipe == 0) { + usb_err("No TX (out) Bulk EP found\n"); + ret = -ENODEV; + goto exit; + } +#ifdef CONFIG_USB_MSG_OUT_EP + if (usb_dev->msg_out_pipe == 0) { + usb_err("No TX Msg (out) Bulk EP found\n"); + } +#endif +#ifdef CONFIG_USB_MSG_IN_EP + if(usb_dev->chipid != PRODUCT_ID_AIC8801){ + if (usb_dev->msg_in_pipe == 0) { + AICWFDBG(LOGINFO, "No RX Msg 
(in) Bulk EP found\n"); + } + } +#endif + + switch (usb->speed) { +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0) + case USB_SPEED_SUPER_PLUS: + AICWFDBG(LOGINFO, "Aic super plus speed USB device detected\n"); + break; +#endif +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) + case USB_SPEED_SUPER: + AICWFDBG(LOGINFO, "Aic super speed USB device detected\n"); + break; +#endif + case USB_SPEED_HIGH: + AICWFDBG(LOGINFO, "Aic high speed USB device detected\n"); + break; + case USB_SPEED_FULL: + AICWFDBG(LOGINFO, "Aic full speed USB device detected\n"); + break; + default: + AICWFDBG(LOGINFO, "Aic unknown speed(%d) USB device detected\n", usb->speed); + break; + } + + exit: + return ret; +} + + + +static struct aicwf_bus_ops aicwf_usb_bus_ops = { + .start = aicwf_usb_bus_start, + .stop = aicwf_usb_bus_stop, + .txdata = aicwf_usb_bus_txdata, + .txmsg = aicwf_usb_bus_txmsg, +}; + + +#ifdef CONFIG_GPIO_WAKEUP + +static irqreturn_t rwnx_irq_handler(int irq, void *para) +{ + unsigned long irqflags; + spin_lock_irqsave(&irq_lock, irqflags); + disable_irq_nosync(hostwake_irq_num); + //do something + printk("%s gpio irq trigger\r\n", __func__); + spin_unlock_irqrestore(&irq_lock, irqflags); + atomic_dec(&irq_count); + return IRQ_HANDLED; +} + + +static int rwnx_register_hostwake_irq(struct device *dev) +{ + int ret = 0; + uint irq_flags = 0; + + spin_lock_init(&irq_lock); + +//Setting hostwake gpio for platform +//For Rockchip +#ifdef CONFIG_PLATFORM_ROCKCHIP + hostwake_irq_num = rockchip_wifi_get_oob_irq(); + printk("%s hostwake_irq_num:%d \r\n", __func__, hostwake_irq_num); + irq_flags = (IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL | IORESOURCE_IRQ_SHAREABLE) & IRQF_TRIGGER_MASK; + printk("%s irq_flags:%d \r\n", __func__, irq_flags); + wakeup_enable = 1; +#endif //CONFIG_PLATFORM_ROCKCHIP + +//For Allwinner +#ifdef CONFIG_PLATFORM_ALLWINNER + int irq_flags; +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0) + hostwake_irq_num = sunxi_wlan_get_oob_irq(&irq_flags, &wakeup_enable); +#else + hostwake_irq_num = sunxi_wlan_get_oob_irq(); + irq_flags = sunxi_wlan_get_oob_irq_flags(); + wakeup_enable = 1; +#endif +#endif //CONFIG_PLATFORM_ALLWINNER + + + ret = request_irq(hostwake_irq_num, + rwnx_irq_handler, IRQF_TRIGGER_RISING | IRQF_NO_SUSPEND, + "rwnx_irq_handler", NULL); + + enable_irq_wake(hostwake_irq_num); + + return ret; +} + +static int rwnx_unregister_hostwake_irq(struct device *dev) +{ + wakeup_enable = 0; + + printk("%s hostwake_irq_num:%d \r\n", __func__, hostwake_irq_num); + disable_irq_wake(hostwake_irq_num); + free_irq(hostwake_irq_num, NULL); + + return 0; +} + +#endif //CONFIG_GPIO_WAKEUP + +static int aicwf_usb_chipmatch(struct aic_usb_dev *usb_dev, u16_l vid, u16_l pid){ + + if(pid == USB_PRODUCT_ID_AIC8801){ + usb_dev->chipid = PRODUCT_ID_AIC8801; + AICWFDBG(LOGINFO, "%s USE AIC8801\r\n", __func__); + return 0; + }else if(pid == USB_PRODUCT_ID_AIC8800DC){ + usb_dev->chipid = PRODUCT_ID_AIC8800DC; + AICWFDBG(LOGINFO, "%s USE AIC8800DC\r\n", __func__); + return 0; + }else if(pid == USB_PRODUCT_ID_AIC8800DW){ + usb_dev->chipid = PRODUCT_ID_AIC8800DW; + AICWFDBG(LOGINFO, "%s USE AIC8800DW\r\n", __func__); + return 0; + }else if(pid == USB_PRODUCT_ID_AIC8800D81 || pid == USB_PRODUCT_ID_AIC8800D41 || vid == USB_VENDOR_ID_TP || pid == USB_PRODUCT_ID_AIC8800D83 + || vid == USB_VENDOR_ID_TENDA || pid == USB_PRODUCT_ID_AIC8800D84 || pid == USB_PRODUCT_ID_AIC8800D85 + || pid == USB_PRODUCT_ID_AIC8800D88){ + usb_dev->chipid = PRODUCT_ID_AIC8800D81; + aicwf_usb_rx_aggr = true; + 
AICWFDBG(LOGINFO, "%s USE AIC8800D81\r\n", __func__); + return 0; + }else if(pid == USB_PRODUCT_ID_AIC8800D81X2){ + usb_dev->chipid = PRODUCT_ID_AIC8800D81X2; + aicwf_usb_max_pkt_size = AICWF_USB_MAX_AMSDU_PKT_SIZE; + if (usb_dev->udev->speed <= USB_SPEED_HIGH) { + aicwf_usb_rx_aggr = true; + } else { + #ifdef CONFIG_PLATFORM_HI + aicwf_usb_rx_aggr = true; + #else + aicwf_usb_rx_aggr = false; + #endif + } + AICWFDBG(LOGINFO, "%s USE AIC8800D81X2\r\n", __func__); + return 0; + }else if(pid == USB_PRODUCT_ID_AIC8800D89X2){ + usb_dev->chipid = PRODUCT_ID_AIC8800D89X2; + aicwf_usb_max_pkt_size = AICWF_USB_MAX_AMSDU_PKT_SIZE; + if (usb_dev->udev->speed <= USB_SPEED_HIGH) { + aicwf_usb_rx_aggr = true; + } else { + #ifdef CONFIG_PLATFORM_HI + aicwf_usb_rx_aggr = true; + #else + aicwf_usb_rx_aggr = false; + #endif + } + AICWFDBG(LOGINFO, "%s USE AIC8800D89X2\r\n", __func__); + return 0; + }else{ + return -1; + } +} + + +static int aicwf_usb_probe(struct usb_interface *intf, const struct usb_device_id *id) +{ + int ret = 0; + struct usb_device *usb = interface_to_usbdev(intf); + struct aicwf_bus *bus_if = NULL; + struct device *dev = NULL; + struct aicwf_rx_priv* rx_priv = NULL; + struct aic_usb_dev *usb_dev = NULL; + #ifdef CONFIG_USB_TX_AGGR + struct aicwf_tx_priv *tx_priv = NULL; + #endif + + usb_dev = kzalloc(sizeof(struct aic_usb_dev), GFP_ATOMIC); + + AICWFDBG(LOGDEBUG, "%s usb_dev:%d usb_tx_buf:%d usb_rx_buf:%d\r\n", + __func__, + (int)sizeof(struct aic_usb_dev), + (int)sizeof(struct aicwf_usb_buf) * AICWF_USB_TX_URBS, + (int)sizeof(struct aicwf_usb_buf) * AICWF_USB_RX_URBS); + + + if (!usb_dev) { + AICWFDBG(LOGERROR, "%s usb_dev kzalloc fail\r\n", __func__); + return -ENOMEM; + } + + usb_dev->usb_tx_buf = vmalloc(sizeof(struct aicwf_usb_buf) * AICWF_USB_TX_URBS); + usb_dev->usb_rx_buf = vmalloc(sizeof(struct aicwf_usb_buf) * AICWF_USB_RX_URBS); + + if(!usb_dev->usb_tx_buf || !usb_dev->usb_rx_buf){ + if(usb_dev->usb_tx_buf){ + vfree(usb_dev); + } + + if(usb_dev->usb_tx_buf){ + vfree(usb_dev); + } + + if(usb_dev){ + kfree(usb_dev); + } + AICWFDBG(LOGERROR, "%s usb_tx_buf or usb_rx_buf vmalloc fail\r\n", __func__); + return -ENOMEM; + } + + memset(usb_dev->usb_tx_buf, + 0, + (int)(sizeof(struct aicwf_usb_buf) * AICWF_USB_TX_URBS)); + + memset(usb_dev->usb_rx_buf, + 0, + (int)(sizeof(struct aicwf_usb_buf) * AICWF_USB_RX_URBS)); + + + usb_dev->udev = usb; + usb_dev->dev = &usb->dev; + usb_set_intfdata(intf, usb_dev); + + ret = aicwf_usb_chipmatch(usb_dev, id->idVendor, id->idProduct); + + if (ret < 0) { + AICWFDBG(LOGERROR, "%s pid:0x%04X vid:0x%04X unsupport\n", + __func__, id->idVendor, id->idProduct); + goto out_free_bus; + } + + ret = aicwf_parse_usb(usb_dev, intf); + if (ret) { + AICWFDBG(LOGERROR, "aicwf_parse_usb err %d\n", ret); + goto out_free; + } + + ret = aicwf_usb_init(usb_dev); + if (ret) { + AICWFDBG(LOGERROR, "aicwf_usb_init err %d\n", ret); + goto out_free; + } + + bus_if = kzalloc(sizeof(struct aicwf_bus), GFP_ATOMIC); + if (!bus_if) { + ret = -ENOMEM; + goto out_free_usb; + } + + dev = usb_dev->dev; + bus_if->dev = dev; + usb_dev->bus_if = bus_if; + bus_if->bus_priv.usb = usb_dev; + dev_set_drvdata(dev, bus_if); + + bus_if->ops = &aicwf_usb_bus_ops; + + rx_priv = aicwf_rx_init(usb_dev); + if(!rx_priv) { + AICWFDBG(LOGERROR, "rx init failed\n"); + ret = -1; + goto out_free_bus; + } + usb_dev->rx_priv = rx_priv; + +#ifdef CONFIG_USB_TX_AGGR + tx_priv = aicwf_tx_init(usb_dev); + if(!tx_priv) { + usb_err("tx init fail\n"); + goto out_free_bus; + } + usb_dev->tx_priv = tx_priv; + 
aicwf_frame_queue_init(&tx_priv->txq, 8, TXQLEN); + spin_lock_init(&tx_priv->txqlock); + spin_lock_init(&tx_priv->txdlock); +#endif + + ret = aicwf_bus_init(0, dev); + if (ret < 0) { + AICWFDBG(LOGERROR, "aicwf_bus_init err %d\n", ret); + goto out_free_bus; + } + + ret = aicwf_bus_start(bus_if); + if (ret < 0) { + AICWFDBG(LOGERROR, "aicwf_bus_start err %d\n", ret); + goto out_free_bus; + } + + ret = aicwf_rwnx_usb_platform_init(usb_dev); + if (ret < 0) { + AICWFDBG(LOGERROR, "aicwf_rwnx_usb_platform_init err %d\n", ret); + goto out_free_bus; + } + aicwf_hostif_ready(); + +#ifdef CONFIG_GPIO_WAKEUP + rwnx_register_hostwake_irq(usb_dev->dev); +#endif + + return 0; + +out_free_bus: + aicwf_bus_deinit(dev); + kfree(bus_if); +out_free_usb: + aicwf_usb_deinit(usb_dev); +out_free: + usb_err("failed with errno %d\n", ret); + vfree(usb_dev->usb_tx_buf); + vfree(usb_dev->usb_rx_buf); + kfree(usb_dev); + usb_set_intfdata(intf, NULL); + return ret; +} + +static void aicwf_usb_disconnect(struct usb_interface *intf) +{ + struct aic_usb_dev *usb_dev = + (struct aic_usb_dev *) usb_get_intfdata(intf); + AICWFDBG(LOGINFO, "%s Enter\r\n", __func__); + + if(g_rwnx_plat->wait_disconnect_cb == false){ + atomic_set(&aicwf_deinit_atomic, 0); + down(&aicwf_deinit_sem); + } + + if (!usb_dev){ + AICWFDBG(LOGERROR, "%s usb_dev is null \r\n", __func__); + return; + } + +#if 0 + if(timer_pending(&usb_dev->rwnx_hw->p2p_alive_timer) && usb_dev->rwnx_hw->is_p2p_alive == 1){ + printk("%s del timer rwnx_hw->p2p_alive_timer \r\n", __func__); + rwnx_del_timer(&usb_dev->rwnx_hw->p2p_alive_timer); + } +#endif + aicwf_bus_deinit(usb_dev->dev); + aicwf_usb_deinit(usb_dev); + +#ifdef CONFIG_GPIO_WAKEUP + rwnx_unregister_hostwake_irq(usb_dev->dev); +#endif + + kfree(usb_dev->bus_if); + vfree(usb_dev->usb_tx_buf); + vfree(usb_dev->usb_rx_buf); + kfree(usb_dev); + AICWFDBG(LOGINFO, "%s exit\r\n", __func__); + up(&aicwf_deinit_sem); + atomic_set(&aicwf_deinit_atomic, 1); +} + +static int aicwf_usb_suspend(struct usb_interface *intf, pm_message_t state) +{ + struct aic_usb_dev *usb_dev = + (struct aic_usb_dev *) usb_get_intfdata(intf); + struct rwnx_vif *rwnx_vif, *tmp; + + AICWFDBG(LOGINFO, "%s enter\r\n", __func__); +#ifdef CONFIG_WOWLAN + rwnx_send_dummy_reboot(usb_dev->rwnx_hw); +#endif + list_for_each_entry_safe(rwnx_vif, tmp, &usb_dev->rwnx_hw->vifs, list) { + if (rwnx_vif->ndev){ + netif_tx_stop_all_queues(rwnx_vif->ndev); + mdelay(1000); + } + } + aicwf_usb_state_change(usb_dev, USB_SLEEP_ST); + aicwf_bus_stop(usb_dev->bus_if); + + list_for_each_entry_safe(rwnx_vif, tmp, &usb_dev->rwnx_hw->vifs, list) { + if (rwnx_vif->ndev) + netif_device_detach(rwnx_vif->ndev); + } + +return 0; +} + +static int aicwf_usb_resume(struct usb_interface *intf) +{ + struct aic_usb_dev *usb_dev = + (struct aic_usb_dev *) usb_get_intfdata(intf); + struct rwnx_vif *rwnx_vif, *tmp; + AICWFDBG(LOGINFO, "%s enter\r\n", __func__); + list_for_each_entry_safe(rwnx_vif, tmp, &usb_dev->rwnx_hw->vifs, list) { + if (rwnx_vif->ndev) + netif_device_attach(rwnx_vif->ndev); + } + + if (usb_dev->state != USB_UP_ST){ + aicwf_bus_start(usb_dev->bus_if); + } + + list_for_each_entry_safe(rwnx_vif, tmp, &usb_dev->rwnx_hw->vifs, list) { + if (rwnx_vif->ndev){ + netif_tx_wake_all_queues(rwnx_vif->ndev); + } + } + return 0; +} + +static int aicwf_usb_reset_resume(struct usb_interface *intf) +{ + return aicwf_usb_resume(intf); +} + +static struct usb_device_id aicwf_usb_id_table[] = { +#ifndef CONFIG_USB_BT + {USB_DEVICE(USB_VENDOR_ID_AIC, USB_PRODUCT_ID_AIC8800)}, +#else + 
{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_AIC, USB_PRODUCT_ID_AIC8801, 0xff, 0xff, 0xff)}, + {USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_AIC, USB_PRODUCT_ID_AIC8800D81, 0xff, 0xff, 0xff)}, + {USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_AIC, USB_PRODUCT_ID_AIC8800D41, 0xff, 0xff, 0xff)}, + {USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_AIC, USB_PRODUCT_ID_AIC8800DC, 0xff, 0xff, 0xff)}, + {USB_DEVICE(USB_VENDOR_ID_AIC, USB_PRODUCT_ID_AIC8800DW)}, + {USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_AIC_V2, USB_PRODUCT_ID_AIC8800D81X2, 0xff, 0xff, 0xff)}, + {USB_DEVICE(USB_VENDOR_ID_AIC_V2, USB_PRODUCT_ID_AIC8800D89X2)}, + {USB_DEVICE(USB_VENDOR_ID_AIC, USB_PRODUCT_ID_AIC8800D83)}, + {USB_DEVICE(USB_VENDOR_ID_TP, USB_PRODUCT_ID_TP)}, + {USB_DEVICE(USB_VENDOR_ID_TP, USB_PRODUCT_ID_MERCURY)}, + {USB_DEVICE(USB_VENDOR_ID_TP, USB_PRODUCT_ID_FAST)}, + {USB_DEVICE(USB_VENDOR_ID_TENDA, USB_PRODUCT_ID_TENDA)}, + {USB_DEVICE(USB_VENDOR_ID_AIC_V2, USB_PRODUCT_ID_AIC8800D83)}, + {USB_DEVICE(USB_VENDOR_ID_AIC_V2, USB_PRODUCT_ID_AIC8800D84)}, + {USB_DEVICE(USB_VENDOR_ID_AIC_V2, USB_PRODUCT_ID_AIC8800D85)}, + {USB_DEVICE(USB_VENDOR_ID_AIC_V2, USB_PRODUCT_ID_AIC8800D88)}, +#endif + {} +}; + +MODULE_DEVICE_TABLE(usb, aicwf_usb_id_table); + +static struct usb_driver aicwf_usbdrvr = { + .name = KBUILD_MODNAME, + .probe = aicwf_usb_probe, + .disconnect = aicwf_usb_disconnect, + .id_table = aicwf_usb_id_table, + .suspend = aicwf_usb_suspend, + .resume = aicwf_usb_resume, + .reset_resume = aicwf_usb_reset_resume, +#ifdef ANDROID_PLATFORM + .supports_autosuspend = 1, +#else + .supports_autosuspend = 0, +#endif +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0) + .disable_hub_initiated_lpm = 1, +#endif +}; + +void aicwf_usb_register(void) +{ + if (usb_register(&aicwf_usbdrvr) < 0) { + usb_err("usb_register failed\n"); + } +} + +void aicwf_usb_exit(void) +{ + int retry = 5; + AICWFDBG(LOGINFO, "%s Enter\r\n", __func__); + + AICWFDBG(LOGDEBUG, "%s in_interrupt:%d in_softirq:%d in_atomic:%d\r\n", __func__, (int)in_interrupt(), (int)in_softirq(), (int)in_atomic()); + + do{ + AICWFDBG(LOGINFO, "aicwf_deinit_atomic is busy. waiting for 500ms retry:%d \r\n", + retry); + mdelay(500); + retry--; + if(retry == 0){ + break; + } + }while(atomic_read(&aicwf_deinit_atomic) == 0); + + atomic_set(&aicwf_deinit_atomic, 0); + if(down_timeout(&aicwf_deinit_sem, msecs_to_jiffies(SEM_TIMOUT)) != 0){ + AICWFDBG(LOGERROR, "%s semaphore waiting timeout\r\n", __func__); + } + + if(g_rwnx_plat){ + g_rwnx_plat->wait_disconnect_cb = false; + } + + + + if(!g_rwnx_plat || !g_rwnx_plat->enabled){ + AICWFDBG(LOGINFO, "g_rwnx_plat is not ready. 
waiting for 500ms\r\n"); + mdelay(500); + } + +#if 1 + if(g_rwnx_plat && g_rwnx_plat->enabled){ + rwnx_platform_deinit(g_rwnx_plat->usbdev->rwnx_hw); + } +#endif + + up(&aicwf_deinit_sem); + atomic_set(&aicwf_deinit_atomic, 1); + + AICWFDBG(LOGINFO, "%s usb_deregister \r\n", __func__); + + usb_deregister(&aicwf_usbdrvr); + //mdelay(500); + if(g_rwnx_plat){ + kfree(g_rwnx_plat); + } + + AICWFDBG(LOGINFO, "%s exit\r\n", __func__); + +} diff --git a/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/aicwf_usb.h b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/aicwf_usb.h new file mode 100644 index 0000000..0ab0202 --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/aicwf_usb.h @@ -0,0 +1,208 @@ +/** + * aicwf_usb.h + * + * USB function declarations + * + * Copyright (C) AICSemi 2018-2020 + */ + +#ifndef _AICWF_USB_H_ +#define _AICWF_USB_H_ + +#include +#include "rwnx_cmds.h" + +#ifdef AICWF_USB_SUPPORT + +/* USB Device ID */ +#define USB_VENDOR_ID_AIC 0xA69C +#define USB_VENDOR_ID_TP 0x2357 +#define USB_VENDOR_ID_TENDA 0x2604 +#define USB_VENDOR_ID_AIC_V2 0x368B + +#define USB_PRODUCT_ID_TP 0x014e +#define USB_PRODUCT_ID_MERCURY 0x014b +#define USB_PRODUCT_ID_FAST 0x014f + +#define USB_PRODUCT_ID_TENDA 0x001f + +#ifndef CONFIG_USB_BT +#define USB_PRODUCT_ID_AIC8800 0x8800 +#define USB_PRODUCT_ID_AIC8801 0x8801 +#define USB_PRODUCT_ID_AIC8800DC 0x88dc +#define USB_PRODUCT_ID_AIC8800DW 0x88dd +#define USB_PRODUCT_ID_AIC8800D81 0x8d81 +#define USB_PRODUCT_ID_AIC8800D81X2 0x8d91 +#define USB_PRODUCT_ID_AIC8800D89X2 0x8d99 +#else +#define USB_PRODUCT_ID_AIC8801 0x8801 +#define USB_PRODUCT_ID_AIC8800DC 0x88dc +#define USB_PRODUCT_ID_AIC8800DW 0x88dd +#define USB_PRODUCT_ID_AIC8800D81 0x8d81 +#define USB_PRODUCT_ID_AIC8800D41 0x8d41 +#define USB_PRODUCT_ID_AIC8800D81X2 0x8d91 +#define USB_PRODUCT_ID_AIC8800D89X2 0x8d99 +#define USB_PRODUCT_ID_AIC8800D83 0x8d83 +#define USB_PRODUCT_ID_AIC8800D84 0x8d84 +#define USB_PRODUCT_ID_AIC8800D85 0x8d85 +#define USB_PRODUCT_ID_AIC8800D88 0x8d88 +#endif + +enum AICWF_IC{ + PRODUCT_ID_AIC8801 = 0, + PRODUCT_ID_AIC8800DC, + PRODUCT_ID_AIC8800DW, + PRODUCT_ID_AIC8800D81, + PRODUCT_ID_AIC8800D81X2, + PRODUCT_ID_AIC8800D89X2 +}; + + +#define AICWF_USB_RX_URBS (20)//(200) +#ifdef CONFIG_USB_MSG_IN_EP +#define AICWF_USB_MSG_RX_URBS (100) +#endif +#ifdef CONFIG_USB_TX_AGGR +#define TXQLEN (2048*4) +#define AICWF_USB_TX_URBS (50) +#else +#define AICWF_USB_TX_URBS 200//(100) +#endif +#define AICWF_USB_TX_LOW_WATER (AICWF_USB_TX_URBS/4)//25% +#define AICWF_USB_TX_HIGH_WATER (AICWF_USB_TX_LOW_WATER*3)//75% +#ifdef CONFIG_PLATFORM_HI +#define AICWF_USB_AGGR_MAX_PKT_SIZE (2048*1) +#else +#define AICWF_USB_AGGR_MAX_PKT_SIZE (2048*10) +#endif +#define AICWF_USB_MSG_MAX_PKT_SIZE (2048) +#define AICWF_USB_MAX_PKT_SIZE (2048) +#define AICWF_USB_MAX_AMSDU_PKT_SIZE (2048*6) +#define AICWF_USB_FC_PERSTA_HIGH_WATER 64 +#define AICWF_USB_FC_PERSTA_LOW_WATER 16 + + +typedef enum { + USB_TYPE_DATA = 0X00, + USB_TYPE_CFG = 0X10, + USB_TYPE_CFG_CMD_RSP = 0X11, + USB_TYPE_CFG_DATA_CFM = 0X12, + USB_TYPE_CFG_PRINT = 0X13 +} usb_type; + +enum aicwf_usb_state { + USB_DOWN_ST, + USB_UP_ST, + USB_SLEEP_ST +}; + +struct aicwf_usb_buf { + struct list_head list; + struct aic_usb_dev *usbdev; + struct urb *urb; + struct sk_buff *skb; +#ifdef CONFIG_PREALLOC_RX_SKB + struct rx_buff *rx_buff; +#endif + #ifdef CONFIG_USB_NO_TRANS_DMA_MAP + u8 *data_buf; + dma_addr_t data_dma_trans_addr; + #endif + bool cfm; + #ifdef CONFIG_USB_TX_AGGR + u8 aggr_cnt; + #endif + u8* usb_align_data; +}; + +struct 
aic_usb_dev { + struct rwnx_hw *rwnx_hw; + struct aicwf_bus *bus_if; + struct usb_device *udev; + struct device *dev; + struct aicwf_rx_priv* rx_priv; + enum aicwf_usb_state state; + struct rwnx_cmd_mgr cmd_mgr; + +#ifdef CONFIG_USB_TX_AGGR + struct aicwf_tx_priv *tx_priv; +#endif + + struct usb_anchor rx_submitted; + struct work_struct rx_urb_work; +#ifdef CONFIG_USB_MSG_IN_EP + struct usb_anchor msg_rx_submitted; + struct work_struct msg_rx_urb_work; +#endif + + spinlock_t rx_free_lock; + spinlock_t tx_free_lock; + spinlock_t tx_post_lock; + spinlock_t tx_flow_lock; +#ifdef CONFIG_USB_MSG_IN_EP + spinlock_t msg_rx_free_lock; +#endif + + struct list_head rx_free_list; + struct list_head tx_free_list; + struct list_head tx_post_list; +#ifdef CONFIG_USB_MSG_IN_EP + struct list_head msg_rx_free_list; +#endif + + uint bulk_in_pipe; + uint bulk_out_pipe; +#ifdef CONFIG_USB_MSG_OUT_EP + uint msg_out_pipe; +#endif +#ifdef CONFIG_USB_MSG_IN_EP + uint msg_in_pipe; +#endif + + int tx_free_count; + int tx_post_count; + bool rx_prepare_ready; +#if 0 + struct aicwf_usb_buf usb_tx_buf[AICWF_USB_TX_URBS]; + struct aicwf_usb_buf usb_rx_buf[AICWF_USB_RX_URBS]; +#else + struct aicwf_usb_buf *usb_tx_buf; + struct aicwf_usb_buf *usb_rx_buf; +#endif + +#ifdef CONFIG_USB_MSG_IN_EP + struct aicwf_usb_buf usb_msg_rx_buf[AICWF_USB_MSG_RX_URBS]; +#endif + + int msg_finished; + wait_queue_head_t msg_wait; + ulong msg_busy; + struct urb *msg_out_urb; + #ifdef CONFIG_USB_NO_TRANS_DMA_MAP + dma_addr_t cmd_dma_trans_addr; + #endif + +#ifdef CONFIG_RX_TASKLET//AIDEN tasklet + struct tasklet_struct recv_tasklet; +#endif +#ifdef CONFIG_TX_TASKLET//AIDEN tasklet + struct tasklet_struct xmit_tasklet; +#endif + u16 chipid; + bool tbusy; +}; + +extern void aicwf_usb_exit(void); +extern void aicwf_usb_register(void); +extern void aicwf_usb_tx_flowctrl(struct rwnx_hw *rwnx_hw, bool state); +#ifdef CONFIG_USB_MSG_IN_EP +int usb_msg_busrx_thread(void *data); +#endif +int usb_bustx_thread(void *data); +int usb_busrx_thread(void *data); + + +extern void aicwf_hostif_ready(void); + +#endif /* AICWF_USB_SUPPORT */ +#endif /* _AICWF_USB_H_ */ diff --git a/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/aicwf_wext_linux.c b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/aicwf_wext_linux.c new file mode 100644 index 0000000..d97a82f --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/aicwf_wext_linux.c @@ -0,0 +1,1224 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include "aicwf_debug.h" +#include "rwnx_msg_tx.h" +#include "aicwf_wext_linux.h" +#include "rwnx_defs.h" + +#define WLAN_CAPABILITY_ESS (1<<0) +#define WLAN_CAPABILITY_IBSS (1<<1) +#define WLAN_CAPABILITY_PRIVACY (1<<4) + +#define IEEE80211_HT_CAP_SGI_20 0x0020 +#define IEEE80211_HT_CAP_SGI_40 0x0040 + +#define IEEE80211_HE_CH_BW_SET_160_80P80 1 << 4 +#define IEEE80211_HE_CH_BW_SET_160_IN_5G 1 << 3 +#define IEEE80211_HE_CH_BW_SET_40_AND_80_IN_5G 1 << 2 +#define IEEE80211_HE_CH_BW_SET_40_IN_2_4G 1 << 1 + +#if WIRELESS_EXT < 17 + #define IW_QUAL_QUAL_INVALID 0x10 + #define IW_QUAL_LEVEL_INVALID 0x20 + #define IW_QUAL_NOISE_INVALID 0x40 + #define IW_QUAL_QUAL_UPDATED 0x1 + #define IW_QUAL_LEVEL_UPDATED 0x2 + #define IW_QUAL_NOISE_UPDATED 0x4 +#endif + +#define MAX_WPA_IE_LEN (256) + + +/* 20/40/80, ShortGI, MCS Rate */ +const u16 VHT_MCS_DATA_RATE[3][2][30] = { + { + { + 13, 26, 39, 52, 78, 104, 117, 130, 156, 156, + 26, 52, 78, 104, 156, 208, 234, 260, 312, 312, + 39, 78, 117, 156, 234, 312, 351, 390, 468, 520 + }, /* Long GI, 20MHz 
*/ + { + 14, 29, 43, 58, 87, 116, 130, 144, 173, 173, + 29, 58, 87, 116, 173, 231, 260, 289, 347, 347, + 43, 87, 130, 173, 260, 347, 390, 433, 520, 578 + } + }, /* Short GI, 20MHz */ + { + { + 27, 54, 81, 108, 162, 216, 243, 270, 324, 360, + 54, 108, 162, 216, 324, 432, 486, 540, 648, 720, + 81, 162, 243, 324, 486, 648, 729, 810, 972, 1080 + }, /* Long GI, 40MHz */ + { + 30, 60, 90, 120, 180, 240, 270, 300, 360, 400, + 60, 120, 180, 240, 360, 480, 540, 600, 720, 800, + 90, 180, 270, 360, 540, 720, 810, 900, 1080, 1200 + } + }, /* Short GI, 40MHz */ + { + { + 59, 117, 176, 234, 351, 468, 527, 585, 702, 780, + 117, 234, 351, 468, 702, 936, 1053, 1170, 1404, 1560, + 176, 351, 527, 702, 1053, 1404, 1580, 1755, 2106, 2340 + }, /* Long GI, 80MHz */ + { + 65, 130, 195, 260, 390, 520, 585, 650, 780, 867, + 130, 260, 390, 520, 780, 1040, 1170, 1300, 1560, 1734, + 195, 390, 585, 780, 1170, 1560, 1755, 1950, 2340, 2600 + } /* Short GI, 80MHz */ + } +}; + + +/*HE 20/40/80,MCS Rate */ +const u16 HE_MCS_DATA_RATE[3][30] = { + { + 9, 17, 26, 34, 52, 69, 77, 86, 103, 115, + 129, 143, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 + }, /* 20MHz */ + { + 17, 34, 52, 69, 103, 138, 155, 172, 207, 229, + 258, 286, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 + }, /* 40MHz */ + { + 36, 72, 108, 144, 216, 288, 324, 360, 432, 480, + 540, 601, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 + }, /* 80MHz */ + +}; + + +#if WIRELESS_EXT >= 17 +struct iw_statistics iwstats; + +static struct iw_statistics *aicwf_get_wireless_stats(struct net_device *dev) +{ + int tmp_level = -100; + int tmp_qual = 0; + int tmp_noise = 0; + + struct rwnx_vif* rwnx_vif = netdev_priv(dev); + struct rwnx_hw* rwnx_hw = rwnx_vif->rwnx_hw; + struct rwnx_sta *sta = NULL; + + union rwnx_rate_ctrl_info *rate_info; + struct mm_get_sta_info_cfm cfm; + + AICWFDBG(LOGTRACE, "%s Enter", __func__); + + if(rwnx_vif->sta.ap){ + sta = rwnx_vif->sta.ap; + rwnx_send_get_sta_info_req(rwnx_hw, sta->sta_idx, &cfm); + rate_info = (union rwnx_rate_ctrl_info *)&cfm.rate_info; + tmp_level = cfm.rssi; + } + + + iwstats.qual.level = tmp_level; + iwstats.qual.qual = tmp_qual; + iwstats.qual.noise = tmp_noise; + iwstats.qual.updated = 0x07; + iwstats.qual.updated = iwstats.qual.updated | IW_QUAL_DBM; + + return &iwstats; +} +#endif + + +static int aicwf_get_name(struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, char *extra) +{ + struct rwnx_vif* rwnx_vif = netdev_priv(dev); + struct rwnx_hw* rwnx_hw = rwnx_vif->rwnx_hw; + struct rwnx_sta *sta = NULL; + + union rwnx_rate_ctrl_info *rate_info; + struct mm_get_sta_info_cfm cfm; + + AICWFDBG(LOGTRACE, "%s Enter", __func__); + + if(rwnx_vif->sta.ap){ + sta = rwnx_vif->sta.ap; + rwnx_send_get_sta_info_req(rwnx_hw, sta->sta_idx, &cfm); + rate_info = (union rwnx_rate_ctrl_info *)&cfm.rate_info; + + switch (rate_info->formatModTx) { + case FORMATMOD_NON_HT: + case FORMATMOD_NON_HT_DUP_OFDM: + snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11bg"); + break; + case FORMATMOD_HT_MF: + case FORMATMOD_HT_GF: + snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11n"); + break; + case FORMATMOD_VHT: + snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11ac"); + break; +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0) + case FORMATMOD_HE_MU: + case FORMATMOD_HE_SU: + case FORMATMOD_HE_ER: + snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11ax"); + break; +#else + //kernel not support he + case FORMATMOD_HE_MU: + case FORMATMOD_HE_SU: + case FORMATMOD_HE_ER: + snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11ax"); 
+ break; +#endif + } + + + }else{ + snprintf(wrqu->name, IFNAMSIZ, "unassociated"); + } + + + return 0; +} + +static int aicwf_get_freq(struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, char *extra) +{ + struct rwnx_vif* rwnx_vif = netdev_priv(dev); + + + AICWFDBG(LOGTRACE, "%s Enter", __func__); + + if(rwnx_vif->sta.ap){ + wrqu->freq.m = rwnx_vif->sta.ap->center_freq * 100000; + wrqu->freq.e = 1; + wrqu->freq.i = ieee80211_frequency_to_channel(rwnx_vif->sta.ap->center_freq); + }else{ + wrqu->freq.m = 2412 * 100000; + wrqu->freq.e = 1; + wrqu->freq.i = 1; + } + + return 0; +} + +static int aicwf_get_mode(struct net_device *dev, struct iw_request_info *a, + union iwreq_data *wrqu, char *b) +{ + + AICWFDBG(LOGTRACE, "%s Enter", __func__); + + wrqu->mode = IW_MODE_AUTO; +#if 0 + if (check_fwstate(pmlmepriv, WIFI_STATION_STATE) == _TRUE) + wrqu->mode = IW_MODE_INFRA; + else if ((check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) == _TRUE) || + (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) == _TRUE)) + + wrqu->mode = IW_MODE_ADHOC; + else if (check_fwstate(pmlmepriv, WIFI_AP_STATE) == _TRUE) + wrqu->mode = IW_MODE_MASTER; + else if (check_fwstate(pmlmepriv, WIFI_MONITOR_STATE) == _TRUE) + wrqu->mode = IW_MODE_MONITOR; + else + wrqu->mode = IW_MODE_AUTO; +#endif + + return 0; + +} + + +static int aicwf_get_range(struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, char *extra) +{ + struct iw_range *range = (struct iw_range *)extra; + u16 val = 0; + + AICWFDBG(LOGTRACE, "%s Enter", __func__); + + + wrqu->data.length = sizeof(*range); + memset(range, 0, sizeof(*range)); + + /* Let's try to keep this struct in the same order as in + * linux/include/wireless.h + */ + + /* TODO: See what values we can set, and remove the ones we can't + * set, or fill them with some default data. + */ + + /* ~5 Mb/s real (802.11b) */ + range->throughput = 5 * 1000 * 1000; + + /* TODO: Not used in 802.11b? + * range->min_nwid; Minimal NWID we are able to set */ + /* TODO: Not used in 802.11b? + * range->max_nwid; Maximal NWID we are able to set */ + + /* Old Frequency (backward compat - moved lower ) */ + /* range->old_num_channels; + * range->old_num_frequency; + * range->old_freq[6]; Filler to keep "version" at the same offset */ + + /* signal level threshold range */ + + /* Quality of link & SNR stuff */ + /* Quality range (link, level, noise) + * If the quality is absolute, it will be in the range [0 ; max_qual], + * if the quality is dBm, it will be in the range [max_qual ; 0]. + * Don't forget that we use 8 bit arithmetics... + * + * If percentage range is 0~100 + * Signal strength dbm range logical is -100 ~ 0 + * but usually value is -90 ~ -20 + */ + range->max_qual.qual = 100; +#ifdef CONFIG_SIGNAL_DISPLAY_DBM + range->max_qual.level = (u8)-100; + range->max_qual.noise = (u8)-100; + range->max_qual.updated = IW_QUAL_ALL_UPDATED; /* Updated all three */ + range->max_qual.updated |= IW_QUAL_DBM; +#else /* !CONFIG_SIGNAL_DISPLAY_DBM */ + /* percent values between 0 and 100. */ + range->max_qual.level = 100; + range->max_qual.noise = 100; + range->max_qual.updated = IW_QUAL_ALL_UPDATED; /* Updated all three */ +#endif /* !CONFIG_SIGNAL_DISPLAY_DBM */ + + /* This should contain the average/typical values of the quality + * indicator. This should be the threshold between a "good" and + * a "bad" link (example : monitor going from green to orange). + * Currently, user space apps like quality monitors don't have any + * way to calibrate the measurement. 
With this, they can split
+ * the range between 0 and max_qual in different quality level
+ * (using a geometric subdivision centered on the average).
+ * I expect that people doing the user space apps will feedback
+ * us on which value we need to put in each driver... */
+ range->avg_qual.qual = 92; /* > 8% missed beacons is 'bad' */
+#ifdef CONFIG_SIGNAL_DISPLAY_DBM
+ /* TODO: Find real 'good' to 'bad' threshold value for RSSI */
+ range->avg_qual.level = (u8)-70;
+ range->avg_qual.noise = 0;
+ range->avg_qual.updated = IW_QUAL_ALL_UPDATED; /* Updated all three */
+ range->avg_qual.updated |= IW_QUAL_DBM;
+#else /* !CONFIG_SIGNAL_DISPLAY_DBM */
+ /* TODO: Find real 'good' to 'bad' threshold value for RSSI */
+ range->avg_qual.level = 30;
+ range->avg_qual.noise = 100;
+ range->avg_qual.updated = IW_QUAL_ALL_UPDATED; /* Updated all three */
+#endif /* !CONFIG_SIGNAL_DISPLAY_DBM */
+#if 0
+ range->num_bitrates = RATE_COUNT;
+
+ for (i = 0; i < RATE_COUNT && i < IW_MAX_BITRATES; i++)
+ range->bitrate[i] = rtw_rates[i];
+
+ range->min_frag = MIN_FRAG_THRESHOLD;
+ range->max_frag = MAX_FRAG_THRESHOLD;
+#endif
+
+ range->pm_capa = 0;
+
+ range->we_version_compiled = WIRELESS_EXT;
+ range->we_version_source = 16;
+
+ /* range->retry_capa; What retry options are supported
+ * range->retry_flags; How to decode max/min retry limit
+ * range->r_time_flags; How to decode max/min retry life
+ * range->min_retry; Minimal number of retries
+ * range->max_retry; Maximal number of retries
+ * range->min_r_time; Minimal retry lifetime
+ * range->max_r_time; Maximal retry lifetime */
+#if 0
+ for (i = 0, val = 0; i < rfctl->max_chan_nums; i++) {
+
+ /* Include only legal frequencies for some countries */
+ if (rfctl->channel_set[i].ChannelNum != 0) {
+ range->freq[val].i = rfctl->channel_set[i].ChannelNum;
+ range->freq[val].m = rtw_ch2freq(rfctl->channel_set[i].ChannelNum) * 100000;
+ range->freq[val].e = 1;
+ val++;
+ }
+
+ if (val == IW_MAX_FREQUENCIES)
+ break;
+ }
+#endif
+ range->num_channels = val;
+ range->num_frequency = val;
+
+ /* Commented by Albert 2009/10/13
+ * The following code will provide the security capability to the network manager.
+ * If the driver doesn't provide this capability to the network manager,
+ * the WPA/WPA2 routers can't be chosen in the network manager. 
*/ + + /* + #define IW_SCAN_CAPA_NONE 0x00 + #define IW_SCAN_CAPA_ESSID 0x01 + #define IW_SCAN_CAPA_BSSID 0x02 + #define IW_SCAN_CAPA_CHANNEL 0x04 + #define IW_SCAN_CAPA_MODE 0x08 + #define IW_SCAN_CAPA_RATE 0x10 + #define IW_SCAN_CAPA_TYPE 0x20 + #define IW_SCAN_CAPA_TIME 0x40 + */ + +#if WIRELESS_EXT > 17 + range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 | + IW_ENC_CAPA_CIPHER_TKIP | IW_ENC_CAPA_CIPHER_CCMP; +#endif + +#ifdef IW_SCAN_CAPA_ESSID /* WIRELESS_EXT > 21 */ + range->scan_capa = IW_SCAN_CAPA_ESSID | IW_SCAN_CAPA_TYPE | IW_SCAN_CAPA_BSSID | + IW_SCAN_CAPA_CHANNEL | IW_SCAN_CAPA_MODE | IW_SCAN_CAPA_RATE; +#endif + + + + return 0; + +} + + +static char *aicwf_get_iwe_stream_mac_addr(struct rwnx_hw* rwnx_hw, + struct iw_request_info *info, struct scanu_result_wext *scan_re, + char *start, char *stop, struct iw_event *iwe) +{ + /* AP MAC address */ + iwe->cmd = SIOCGIWAP; + iwe->u.ap_addr.sa_family = ARPHRD_ETHER; + + if(scan_re->bss && &scan_re->bss->bssid[0]){ + memcpy(iwe->u.ap_addr.sa_data, scan_re->bss->bssid, ETH_ALEN); + } + + start = iwe_stream_add_event(info, start, stop, iwe, IW_EV_ADDR_LEN); + return start; +} + + +static inline char *aicwf_get_iwe_stream_essid(struct rwnx_hw* rwnx_hw, + struct iw_request_info *info, struct scanu_result_wext *scan_re, + char *start, char *stop, struct iw_event *iwe) +{ + struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt*)scan_re->payload; + const u8 *ie = mgmt->u.beacon.variable; + u8 *ssid; + int ssid_len = 0; + + //get ssid len form ie + ssid_len = ie[1]; + + //get ssid form ie + ssid = (u8*)vmalloc(sizeof(char)* (ssid_len + 1)); + memset(ssid, 0, (ssid_len + 1)); + memcpy(ssid, &ie[2], ssid_len); + + + //AICWFDBG(LOGDEBUG, "%s len:%d ssid:%s\r\n", __func__, ssid_len, ssid); + + /* Add the ESSID */ + iwe->cmd = SIOCGIWESSID; + iwe->u.data.flags = 1; + iwe->u.data.length = min((u16)ssid_len, (u16)32); + start = iwe_stream_add_point(info, start, stop, iwe, ssid); + + vfree(ssid); + + return start; +} + + + +static inline char *aicwf_get_iwe_stream_protocol(struct rwnx_hw* rwnx_hw, + struct iw_request_info *info, struct scanu_result_wext *scan_re, + char *start, char *stop, struct iw_event *iwe) +{ + u16 ht_cap = false; + u16 vht_cap = false; + u16 he_cap = false; + struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt*)scan_re->payload; + u8 *payload = mgmt->u.beacon.variable; + const u8 *ie_content; + + /* parsing HT_CAP_IE */ + ie_content = NULL; + ie_content = cfg80211_find_ie(WLAN_EID_HT_CAPABILITY, payload, scan_re->ind->length); + if (ie_content != NULL){ + ht_cap = true; + } + + /* parsing VHT_CAP_IE */ + ie_content = NULL; + ie_content = cfg80211_find_ie(WLAN_EID_VHT_CAPABILITY, payload, scan_re->ind->length); + if (ie_content != NULL){ + vht_cap = true; + } + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0)|| defined(CONFIG_HE_FOR_OLD_KERNEL) + /* parsing HE_CAP_IE */ + ie_content = NULL; + ie_content = cfg80211_find_ie(WLAN_EID_EXTENSION, payload, scan_re->ind->length); + if (ie_content != NULL && ie_content[2] == WLAN_EID_EXT_HE_CAPABILITY){ + he_cap = true; + } +#endif + + /* Add the protocol name */ + iwe->cmd = SIOCGIWNAME; + + if (ieee80211_frequency_to_channel(scan_re->ind->center_freq) > 14) { + if (he_cap == true){ + snprintf(iwe->u.name, IFNAMSIZ, "IEEE 802.11ax"); + }else if(vht_cap == true){ + snprintf(iwe->u.name, IFNAMSIZ, "IEEE 802.11ac"); + }else{ + if (ht_cap == true) + snprintf(iwe->u.name, IFNAMSIZ, "IEEE 802.11an"); + else + snprintf(iwe->u.name, IFNAMSIZ, "IEEE 802.11a"); + } + } else { + if(he_cap 
== true){ + snprintf(iwe->u.name, IFNAMSIZ, "IEEE 802.11ax"); + }else if (ht_cap == true){ + snprintf(iwe->u.name, IFNAMSIZ, "IEEE 802.11bgn"); + }else{ + snprintf(iwe->u.name, IFNAMSIZ, "IEEE 802.11bg"); + } + } + + start = iwe_stream_add_event(info, start, stop, iwe, IW_EV_CHAR_LEN); + return start; +} + + + +static inline char *aicwf_get_iwe_stream_rssi(struct rwnx_hw* rwnx_hw, + struct iw_request_info *info, struct scanu_result_wext *scan_re, + char *start, char *stop, struct iw_event *iwe) +{ + iwe->cmd = IWEVQUAL; + + iwe->u.qual.updated = IW_QUAL_QUAL_UPDATED | IW_QUAL_LEVEL_UPDATED + | IW_QUAL_NOISE_INVALID + | IW_QUAL_DBM; + + iwe->u.qual.level = (u8)scan_re->ind->rssi; + iwe->u.qual.qual = 100;//scan_re->bss->signal; + iwe->u.qual.noise = 0; + + start = iwe_stream_add_event(info, start, stop, iwe, IW_EV_QUAL_LEN); + + return start; +} + +static inline char *aicwf_get_iwe_stream_chan(struct rwnx_hw* rwnx_hw, + struct iw_request_info *info, struct scanu_result_wext *scan_re, + char *start, char *stop, struct iw_event *iwe) +{ + /* Add frequency/channel */ + iwe->cmd = SIOCGIWFREQ; + iwe->u.freq.m = scan_re->ind->center_freq * 100000; + iwe->u.freq.e = 1; + iwe->u.freq.i = ieee80211_frequency_to_channel(scan_re->ind->center_freq); + start = iwe_stream_add_event(info, start, stop, iwe, IW_EV_FREQ_LEN); + + return start; +} + + +static inline char *aicwf_get_iwe_stream_mode(struct rwnx_hw* rwnx_hw, + struct iw_request_info *info, struct scanu_result_wext *scan_re, + char *start, char *stop, struct iw_event *iwe) +{ + + u16 cap = scan_re->bss->capability; + /* Add mode */ + if (cap & (WLAN_CAPABILITY_IBSS | WLAN_CAPABILITY_ESS)) { + iwe->cmd = SIOCGIWMODE; + if (cap & WLAN_CAPABILITY_ESS) + iwe->u.mode = IW_MODE_MASTER; + else + iwe->u.mode = IW_MODE_ADHOC; + + start = iwe_stream_add_event(info, start, stop, iwe, IW_EV_UINT_LEN); + } + return start; + +} + +static inline char *aicwf_get_iwe_stream_encryption(struct rwnx_hw* rwnx_hw, + struct iw_request_info *info, struct scanu_result_wext *scan_re, + char *start, char *stop, struct iw_event *iwe) +{ + struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt*)scan_re->payload; + const u8 *ie = mgmt->u.beacon.variable; + u16 cap = scan_re->bss->capability; + u8 *ssid; + int ssid_len = 0; + + //get ssid len form ie + ssid_len = ie[1]; + + //get ssid form ie + ssid = (u8*)vmalloc(sizeof(char)* (ssid_len + 1)); + memset(ssid, 0, (ssid_len + 1)); + memcpy(ssid, &ie[2], ssid_len); + + + /* Add encryption capability */ + iwe->cmd = SIOCGIWENCODE; + if (cap & WLAN_CAPABILITY_PRIVACY) + iwe->u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY; + else + iwe->u.data.flags = IW_ENCODE_DISABLED; + iwe->u.data.length = 0; + start = iwe_stream_add_point(info, start, stop, iwe, ssid); + vfree(ssid); + + return start; + +} + +static inline char *aicwf_get_iwe_stream_rate(struct rwnx_hw* rwnx_hw, + struct iw_request_info *info, struct scanu_result_wext *scan_re, + char *start, char *stop, struct iw_event *iwe) +{ + struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt*)scan_re->payload; + u8 *payload = mgmt->u.beacon.variable; + const u8 *ie_content; + + u16 mcs_rate = 0; + u8 bw_40MHz = 0; + u8 short_GI = 0; + u16 max_rate = 0; + u8 bw_160MHz = 0; + + u16 ht_cap = false; + struct ieee80211_ht_cap *ht_capie; + + u16 vht_cap = false; + u8 tx_mcs_map[2]; + u8 tx_mcs_index = 0; + u16 vht_data_rate = 0; + + u16 he_cap = false; + u8 he_ch_width_set = 0; + u8 he_bw = 0; + + /* parsing HT_CAP_IE */ + ie_content = NULL; + ie_content = 
cfg80211_find_ie(WLAN_EID_HT_CAPABILITY, payload, scan_re->ind->length); + if (ie_content != NULL){ + ht_cap = true; + ht_capie = (struct ieee80211_ht_cap *)(ie_content + 2); + bw_40MHz = (ht_capie->cap_info & NL80211_CHAN_WIDTH_40) ? 1 : 0; + short_GI = (ht_capie->cap_info & (IEEE80211_HT_CAP_SGI_20 | IEEE80211_HT_CAP_SGI_40)) ? 1 : 0; + memcpy(&mcs_rate, ht_capie->mcs.rx_mask, 2); + } + + + + /* parsing VHT_CAP_IE */ + ie_content = NULL; + ie_content = cfg80211_find_ie(WLAN_EID_VHT_CAPABILITY, payload, scan_re->ind->length); + if (ie_content != NULL){ + + vht_cap = true; + bw_160MHz = ((*(u8*)(ie_content + 2)) >> 2) & ((u8)(0xFF >> (8 - (2)))); + if (bw_160MHz){ + short_GI = ((*(u8*)(ie_content + 2)) >> 6) & ((u8)(0xFF >> (8 - (1)))); + }else{ + short_GI = ((*(u8*)(ie_content + 2)) >> 5) & ((u8)(0xFF >> (8 - (1)))); + } + + memcpy(tx_mcs_map, ((ie_content + 2)+8), 2); + + tx_mcs_index = (tx_mcs_map[0] & 0x0F) - 1; + if(ieee80211_frequency_to_channel(scan_re->ind->center_freq) > 14){ + vht_data_rate = VHT_MCS_DATA_RATE[2][short_GI][tx_mcs_index]; + }else{ + vht_data_rate = VHT_MCS_DATA_RATE[1][short_GI][tx_mcs_index]; + } + //TO DO: + //need to counter antenna number for AC + } + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0)|| defined(CONFIG_HE_FOR_OLD_KERNEL) + /* parsing HE_CAP_IE */ + ie_content = NULL; + /*0xFF+len+WLAN_EID_EXTENSION+playload */ + ie_content = cfg80211_find_ie(WLAN_EID_EXTENSION, payload, scan_re->ind->length); + if (ie_content != NULL && ie_content[2] == WLAN_EID_EXT_HE_CAPABILITY){ + he_cap = true; + he_ch_width_set = ie_content[8]; + if(he_ch_width_set & IEEE80211_HE_CH_BW_SET_160_80P80){ + he_bw = NL80211_CHAN_WIDTH_80; + }else if(he_ch_width_set & IEEE80211_HE_CH_BW_SET_160_IN_5G){ + he_bw = NL80211_CHAN_WIDTH_160; + }else if(he_ch_width_set & IEEE80211_HE_CH_BW_SET_40_AND_80_IN_5G){ + he_bw = NL80211_CHAN_WIDTH_40; + }else if(he_ch_width_set & IEEE80211_HE_CH_BW_SET_40_IN_2_4G){ + he_bw = NL80211_CHAN_WIDTH_40; + }else{ + he_bw = NL80211_CHAN_WIDTH_20; + } + //TO DO: + //need to counter antenna number for AX + } +#endif + + if(he_cap == true){ + if(he_bw == NL80211_CHAN_WIDTH_20){ + max_rate = 144; + }else if(he_bw == NL80211_CHAN_WIDTH_40){ + max_rate = 287; + }else if(he_bw == NL80211_CHAN_WIDTH_80){ + max_rate = 601; + }else if(he_bw == NL80211_CHAN_WIDTH_160){ + max_rate = 1147; + } + max_rate = max_rate * 2; + }else if(vht_cap == true){ + max_rate = vht_data_rate; + }else if (ht_cap == true) { + if (mcs_rate & 0x8000) /* MCS15 */ + max_rate = (bw_40MHz) ? ((short_GI) ? 300 : 270) : ((short_GI) ? 144 : 130); + + else if (mcs_rate & 0x0080) /* MCS7 */ + max_rate = (bw_40MHz) ? ((short_GI) ? 150 : 135) : ((short_GI) ? 72 : 65); + else { /* default MCS7 */ + max_rate = (bw_40MHz) ? ((short_GI) ? 150 : 135) : ((short_GI) ? 
72 : 65); + } + } + + iwe->cmd = SIOCGIWRATE; + iwe->u.bitrate.fixed = iwe->u.bitrate.disabled = 0; + iwe->u.bitrate.value = max_rate * 1000000; + start = iwe_stream_add_event(info, start, stop, iwe, IW_EV_PARAM_LEN); + return start ; +} + + + +int aic_get_sec_ie(u8 *in_ie, uint in_len, u8 *rsn_ie, u16 *rsn_len, u8 *wpa_ie, u16 *wpa_len) +{ + + u8 authmode; + u8 sec_idx; + u8 wpa_oui[4] = {0x00, 0x50, 0xf2, 0x01}; + uint cnt = 0; + + /* Search required WPA or WPA2 IE and copy to sec_ie[ ] */ + + cnt = 0; + + sec_idx = 0; + + while (cnt < in_len) { + authmode = in_ie[cnt]; + + if ((authmode == 0xdd/*_WPA_IE_ID_*/) && (memcmp(&in_ie[cnt + 2], &wpa_oui[0], 4) == 0)) { + if (wpa_ie) + memcpy(wpa_ie, &in_ie[cnt], in_ie[cnt + 1] + 2); + + *wpa_len = in_ie[cnt + 1] + 2; + cnt += in_ie[cnt + 1] + 2; /* get next */ + } else { + if (authmode == 0x30/*_WPA2_IE_ID_*/) { + if (rsn_ie) + memcpy(rsn_ie, &in_ie[cnt], in_ie[cnt + 1] + 2); + + *rsn_len = in_ie[cnt + 1] + 2; + cnt += in_ie[cnt + 1] + 2; /* get next */ + } else { + cnt += in_ie[cnt + 1] + 2; /* get next */ + } + } + + } + + + return *rsn_len + *wpa_len; + +} + + +static inline char *aicwf_get_iwe_stream_wpa_wpa2(struct rwnx_hw* rwnx_hw, + struct iw_request_info *info, struct scanu_result_wext *scan_re, + char *start, char *stop, struct iw_event *iwe) +{ + struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt*)scan_re->payload; + u8 *payload = mgmt->u.beacon.variable; + int buf_size = MAX_WPA_IE_LEN * 2; + u8 *pbuf; + + u8 wpa_ie[255] = {0}, rsn_ie[255] = {0}; + u16 i, wpa_len = 0, rsn_len = 0; + u8 *p; + int out_len = 0; + + pbuf = (u8*)vmalloc(sizeof(u8) * buf_size); + + if (pbuf) { + p = pbuf; + + /* parsing WPA/WPA2 IE */ + out_len = aic_get_sec_ie(payload , scan_re->ind->length, rsn_ie, &rsn_len, wpa_ie, &wpa_len); + + if (wpa_len > 0) { + + memset(pbuf, 0, buf_size); + p += sprintf(p, "wpa_ie="); + for (i = 0; i < wpa_len; i++) + p += sprintf(p, "%02x", wpa_ie[i]); + + if (wpa_len > 100) { + printk("-----------------Len %d----------------\n", wpa_len); + for (i = 0; i < wpa_len; i++) + printk("%02x ", wpa_ie[i]); + printk("\n"); + printk("-----------------Len %d----------------\n", wpa_len); + } + + memset(iwe, 0, sizeof(*iwe)); + iwe->cmd = IWEVCUSTOM; + iwe->u.data.length = strlen(pbuf); + start = iwe_stream_add_point(info, start, stop, iwe, pbuf); + + memset(iwe, 0, sizeof(*iwe)); + iwe->cmd = IWEVGENIE; + iwe->u.data.length = wpa_len; + start = iwe_stream_add_point(info, start, stop, iwe, wpa_ie); + } + if (rsn_len > 0) { + + memset(pbuf, 0, buf_size); + p += sprintf(p, "rsn_ie="); + for (i = 0; i < rsn_len; i++) + p += sprintf(p, "%02x", rsn_ie[i]); + memset(iwe, 0, sizeof(*iwe)); + iwe->cmd = IWEVCUSTOM; + iwe->u.data.length = strlen(pbuf); + start = iwe_stream_add_point(info, start, stop, iwe, pbuf); + + memset(iwe, 0, sizeof(*iwe)); + iwe->cmd = IWEVGENIE; + iwe->u.data.length = rsn_len; + start = iwe_stream_add_point(info, start, stop, iwe, rsn_ie); + } + + vfree(pbuf); + } + return start; +} + + +u8 aicwf_get_is_wps_ie(u8 *ie_ptr, uint *wps_ielen) +{ + u8 match = false; + u8 eid, wps_oui[4] = {0x0, 0x50, 0xf2, 0x04}; + + if (ie_ptr == NULL) + return match; + + eid = ie_ptr[0]; + + if ((eid == 0xdd/*_WPA_IE_ID_*/) && (memcmp(&ie_ptr[2], wps_oui, 4) == 0)) { + *wps_ielen = ie_ptr[1] + 2; + match = true; + } + return match; +} + + +static inline char *aicwf_get_iwe_stream_wps(struct rwnx_hw* rwnx_hw, + struct iw_request_info *info, struct scanu_result_wext *scan_re, + char *start, char *stop, struct iw_event *iwe) +{ + + struct 
ieee80211_mgmt *mgmt = (struct ieee80211_mgmt*)scan_re->payload; + u8 *payload = mgmt->u.beacon.variable; + + /* parsing WPS IE */ + uint cnt = 0, total_ielen; + u8 *wpsie_ptr = NULL; + uint wps_ielen = 0; + u8 *ie_ptr = payload; + + total_ielen = scan_re->ind->length; + + while (cnt < total_ielen) { + if (aicwf_get_is_wps_ie(&ie_ptr[cnt], &wps_ielen) && (wps_ielen > 2)) { + wpsie_ptr = &ie_ptr[cnt]; + iwe->cmd = IWEVGENIE; + iwe->u.data.length = (u16)wps_ielen; + start = iwe_stream_add_point(info, start, stop, iwe, wpsie_ptr); + } + cnt += ie_ptr[cnt + 1] + 2; /* goto next */ + } + return start; +} + + +static char *translate_scan(struct rwnx_hw* rwnx_hw, + struct iw_request_info *info, struct scanu_result_wext *scan_re, + char *start, char *stop) +{ + struct iw_event iwe; + memset(&iwe, 0, sizeof(iwe)); + + if(scan_re->bss){ + start = aicwf_get_iwe_stream_mac_addr(rwnx_hw, info, scan_re, start, stop, &iwe); + start = aicwf_get_iwe_stream_essid(rwnx_hw, info, scan_re, start, stop, &iwe); + start = aicwf_get_iwe_stream_protocol(rwnx_hw, info, scan_re, start, stop, &iwe); + start = aicwf_get_iwe_stream_chan(rwnx_hw, info, scan_re, start, stop, &iwe); + start = aicwf_get_iwe_stream_mode(rwnx_hw, info, scan_re, start, stop, &iwe); + start = aicwf_get_iwe_stream_encryption(rwnx_hw, info, scan_re, start, stop, &iwe); + start = aicwf_get_iwe_stream_rate(rwnx_hw, info, scan_re, start, stop, &iwe); + start = aicwf_get_iwe_stream_wpa_wpa2(rwnx_hw, info, scan_re, start, stop, &iwe); + start = aicwf_get_iwe_stream_wps(rwnx_hw, info, scan_re, start, stop, &iwe); + start = aicwf_get_iwe_stream_rssi(rwnx_hw, info, scan_re, start, stop, &iwe); + return start; + } else{ + return start; + } +} + + + +static int aicwf_get_wap(struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, char *extra) +{ + + struct rwnx_vif* rwnx_vif = netdev_priv(dev); + + + AICWFDBG(LOGTRACE, "%s Enter", __func__); + + wrqu->ap_addr.sa_family = ARPHRD_ETHER; + + memset(wrqu->ap_addr.sa_data, 0, ETH_ALEN); + + if(rwnx_vif->sta.ap){ + memcpy(wrqu->ap_addr.sa_data, rwnx_vif->sta.ap->mac_addr, ETH_ALEN); + }else{ + memset(wrqu->ap_addr.sa_data, 0, ETH_ALEN); + } + + return 0; + +} + + +static int aicwf_set_scan(struct net_device *dev, struct iw_request_info *a, + union iwreq_data *wrqu, char *extra) +{ + int ret = 0; + struct rwnx_vif* rwnx_vif = netdev_priv(dev); + struct rwnx_hw* rwnx_hw = rwnx_vif->rwnx_hw; + struct cfg80211_scan_request *request; + int index = 0; + struct wiphy *wiphy = priv_to_wiphy(rwnx_hw); + unsigned long wext_scan_timeout; + + AICWFDBG(LOGTRACE, "%s Enter", __func__); + + if(wiphy == NULL){ + printk("aic_wiphy error \r\n"); + + } + + if (rwnx_hw->wext_scan || rwnx_hw->scanning) { + AICWFDBG(LOGINFO, "is scanning, abort\n"); + ret = rwnx_send_scanu_cancel_req(rwnx_hw, NULL); + if (ret) + return ret; + msleep(150); + } + + rwnx_hw->wext_scan = 1; + + request = (struct cfg80211_scan_request *)vmalloc(sizeof(struct cfg80211_scan_request)); + + request->n_channels = rwnx_hw->support_freqs_number; + request->n_ssids = 0; + request->no_cck = false; + request->ie = NULL; + request->ie_len = 0; + + for(index = 0;index < rwnx_hw->support_freqs_number; index++){ + request->channels[index] = ieee80211_get_channel(wiphy, + rwnx_hw->support_freqs[index]); + if(request->channels[index] == NULL){ + AICWFDBG(LOGERROR, "%s ERROR!!! 
channels is NULL", __func__); + continue; + } + } + +#if WIRELESS_EXT >= 17 + if (wrqu->data.length == sizeof(struct iw_scan_req)) { + struct iw_scan_req *req = (struct iw_scan_req *)extra; + + if (wrqu->data.flags & IW_SCAN_THIS_ESSID) { + int len = min((int)req->essid_len, 32); + request->ssids = kmalloc(sizeof(struct cfg80211_ssid), GFP_KERNEL); + if(!request->ssids){ + AICWFDBG(LOGERROR, "%s Failied to alloc memory for ssids", __func__); + return -ENOMEM; + } + memset(request->ssids, 0, sizeof(struct cfg80211_ssid)); + memcpy(request->ssids[0].ssid, req->essid, len); + request->ssids[0].ssid_len = len; + request->n_ssids = 1; + AICWFDBG(LOGDEBUG,"IW_SCAN_THIS_ESSID, ssid=%s, len=%d\n", req->essid, req->essid_len); + } else if (req->scan_type == IW_SCAN_TYPE_PASSIVE) + AICWFDBG(LOGDEBUG,"aic_set_scan, req->scan_type == IW_SCAN_TYPE_PASSIVE\n"); + } +#endif + if ((ret = rwnx_send_scanu_req(rwnx_hw, rwnx_vif, request))){ + return ret; + } + + rwnx_vif->rwnx_hw->scan_request = request; + wext_scan_timeout = msecs_to_jiffies(5000); + + if (!wait_for_completion_killable_timeout(&rwnx_hw->wext_scan_com, wext_scan_timeout)) { + AICWFDBG(LOGERROR, "%s WEXT scan timeout", __func__); + } + return 0; +} + +static int aicwf_get_scan(struct net_device *dev, struct iw_request_info *a, + union iwreq_data *wrqu, char *extra) +{ + int ret = 0; + struct rwnx_vif* rwnx_vif = netdev_priv(dev); + struct rwnx_hw* rwnx_hw = rwnx_vif->rwnx_hw; + struct scanu_result_wext *scan_re; + struct scanu_result_wext *tmp; + char *start = extra; + char *stop = start + wrqu->data.length; + + AICWFDBG(LOGDEBUG, "%s Enter %p %p len:%d \r\n", __func__, start, stop, wrqu->data.length); + + //TODO: spinlock + list_for_each_entry_safe(scan_re, tmp, &rwnx_hw->wext_scanre_list, scanu_re_list) { + start = translate_scan(rwnx_hw, a, scan_re, start, stop); + if ((stop - start) < 768) { + return -E2BIG; + } + + } + + list_for_each_entry_safe(scan_re, tmp, &rwnx_hw->wext_scanre_list, scanu_re_list) { + list_del(&scan_re->scanu_re_list); + vfree(scan_re->payload); + vfree(scan_re->ind); + vfree(scan_re); + scan_re = NULL; + } + + + wrqu->data.length = start - extra; + wrqu->data.flags = 0; + + return ret ; + +} + + +static int aicwf_get_essid(struct net_device *dev, + struct iw_request_info *a, + union iwreq_data *wrqu, char *extra) +{ + int ret = 0; + struct rwnx_vif* rwnx_vif = netdev_priv(dev); + struct rwnx_hw* rwnx_hw = rwnx_vif->rwnx_hw; + + AICWFDBG(LOGTRACE, "%s Enter", __func__); + + wrqu->essid.length = strlen(rwnx_hw->wext_essid); + memcpy(extra, rwnx_hw->wext_essid, strlen(rwnx_hw->wext_essid)); + wrqu->essid.flags = 1; + + + return ret; + +} + + +static int aicwf_get_nick(struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, char *extra) +{ + + AICWFDBG(LOGTRACE, "%s Enter", __func__); + + if (extra) { + wrqu->data.length = 8; + wrqu->data.flags = 1; + memcpy(extra, "AIC@8800", 8); + } + + return 0; + +} + +static int aicwf_get_rate(struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, char *extra) +{ + u16 max_rate = 0; + struct rwnx_vif* rwnx_vif = netdev_priv(dev); + struct rwnx_hw* rwnx_hw = rwnx_vif->rwnx_hw; + struct rwnx_sta *sta = NULL; + + union rwnx_rate_ctrl_info *rate_info; + struct mm_get_sta_info_cfm cfm; + + AICWFDBG(LOGTRACE, "%s Enter", __func__); + + if(rwnx_vif->sta.ap){ + sta = rwnx_vif->sta.ap; + rwnx_send_get_sta_info_req(rwnx_hw, sta->sta_idx, &cfm); + rate_info = (union rwnx_rate_ctrl_info *)&cfm.rate_info; + + switch (rate_info->formatModTx) { 
+ case FORMATMOD_NON_HT: + case FORMATMOD_NON_HT_DUP_OFDM: + //get bg mode datarate + max_rate = tx_legrates_lut_rate[rate_info->mcsIndexTx]/10; + break; + case FORMATMOD_HT_MF: + case FORMATMOD_HT_GF: + case FORMATMOD_VHT: + //get bg mode MCS index + max_rate = VHT_MCS_DATA_RATE[rate_info->bwTx][1][rate_info->mcsIndexTx]/2; + break; + case FORMATMOD_HE_MU: + case FORMATMOD_HE_SU: + case FORMATMOD_HE_ER: + //get bg mode MCS index + max_rate = HE_MCS_DATA_RATE[rate_info->bwTx][rate_info->mcsIndexTx]; + break; + } + } + + wrqu->bitrate.fixed = 0; /* no auto select */ + wrqu->bitrate.value = max_rate * 1000000; + + return 0; +} + + +static int aicwf_get_enc(struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, char *keybuf) +{ + int ret = 0; + struct iw_point *erq = &(wrqu->encoding); + + AICWFDBG(LOGTRACE, "%s Enter", __func__); + + erq->length = 0; + erq->flags |= IW_ENCODE_ENABLED; + + + return ret; + +} + + +static iw_handler aic_handlers[] = { + NULL, /* SIOCSIWCOMMIT */ + aicwf_get_name, /* SIOCGIWNAME */ + NULL, /* SIOCSIWNWID */ + NULL, /* SIOCGIWNWID */ + NULL, /* SIOCSIWFREQ */ + aicwf_get_freq, /* SIOCGIWFREQ */ + NULL, /* SIOCSIWMODE */ + aicwf_get_mode, /* SIOCGIWMODE */ + NULL, /* SIOCSIWSENS */ + NULL, /* SIOCGIWSENS */ + NULL, /* SIOCSIWRANGE */ + aicwf_get_range, /* SIOCGIWRANGE */ + NULL, /* SIOCSIWPRIV */ + NULL, /* SIOCGIWPRIV */ + NULL, /* SIOCSIWSTATS */ + NULL, /* SIOCGIWSTATS */ + NULL, /* SIOCSIWSPY */ + NULL, /* SIOCGIWSPY */ + NULL, /* SIOCGIWTHRSPY */ + NULL, /* SIOCWIWTHRSPY */ + NULL, /* SIOCSIWAP */ + aicwf_get_wap, /* SIOCGIWAP */ + NULL, /* request MLME operation; uses struct iw_mlme */ + NULL, /* SIOCGIWAPLIST -- depricated */ + aicwf_set_scan, /* SIOCSIWSCAN */ + aicwf_get_scan, /* SIOCGIWSCAN */ + NULL, /* SIOCSIWESSID */ + aicwf_get_essid, /* SIOCGIWESSID */ + NULL, /* SIOCSIWNICKN */ + aicwf_get_nick, /* SIOCGIWNICKN */ + NULL, /* -- hole -- */ + NULL, /* -- hole -- */ + NULL, /* SIOCSIWRATE */ + aicwf_get_rate, /* SIOCGIWRATE */ + NULL, /* SIOCSIWRTS */ + NULL, /* SIOCGIWRTS */ + NULL, /* SIOCSIWFRAG */ + NULL, /* SIOCGIWFRAG */ + NULL, /* SIOCSIWTXPOW */ + NULL, /* SIOCGIWTXPOW */ + NULL, /* SIOCSIWRETRY */ + NULL, /* SIOCGIWRETRY */ + NULL, /* SIOCSIWENCODE */ + aicwf_get_enc, /* SIOCGIWENCODE */ + NULL, /* SIOCSIWPOWER */ + NULL, /* SIOCGIWPOWER */ + NULL, /*---hole---*/ + NULL, /*---hole---*/ + NULL, /* SIOCSIWGENIE */ + NULL, /* SIOCGWGENIE */ + NULL, /* SIOCSIWAUTH */ + NULL, /* SIOCGIWAUTH */ + NULL, /* SIOCSIWENCODEEXT */ + NULL, /* SIOCGIWENCODEEXT */ + NULL, /* SIOCSIWPMKSA */ + NULL, /*---hole---*/ +}; + +struct iw_handler_def aic_handlers_def = { + .standard = aic_handlers, + .num_standard = sizeof(aic_handlers) / sizeof(iw_handler), +#if WIRELESS_EXT >= 17 + .get_wireless_stats = aicwf_get_wireless_stats, +#endif +}; + +void aicwf_set_wireless_ext( struct net_device *ndev, struct rwnx_hw *rwnx_hw){ + + AICWFDBG(LOGINFO, "%s Enter", __func__); + + init_completion(&rwnx_hw->wext_scan_com); + INIT_LIST_HEAD(&rwnx_hw->wext_scanre_list); + + ndev->wireless_handlers = (struct iw_handler_def *)&aic_handlers_def; +} + + diff --git a/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/aicwf_wext_linux.h b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/aicwf_wext_linux.h new file mode 100644 index 0000000..4a9c4ec --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/aicwf_wext_linux.h @@ -0,0 +1,11 @@ + +struct scanu_result_wext{ + struct list_head scanu_re_list; + struct cfg80211_bss *bss; + struct scanu_result_ind 
*ind; + u32_l *payload; +}; + +void aicwf_set_wireless_ext( struct net_device *ndev, struct rwnx_hw *rwnx_hw); +void aicwf_scan_complete_event(struct net_device *dev); + diff --git a/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/hal_desc.h b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/hal_desc.h new file mode 100644 index 0000000..e0a13a8 --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/hal_desc.h @@ -0,0 +1,376 @@ +/** + ****************************************************************************** + * + * @file hal_desc.h + * + * @brief File containing the definition of HW descriptors. + * + * Contains the definition and structures used by HW + * + * Copyright (C) RivieraWaves 2011-2019 + * + ****************************************************************************** + */ + +#ifndef _HAL_DESC_H_ +#define _HAL_DESC_H_ + +#include "lmac_types.h" + +/* Rate and policy table */ + +#define N_CCK 8 +#define N_OFDM 8 +#define N_HT (8 * 2 * 2 * 4) +#define N_VHT (10 * 4 * 2 * 8) +#define N_HE_SU (12 * 4 * 3 * 8) +#define N_HE_MU (12 * 6 * 3 * 8) + +/* conversion table from NL80211 to MACHW enum */ +extern const int chnl2bw[]; + +/* conversion table from MACHW to NL80211 enum */ +extern const int bw2chnl[]; + +/* Rate cntrl info */ +#define MCS_INDEX_TX_RCX_OFT 0 +#define MCS_INDEX_TX_RCX_MASK (0x7F << MCS_INDEX_TX_RCX_OFT) +#define BW_TX_RCX_OFT 7 +#define BW_TX_RCX_MASK (0x3 << BW_TX_RCX_OFT) +#define SHORT_GI_TX_RCX_OFT 9 +#define SHORT_GI_TX_RCX_MASK (0x1 << SHORT_GI_TX_RCX_OFT) +#define PRE_TYPE_TX_RCX_OFT 10 +#define PRE_TYPE_TX_RCX_MASK (0x1 << PRE_TYPE_TX_RCX_OFT) +#define FORMAT_MOD_TX_RCX_OFT 11 +#define FORMAT_MOD_TX_RCX_MASK (0x7 << FORMAT_MOD_TX_RCX_OFT) + +/* Values for formatModTx */ +#define FORMATMOD_NON_HT 0 +#define FORMATMOD_NON_HT_DUP_OFDM 1 +#define FORMATMOD_HT_MF 2 +#define FORMATMOD_HT_GF 3 +#define FORMATMOD_VHT 4 +#define FORMATMOD_HE_SU 5 +#define FORMATMOD_HE_MU 6 +#define FORMATMOD_HE_ER 7 + +/* Values for navProtFrmEx */ +#define NAV_PROT_NO_PROT_BIT 0 +#define NAV_PROT_SELF_CTS_BIT 1 +#define NAV_PROT_RTS_CTS_BIT 2 +#define NAV_PROT_RTS_CTS_WITH_QAP_BIT 3 +#define NAV_PROT_STBC_BIT 4 + +/* THD MACCTRLINFO2 fields, used in struct umacdesc umac.flags */ +/// WhichDescriptor definition - contains aMPDU bit and position value +/// Offset of WhichDescriptor field in the MAC CONTROL INFO 2 word +#define WHICHDESC_OFT 19 +/// Mask of the WhichDescriptor field +#define WHICHDESC_MSK (0x07 << WHICHDESC_OFT) +/// Only 1 THD possible, describing an unfragmented MSDU +#define WHICHDESC_UNFRAGMENTED_MSDU (0x00 << WHICHDESC_OFT) +/// THD describing the first MPDU of a fragmented MSDU +#define WHICHDESC_FRAGMENTED_MSDU_FIRST (0x01 << WHICHDESC_OFT) +/// THD describing intermediate MPDUs of a fragmented MSDU +#define WHICHDESC_FRAGMENTED_MSDU_INT (0x02 << WHICHDESC_OFT) +/// THD describing the last MPDU of a fragmented MSDU +#define WHICHDESC_FRAGMENTED_MSDU_LAST (0x03 << WHICHDESC_OFT) +/// THD for extra descriptor starting an AMPDU +#define WHICHDESC_AMPDU_EXTRA (0x04 << WHICHDESC_OFT) +/// THD describing the first MPDU of an A-MPDU +#define WHICHDESC_AMPDU_FIRST (0x05 << WHICHDESC_OFT) +/// THD describing intermediate MPDUs of an A-MPDU +#define WHICHDESC_AMPDU_INT (0x06 << WHICHDESC_OFT) +/// THD describing the last MPDU of an A-MPDU +#define WHICHDESC_AMPDU_LAST (0x07 << WHICHDESC_OFT) + +/// aMPDU bit offset +#define AMPDU_OFT 21 +/// aMPDU bit +#define AMPDU_BIT CO_BIT(AMPDU_OFT) + +enum { + HW_RATE_1MBPS = 0, + HW_RATE_2MBPS = 1, + HW_RATE_5_5MBPS = 2, + 
HW_RATE_11MBPS = 3, + HW_RATE_6MBPS = 4, + HW_RATE_9MBPS = 5, + HW_RATE_12MBPS = 6, + HW_RATE_18MBPS = 7, + HW_RATE_24MBPS = 8, + HW_RATE_36MBPS = 9, + HW_RATE_48MBPS = 10, + HW_RATE_54MBPS = 11, + HW_RATE_MAX +}; + +union rwnx_mcs_index { + struct { + u32 mcs : 3; + u32 nss : 2; + } ht; + struct { + u32 mcs : 4; + u32 nss : 3; + } vht; + struct { + u32 mcs : 4; + u32 nss : 3; + } he; + u32 legacy : 7; +}; + +/* c.f RW-WLAN-nX-MAC-HW-UM */ +union rwnx_rate_ctrl_info { + struct { + u32 mcsIndexTx : 7; + u32 bwTx : 2; + u32 giAndPreTypeTx : 2; + u32 formatModTx : 3; + u32 navProtFrmEx : 3; + u32 mcsIndexProtTx : 7; + u32 bwProtTx : 2; + u32 formatModProtTx : 3; + u32 nRetry : 3; + }; + u32 value; +}; + +/* c.f RW-WLAN-nX-MAC-HW-UM */ +struct rwnx_power_ctrl_info { + u32 txPwrLevelPT : 8; + u32 txPwrLevelProtPT : 8; + u32 reserved :16; +}; + +/* c.f RW-WLAN-nX-MAC-HW-UM */ +union rwnx_pol_phy_ctrl_info_1 { + struct { + u32 rsvd1 : 3; + u32 bfFrmEx : 1; + u32 numExtnSS : 2; + u32 fecCoding : 1; + u32 stbc : 2; + u32 rsvd2 : 5; + u32 nTx : 3; + u32 nTxProt : 3; + }; + u32 value; +}; + +/* c.f RW-WLAN-nX-MAC-HW-UM */ +union rwnx_pol_phy_ctrl_info_2 { + struct { + u32 antennaSet : 8; + u32 smmIndex : 8; + u32 beamFormed : 1; + }; + u32 value; +}; + +/* c.f RW-WLAN-nX-MAC-HW-UM */ +union rwnx_pol_mac_ctrl_info_1 { + struct { + u32 keySRamIndex : 10; + u32 keySRamIndexRA : 10; + }; + u32 value; +}; + +/* c.f RW-WLAN-nX-MAC-HW-UM */ +union rwnx_pol_mac_ctrl_info_2 { + struct { + u32 longRetryLimit : 8; + u32 shortRetryLimit : 8; + u32 rtsThreshold : 12; + }; + u32 value; +}; + +#define POLICY_TABLE_PATTERN 0xBADCAB1E + +struct tx_policy_tbl { + /* Unique Pattern at the start of Policy Table */ + u32 upatterntx; + /* PHY Control 1 Information used by MAC HW */ + union rwnx_pol_phy_ctrl_info_1 phyctrlinfo_1; + /* PHY Control 2 Information used by MAC HW */ + union rwnx_pol_phy_ctrl_info_2 phyctrlinfo_2; + /* MAC Control 1 Information used by MAC HW */ + union rwnx_pol_mac_ctrl_info_1 macctrlinfo_1; + /* MAC Control 2 Information used by MAC HW */ + union rwnx_pol_mac_ctrl_info_2 macctrlinfo_2; + + union rwnx_rate_ctrl_info ratectrlinfos[NX_TX_MAX_RATES]; + struct rwnx_power_ctrl_info powerctrlinfos[NX_TX_MAX_RATES]; +}; + +#ifdef CONFIG_RWNX_FULLMAC + +/** + * struct rwnx_hw_txstatus - Bitfield of confirmation status + * + * @tx_done: packet has been processed by the firmware. + * @retry_required: packet has been transmitted but not acknoledged. + * Driver must repush it. + * @sw_retry_required: packet has not been transmitted (FW wasn't able to push + * it when it received it: not active channel ...). Driver must repush it. + * @acknowledged: packet has been acknowledged by peer + */ +union rwnx_hw_txstatus { + struct { + u32 tx_done : 1; + u32 retry_required : 1; + u32 sw_retry_required : 1; + u32 acknowledged : 1; + u32 reserved :28; + }; + u32 value; +}; + +/** + * struct tx_cfm_tag - Structure indicating the status and other + * information about the transmission + * + * @pn: PN that was used for the transmission + * @sn: Sequence number of the packet + * @timestamp: Timestamp of first transmission of this MPDU + * @credits: Number of credits to be reallocated for the txq that push this + * buffer (can be 0 or 1) + * @ampdu_size: Size of the ampdu in which the frame has been transmitted if + * this was the last frame of the a-mpdu, and 0 if the frame is not the last + * frame on a a-mdpu. + * 1 means that the frame has been transmitted as a singleton. 
+ * @amsdu_size: Size, in bytes, allowed to create a-msdu. + * @status: transmission status + */ +struct tx_cfm_tag +{ + u16_l pn[4]; + u16_l sn; + u16_l timestamp; + s8_l credits; + u8_l ampdu_size; +#ifdef CONFIG_RWNX_SPLIT_TX_BUF + u16_l amsdu_size; +#endif + union rwnx_hw_txstatus status; +}; + +/** + * struct rwnx_hw_txhdr - Hardware part of tx header + * + * @cfm: Information updated by fw/hardware after sending a frame + */ +struct rwnx_hw_txhdr { + struct tx_cfm_tag cfm; +}; + +#endif /* CONFIG_RWNX_FULLMAC */ + +/* Modem */ + +#define MDM_PHY_CONFIG_TRIDENT 0 +#define MDM_PHY_CONFIG_ELMA 1 +#define MDM_PHY_CONFIG_KARST 2 + +// MODEM features (from reg_mdm_stat.h) +/// MUMIMOTX field bit +#define MDM_MUMIMOTX_BIT ((u32)0x80000000) +/// MUMIMOTX field position +#define MDM_MUMIMOTX_POS 31 +/// MUMIMORX field bit +#define MDM_MUMIMORX_BIT ((u32)0x40000000) +/// MUMIMORX field position +#define MDM_MUMIMORX_POS 30 +/// BFMER field bit +#define MDM_BFMER_BIT ((u32)0x20000000) +/// BFMER field position +#define MDM_BFMER_POS 29 +/// BFMEE field bit +#define MDM_BFMEE_BIT ((u32)0x10000000) +/// BFMEE field position +#define MDM_BFMEE_POS 28 +/// LDPCDEC field bit +#define MDM_LDPCDEC_BIT ((u32)0x08000000) +/// LDPCDEC field position +#define MDM_LDPCDEC_POS 27 +/// LDPCENC field bit +#define MDM_LDPCENC_BIT ((u32)0x04000000) +/// LDPCENC field position +#define MDM_LDPCENC_POS 26 +/// CHBW field mask +#define MDM_CHBW_MASK ((u32)0x03000000) +/// CHBW field LSB position +#define MDM_CHBW_LSB 24 +/// CHBW field width +#define MDM_CHBW_WIDTH ((u32)0x00000002) +/// DSSSCCK field bit +#define MDM_DSSSCCK_BIT ((u32)0x00800000) +/// DSSSCCK field position +#define MDM_DSSSCCK_POS 23 +/// VHT field bit +#define MDM_VHT_BIT ((u32)0x00400000) +/// VHT field position +#define MDM_VHT_POS 22 +/// HE field bit +#define MDM_HE_BIT ((u32)0x00200000) +/// HE field position +#define MDM_HE_POS 21 +/// ESS field bit +#define MDM_ESS_BIT ((u32)0x00100000) +/// ESS field position +#define MDM_ESS_POS 20 +/// RFMODE field mask +#define MDM_RFMODE_MASK ((u32)0x000F0000) +/// RFMODE field LSB position +#define MDM_RFMODE_LSB 16 +/// RFMODE field width +#define MDM_RFMODE_WIDTH ((u32)0x00000004) +/// NSTS field mask +#define MDM_NSTS_MASK ((u32)0x0000F000) +/// NSTS field LSB position +#define MDM_NSTS_LSB 12 +/// NSTS field width +#define MDM_NSTS_WIDTH ((u32)0x00000004) +/// NSS field mask +#define MDM_NSS_MASK ((u32)0x00000F00) +/// NSS field LSB position +#define MDM_NSS_LSB 8 +/// NSS field width +#define MDM_NSS_WIDTH ((u32)0x00000004) +/// NTX field mask +#define MDM_NTX_MASK ((u32)0x000000F0) +/// NTX field LSB position +#define MDM_NTX_LSB 4 +/// NTX field width +#define MDM_NTX_WIDTH ((u32)0x00000004) +/// NRX field mask +#define MDM_NRX_MASK ((u32)0x0000000F) +/// NRX field LSB position +#define MDM_NRX_LSB 0 +/// NRX field width +#define MDM_NRX_WIDTH ((u32)0x00000004) + +#define __MDM_PHYCFG_FROM_VERS(v) (((v) & MDM_RFMODE_MASK) >> MDM_RFMODE_LSB) + +#define RIU_FCU_PRESENT_MASK ((u32)0xFF000000) +#define RIU_FCU_PRESENT_LSB 24 + +#define __RIU_FCU_PRESENT(v) (((v) & RIU_FCU_PRESENT_MASK) >> RIU_FCU_PRESENT_LSB == 5) + +/// AGC load version field mask +#define RIU_AGC_LOAD_MASK ((u32)0x00C00000) +/// AGC load version field LSB position +#define RIU_AGC_LOAD_LSB 22 + +#define __RIU_AGCLOAD_FROM_VERS(v) (((v) & RIU_AGC_LOAD_MASK) >> RIU_AGC_LOAD_LSB) + +#define __FPGA_TYPE(v) (((v) & 0xFFFF0000) >> 16) + +#define __MDM_MAJOR_VERSION(v) (((v) & 0xFF000000) >> 24) +#define __MDM_MINOR_VERSION(v) (((v) 
& 0x00FF0000) >> 16) + + +#endif // _HAL_DESC_H_ diff --git a/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/ipc_compat.h b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/ipc_compat.h new file mode 100644 index 0000000..4601cfa --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/ipc_compat.h @@ -0,0 +1,25 @@ +/** + **************************************************************************************** + * + * @file ipc_compat.h + * + * Copyright (C) RivieraWaves 2011-2019 + * + **************************************************************************************** + */ + +#ifndef _IPC_H_ +#define _IPC_H_ + +#define __INLINE static __attribute__((__always_inline__)) inline + +#define __ALIGN4 __aligned(4) + +#define ASSERT_ERR(condition) \ + do { \ + if (unlikely(!(condition))) { \ + printk(KERN_ERR "%s:%d:ASSERT_ERR(" #condition ")\n", __FILE__, __LINE__); \ + } \ + } while(0) + +#endif /* _IPC_H_ */ diff --git a/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/ipc_host.c b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/ipc_host.c new file mode 100644 index 0000000..7534be4 --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/ipc_host.c @@ -0,0 +1,771 @@ +/** + ****************************************************************************** + * + * @file ipc_host.c + * + * @brief IPC module. + * + * Copyright (C) RivieraWaves 2011-2019 + * + ****************************************************************************** + */ + +/* + * INCLUDE FILES + ****************************************************************************** + */ +#ifndef __KERNEL__ +#include +#define REG_SW_SET_PROFILING(env, value) do{ }while(0) +#define REG_SW_CLEAR_PROFILING(env, value) do{ }while(0) +#define REG_SW_CLEAR_HOSTBUF_IDX_PROFILING(env) do{ }while(0) +#define REG_SW_SET_HOSTBUF_IDX_PROFILING(env, val) do{ }while(0) +#else +#include +#include "rwnx_defs.h" +#include "rwnx_prof.h" +#endif + +#include "reg_ipc_app.h" +#include "ipc_host.h" + +/* + * TYPES DEFINITION + ****************************************************************************** + */ + +const int nx_txdesc_cnt[] = +{ + NX_TXDESC_CNT0, + NX_TXDESC_CNT1, + NX_TXDESC_CNT2, + NX_TXDESC_CNT3, + #if NX_TXQ_CNT == 5 + NX_TXDESC_CNT4, + #endif +}; + +const int nx_txdesc_cnt_msk[] = +{ + NX_TXDESC_CNT0 - 1, + NX_TXDESC_CNT1 - 1, + NX_TXDESC_CNT2 - 1, + NX_TXDESC_CNT3 - 1, + #if NX_TXQ_CNT == 5 + NX_TXDESC_CNT4 - 1, + #endif +}; + +const int nx_txuser_cnt[] = +{ + CONFIG_USER_MAX, + CONFIG_USER_MAX, + CONFIG_USER_MAX, + CONFIG_USER_MAX, + #if NX_TXQ_CNT == 5 + 1, + #endif +}; + + +/* + * FUNCTIONS DEFINITIONS + ****************************************************************************** + */ +/** + * ipc_host_rxdesc_handler() - Handle the reception of a Rx Descriptor + * + * @env: pointer to the IPC Host environment + * + * Called from general IRQ handler when status %IPC_IRQ_E2A_RXDESC is set + */ +static void ipc_host_rxdesc_handler(struct ipc_host_env_tag *env) +{ + // For profiling + REG_SW_SET_PROFILING(env->pthis, SW_PROF_IRQ_E2A_RXDESC); + + // LMAC has triggered an IT saying that a reception has occurred. 
+ // Then we first need to check the validity of the current hostbuf, and the validity + // of the next hostbufs too, because it is likely that several hostbufs have been + // filled within the time needed for this irq handling + do { + #ifdef CONFIG_RWNX_FULLMAC + // call the external function to indicate that a RX descriptor is received + if (env->cb.recv_data_ind(env->pthis, + env->ipc_host_rxdesc_array[env->ipc_host_rxdesc_idx].hostid) != 0) + #else + // call the external function to indicate that a RX packet is received + if (env->cb.recv_data_ind(env->pthis, + env->ipc_host_rxbuf_array[env->ipc_host_rxbuf_idx].hostid) != 0) + #endif //(CONFIG_RWNX_FULLMAC) + break; + + }while(1); + + // For profiling + REG_SW_CLEAR_PROFILING(env->pthis, SW_PROF_IRQ_E2A_RXDESC); +} + +/** + * ipc_host_radar_handler() - Handle the reception of radar events + * + * @env: pointer to the IPC Host environment + * + * Called from general IRQ handler when status %IPC_IRQ_E2A_RADAR is set + */ +static void ipc_host_radar_handler(struct ipc_host_env_tag *env) +{ +#ifdef CONFIG_RWNX_RADAR + // LMAC has triggered an IT saying that a radar event has been sent to upper layer. + // Then we first need to check the validity of the current msg buf, and the validity + // of the next buffers too, because it is likely that several buffers have been + // filled within the time needed for this irq handling + // call the external function to indicate that a RX packet is received + spin_lock_bh(&((struct rwnx_hw *)env->pthis)->radar.lock); + while (env->cb.recv_radar_ind(env->pthis, + env->ipc_host_radarbuf_array[env->ipc_host_radarbuf_idx].hostid) == 0) + ; + spin_unlock_bh(&((struct rwnx_hw *)env->pthis)->radar.lock); +#endif /* CONFIG_RWNX_RADAR */ +} + +/** + * ipc_host_unsup_rx_vec_handler() - Handle the reception of unsupported rx vector + * + * @env: pointer to the IPC Host environment + * + * Called from general IRQ handler when status %IPC_IRQ_E2A_UNSUP_RX_VEC is set + */ +static void ipc_host_unsup_rx_vec_handler(struct ipc_host_env_tag *env) +{ + while (env->cb.recv_unsup_rx_vec_ind(env->pthis, + env->ipc_host_unsuprxvecbuf_array[env->ipc_host_unsuprxvecbuf_idx].hostid) == 0) + ; +} + +/** + * ipc_host_msg_handler() - Handler for firmware message + * + * @env: pointer to the IPC Host environment + * + * Called from general IRQ handler when status %IPC_IRQ_E2A_MSG is set + */ +static void ipc_host_msg_handler(struct ipc_host_env_tag *env) +{ + // For profiling + REG_SW_SET_PROFILING(env->pthis, SW_PROF_IRQ_E2A_MSG); + + // LMAC has triggered an IT saying that a message has been sent to upper layer. 
+ // Then we first need to check the validity of the current msg buf, and the validity + // of the next buffers too, because it is likely that several buffers have been + // filled within the time needed for this irq handling + // call the external function to indicate that a RX packet is received + while (env->cb.recv_msg_ind(env->pthis, + env->ipc_host_msgbuf_array[env->ipc_host_msge2a_idx].hostid) == 0) + ; + + + // For profiling + REG_SW_CLEAR_PROFILING(env->pthis, SW_PROF_IRQ_E2A_MSG); + +} + +/** + * ipc_host_msgack_handler() - Handle the reception of message acknowledgement + * + * @env: pointer to the IPC Host environment + * + * Called from general IRQ handler when status %IPC_IRQ_E2A_MSG_ACK is set + */ +static void ipc_host_msgack_handler(struct ipc_host_env_tag *env) +{ + void *hostid = env->msga2e_hostid; + + ASSERT_ERR(hostid); + ASSERT_ERR(env->msga2e_cnt == (((struct lmac_msg *)(&env->shared->msg_a2e_buf.msg))->src_id & 0xFF)); + + env->msga2e_hostid = NULL; + env->msga2e_cnt++; + env->cb.recv_msgack_ind(env->pthis, hostid); +} + +/** + * ipc_host_dbg_handler() - Handle the reception of Debug event + * + * @env: pointer to the IPC Host environment + * + * Called from general IRQ handler when status %IPC_IRQ_E2A_DBG is set + */ +static void ipc_host_dbg_handler(struct ipc_host_env_tag *env) +{ + // For profiling + REG_SW_SET_PROFILING(env->pthis, SW_PROF_IRQ_E2A_DBG); + + // LMAC has triggered an IT saying that a DBG message has been sent to upper layer. + // Then we first need to check the validity of the current buffer, and the validity + // of the next buffers too, because it is likely that several buffers have been + // filled within the time needed for this irq handling + // call the external function to indicate that a RX packet is received + while(env->cb.recv_dbg_ind(env->pthis, + env->ipc_host_dbgbuf_array[env->ipc_host_dbg_idx].hostid) == 0) + ; + + // For profiling + REG_SW_CLEAR_PROFILING(env->pthis, SW_PROF_IRQ_E2A_DBG); +} + +/** + * ipc_host_tx_cfm_handler() - Handle the reception of TX confirmation + * + * @env: pointer to the IPC Host environment + * @queue_idx: index of the hardware on which the confirmation has been received + * @user_pos: index of the user position + * + * Called from general IRQ handler when status %IPC_IRQ_E2A_TXCFM is set + */ +static void ipc_host_tx_cfm_handler(struct ipc_host_env_tag *env, + const int queue_idx, const int user_pos) +{ + // TX confirmation descriptors have been received + REG_SW_SET_PROFILING(env->pthis, SW_PROF_IRQ_E2A_TXCFM); + while (1) + { + // Get the used index and increase it. 
We do the increase before knowing if the + // current buffer is confirmed because the callback function may call the + // ipc_host_txdesc_get() in case flow control was enabled and the index has to be + // already at the good value to ensure that the test of FIFO full is correct + uint32_t used_idx = env->txdesc_used_idx[queue_idx][user_pos]++; + uint32_t used_idx_mod = used_idx & nx_txdesc_cnt_msk[queue_idx]; + void *host_id = env->tx_host_id[queue_idx][user_pos][used_idx_mod]; + + // Reset the host id in the array + env->tx_host_id[queue_idx][user_pos][used_idx_mod] = 0; + + // call the external function to indicate that a TX packet is freed + if (host_id == 0) + { + // No more confirmations, so put back the used index at its initial value + env->txdesc_used_idx[queue_idx][user_pos] = used_idx; + break; + } + + if (env->cb.send_data_cfm(env->pthis, host_id) != 0) + { + // No more confirmations, so put back the used index at its initial value + env->txdesc_used_idx[queue_idx][user_pos] = used_idx; + env->tx_host_id[queue_idx][user_pos][used_idx_mod] = host_id; + // and exit the loop + break; + } + + REG_SW_SET_PROFILING_CHAN(env->pthis, SW_PROF_CHAN_CTXT_CFM_HDL_BIT); + REG_SW_CLEAR_PROFILING_CHAN(env->pthis, SW_PROF_CHAN_CTXT_CFM_HDL_BIT); + } + + REG_SW_CLEAR_PROFILING(env->pthis, SW_PROF_IRQ_E2A_TXCFM); +} + +/** + ****************************************************************************** + */ +bool ipc_host_tx_frames_pending(struct ipc_host_env_tag *env) +{ + int i, j; + bool tx_frames_pending = false; + + for (i = 0; (i < IPC_TXQUEUE_CNT) && !tx_frames_pending; i++) + { + for (j = 0; j < nx_txuser_cnt[i]; j++) + { + uint32_t used_idx = env->txdesc_used_idx[i][j]; + uint32_t free_idx = env->txdesc_free_idx[i][j]; + + // Check if this queue is empty or not + if (used_idx != free_idx) + { + // The queue is not empty, update the flag and exit + tx_frames_pending = true; + break; + } + } + } + + return (tx_frames_pending); +} + +/** + ****************************************************************************** + */ +void *ipc_host_tx_flush(struct ipc_host_env_tag *env, const int queue_idx, const int user_pos) +{ + uint32_t used_idx = env->txdesc_used_idx[queue_idx][user_pos]; + void *host_id = env->tx_host_id[queue_idx][user_pos][used_idx & nx_txdesc_cnt_msk[queue_idx]]; + + // call the external function to indicate that a TX packet is freed + if (host_id != 0) + { + // Reset the host id in the array + env->tx_host_id[queue_idx][user_pos][used_idx & nx_txdesc_cnt_msk[queue_idx]] = 0; + + // Increment the used index + env->txdesc_used_idx[queue_idx][user_pos]++; + } + + return (host_id); +} + +/** + ****************************************************************************** + */ +void ipc_host_init(struct ipc_host_env_tag *env, + struct ipc_host_cb_tag *cb, + struct ipc_shared_env_tag *shared_env_ptr, + void *pthis) +{ + unsigned int i; + unsigned int size; + unsigned int * dst; + + // Reset the environments + // Reset the IPC Shared memory +#if 0 + /* check potential platform bug on multiple stores */ + memset(shared_env_ptr, 0, sizeof(struct ipc_shared_env_tag)); +#else + dst = (unsigned int *)shared_env_ptr; + size = (unsigned int)sizeof(struct ipc_shared_env_tag); + for (i=0; i < size; i+=4) + { + *dst++ = 0; + } +#endif + // Reset the IPC Host environment + memset(env, 0, sizeof(struct ipc_host_env_tag)); + + // Initialize the shared environment pointer + env->shared = shared_env_ptr; + + // Save the callbacks in our own environment + env->cb = *cb; + + // Save the pointer to 
the register base + env->pthis = pthis; + + // Initialize buffers numbers and buffers sizes needed for DMA Receptions + env->rx_bufnb = IPC_RXBUF_CNT; + #ifdef CONFIG_RWNX_FULLMAC + env->rxdesc_nb = IPC_RXDESC_CNT; + #endif //(CONFIG_RWNX_FULLMAC) + env->radar_bufnb = IPC_RADARBUF_CNT; + env->radar_bufsz = sizeof(struct radar_pulse_array_desc); + env->unsuprxvec_bufnb = IPC_UNSUPRXVECBUF_CNT; + env->unsuprxvec_bufsz = max(sizeof(struct rx_vector_desc), (size_t) RADIOTAP_HDR_MAX_LEN) + + RADIOTAP_HDR_VEND_MAX_LEN + UNSUP_RX_VEC_DATA_LEN; + env->ipc_e2amsg_bufnb = IPC_MSGE2A_BUF_CNT; + env->ipc_e2amsg_bufsz = sizeof(struct ipc_e2a_msg); + env->ipc_dbg_bufnb = IPC_DBGBUF_CNT; + env->ipc_dbg_bufsz = sizeof(struct ipc_dbg_msg); + + for (i = 0; i < CONFIG_USER_MAX; i++) + { + // Initialize the pointers to the hostid arrays + env->tx_host_id[0][i] = env->tx_host_id0[i]; + env->tx_host_id[1][i] = env->tx_host_id1[i]; + env->tx_host_id[2][i] = env->tx_host_id2[i]; + env->tx_host_id[3][i] = env->tx_host_id3[i]; + #if NX_TXQ_CNT == 5 + env->tx_host_id[4][i] = NULL; + #endif + + // Initialize the pointers to the TX descriptor arrays + env->txdesc[0][i] = shared_env_ptr->txdesc0[i]; + env->txdesc[1][i] = shared_env_ptr->txdesc1[i]; + env->txdesc[2][i] = shared_env_ptr->txdesc2[i]; + env->txdesc[3][i] = shared_env_ptr->txdesc3[i]; + #if NX_TXQ_CNT == 5 + env->txdesc[4][i] = NULL; + #endif + } + + #if NX_TXQ_CNT == 5 + env->tx_host_id[4][0] = env->tx_host_id4[0]; + env->txdesc[4][0] = shared_env_ptr->txdesc4[0]; + #endif +} + +/** + ****************************************************************************** + */ +void ipc_host_patt_addr_push(struct ipc_host_env_tag *env, uint32_t addr) +{ + struct ipc_shared_env_tag *shared_env_ptr = env->shared; + + // Copy the address + shared_env_ptr->pattern_addr = addr; +} + +/** + ****************************************************************************** + */ +int ipc_host_rxbuf_push(struct ipc_host_env_tag *env, +#ifdef CONFIG_RWNX_FULLMAC + uint32_t hostid, +#endif + uint32_t hostbuf) +{ + struct ipc_shared_env_tag *shared_env_ptr = env->shared; + + REG_SW_CLEAR_HOSTBUF_IDX_PROFILING(env->pthis); + REG_SW_SET_HOSTBUF_IDX_PROFILING(env->pthis, env->ipc_host_rxbuf_idx); + +#ifdef CONFIG_RWNX_FULLMAC + // Copy the hostbuf (DMA address) in the ipc shared memory + shared_env_ptr->host_rxbuf[env->ipc_host_rxbuf_idx].hostid = hostid; + shared_env_ptr->host_rxbuf[env->ipc_host_rxbuf_idx].dma_addr = hostbuf; +#else + // Save the hostid and the hostbuf in global array + env->ipc_host_rxbuf_array[env->ipc_host_rxbuf_idx].hostid = hostid; + env->ipc_host_rxbuf_array[env->ipc_host_rxbuf_idx].dma_addr = hostbuf; + + shared_env_ptr->host_rxbuf[env->ipc_host_rxbuf_idx] = hostbuf; +#endif //(CONFIG_RWNX_FULLMAC) + + // Signal to the embedded CPU that at least one buffer is available + ipc_app2emb_trigger_set(shared_env_ptr, IPC_IRQ_A2E_RXBUF_BACK); + + // Increment the array index + env->ipc_host_rxbuf_idx = (env->ipc_host_rxbuf_idx +1)%IPC_RXBUF_CNT; + + return (0); +} + +#ifdef CONFIG_RWNX_FULLMAC +/** + ****************************************************************************** + */ +int ipc_host_rxdesc_push(struct ipc_host_env_tag *env, void *hostid, + uint32_t hostbuf) +{ + struct ipc_shared_env_tag *shared_env_ptr = env->shared; + + // Reset the RX Descriptor DMA Address and increment the counter + env->ipc_host_rxdesc_array[env->ipc_host_rxdesc_idx].dma_addr = hostbuf; + env->ipc_host_rxdesc_array[env->ipc_host_rxdesc_idx].hostid = hostid; + + 
shared_env_ptr->host_rxdesc[env->ipc_host_rxdesc_idx].dma_addr = hostbuf; + + // Signal to the embedded CPU that at least one descriptor is available + ipc_app2emb_trigger_set(shared_env_ptr, IPC_IRQ_A2E_RXDESC_BACK); + + env->ipc_host_rxdesc_idx = (env->ipc_host_rxdesc_idx + 1) % IPC_RXDESC_CNT; + + return (0); +} +#endif /* CONFIG_RWNX_FULLMAC */ + +/** + ****************************************************************************** + */ +int ipc_host_radarbuf_push(struct ipc_host_env_tag *env, void *hostid, + uint32_t hostbuf) +{ + struct ipc_shared_env_tag *shared_env_ptr = env->shared; + + // Save the hostid and the hostbuf in global array + env->ipc_host_radarbuf_array[env->ipc_host_radarbuf_idx].hostid = hostid; + env->ipc_host_radarbuf_array[env->ipc_host_radarbuf_idx].dma_addr = hostbuf; + + // Copy the hostbuf (DMA address) in the ipc shared memory + shared_env_ptr->radarbuf_hostbuf[env->ipc_host_radarbuf_idx] = hostbuf; + + // Increment the array index + env->ipc_host_radarbuf_idx = (env->ipc_host_radarbuf_idx +1)%IPC_RADARBUF_CNT; + + return (0); +} + +/** + ****************************************************************************** + */ + +int ipc_host_unsup_rx_vec_buf_push(struct ipc_host_env_tag *env, + void *hostid, + uint32_t hostbuf) +{ + struct ipc_shared_env_tag *shared_env_ptr = env->shared; + + env->ipc_host_unsuprxvecbuf_array[env->ipc_host_unsuprxvecbuf_idx].hostid = hostid; + env->ipc_host_unsuprxvecbuf_array[env->ipc_host_unsuprxvecbuf_idx].dma_addr = hostbuf; + + // Copy the hostbuf (DMA address) in the ipc shared memory + shared_env_ptr->unsuprxvecbuf_hostbuf[env->ipc_host_unsuprxvecbuf_idx] = hostbuf; + + // Increment the array index + env->ipc_host_unsuprxvecbuf_idx = (env->ipc_host_unsuprxvecbuf_idx + 1)%IPC_UNSUPRXVECBUF_CNT; + + return (0); +} + +/** + ****************************************************************************** + */ +int ipc_host_msgbuf_push(struct ipc_host_env_tag *env, void *hostid, + uint32_t hostbuf) +{ + struct ipc_shared_env_tag *shared_env_ptr = env->shared; + + // Save the hostid and the hostbuf in global array + env->ipc_host_msgbuf_array[env->ipc_host_msge2a_idx].hostid = hostid; + env->ipc_host_msgbuf_array[env->ipc_host_msge2a_idx].dma_addr = hostbuf; + + // Copy the hostbuf (DMA address) in the ipc shared memory + shared_env_ptr->msg_e2a_hostbuf_addr[env->ipc_host_msge2a_idx] = hostbuf; + + // Increment the array index + env->ipc_host_msge2a_idx = (env->ipc_host_msge2a_idx +1)%IPC_MSGE2A_BUF_CNT; + + return (0); +} + +/** + ****************************************************************************** + */ +int ipc_host_dbgbuf_push(struct ipc_host_env_tag *env, void *hostid, + uint32_t hostbuf) +{ + struct ipc_shared_env_tag *shared_env_ptr = env->shared; + + // Save the hostid and the hostbuf in global array + env->ipc_host_dbgbuf_array[env->ipc_host_dbg_idx].hostid = hostid; + env->ipc_host_dbgbuf_array[env->ipc_host_dbg_idx].dma_addr = hostbuf; + + // Copy the hostbuf (DMA address) in the ipc shared memory + shared_env_ptr->dbg_hostbuf_addr[env->ipc_host_dbg_idx] = hostbuf; + + // Increment the array index + env->ipc_host_dbg_idx = (env->ipc_host_dbg_idx +1)%IPC_DBGBUF_CNT; + + return (0); +} + +/** + ****************************************************************************** + */ +void ipc_host_dbginfobuf_push(struct ipc_host_env_tag *env, uint32_t infobuf) +{ + struct ipc_shared_env_tag *shared_env_ptr = env->shared; + + // Copy the hostbuf (DMA address) in the ipc shared memory + shared_env_ptr->la_dbginfo_addr = 
infobuf; +} + +/** + ****************************************************************************** + */ +volatile struct txdesc_host *ipc_host_txdesc_get(struct ipc_host_env_tag *env, const int queue_idx, const int user_pos) +{ + volatile struct txdesc_host *txdesc_free; + uint32_t used_idx = env->txdesc_used_idx[queue_idx][user_pos]; + uint32_t free_idx = env->txdesc_free_idx[queue_idx][user_pos]; + + ASSERT_ERR(queue_idx < IPC_TXQUEUE_CNT); + ASSERT_ERR((free_idx - used_idx) <= nx_txdesc_cnt[queue_idx]); + + // Check if a free descriptor is available + if (free_idx != (used_idx + nx_txdesc_cnt[queue_idx])) + { + // Get the pointer to the first free descriptor + txdesc_free = env->txdesc[queue_idx][user_pos] + (free_idx & nx_txdesc_cnt_msk[queue_idx]); + } + else + { + txdesc_free = NULL; + } + + return txdesc_free; +} + +/** + ****************************************************************************** + */ +void ipc_host_txdesc_push(struct ipc_host_env_tag *env, const int queue_idx, + const int user_pos, void *host_id) +{ + uint32_t free_idx = env->txdesc_free_idx[queue_idx][user_pos] & nx_txdesc_cnt_msk[queue_idx]; + volatile struct txdesc_host *txdesc_pushed = env->txdesc[queue_idx][user_pos] + free_idx; + + + // Descriptor is now ready + txdesc_pushed->ready = 0xFFFFFFFF; + + // Save the host id in the environment + env->tx_host_id[queue_idx][user_pos][free_idx] = host_id; + + // Increment the index + env->txdesc_free_idx[queue_idx][user_pos]++; + + // trigger interrupt!!! + //REG_SW_SET_PROFILING(env->pthis, CO_BIT(queue_idx+SW_PROF_IRQ_A2E_TXDESC_FIRSTBIT)); + ipc_app2emb_trigger_setf(env->shared, CO_BIT(user_pos + queue_idx * CONFIG_USER_MAX + + IPC_IRQ_A2E_TXDESC_FIRSTBIT)); +} + +/** + ****************************************************************************** + */ +void ipc_host_irq(struct ipc_host_env_tag *env, uint32_t status) +{ + // Acknowledge the pending interrupts + ipc_emb2app_ack_clear(env->shared, status); + // And re-read the status, just to be sure that the acknowledgment is + // effective when we start the interrupt handling + ipc_emb2app_status_get(env->shared); + + // Optimized for only one IRQ at a time + if (status & IPC_IRQ_E2A_RXDESC) + { + // handle the RX descriptor reception + ipc_host_rxdesc_handler(env); + } + if (status & IPC_IRQ_E2A_MSG_ACK) + { + ipc_host_msgack_handler(env); + } + if (status & IPC_IRQ_E2A_MSG) + { + ipc_host_msg_handler(env); + } + if (status & IPC_IRQ_E2A_TXCFM) + { + int i; + +#ifdef __KERNEL__ + spin_lock_bh(&((struct rwnx_hw *)env->pthis)->tx_lock); +#endif + // handle the TX confirmation reception + for (i = 0; i < IPC_TXQUEUE_CNT; i++) + { + int j = 0; +#ifdef CONFIG_RWNX_MUMIMO_TX + for (; j < nx_txuser_cnt[i]; j++) +#endif + { + uint32_t q_bit = CO_BIT(j + i * CONFIG_USER_MAX + IPC_IRQ_E2A_TXCFM_POS); + if (status & q_bit) + { + // handle the confirmation + ipc_host_tx_cfm_handler(env, i, j); + } + } + } +#ifdef __KERNEL__ + spin_unlock_bh(&((struct rwnx_hw *)env->pthis)->tx_lock); +#endif + } + if (status & IPC_IRQ_E2A_RADAR) + { + // handle the radar event reception + ipc_host_radar_handler(env); + } + + if (status & IPC_IRQ_E2A_UNSUP_RX_VEC) + { + // handle the unsupported rx vector reception + ipc_host_unsup_rx_vec_handler(env); + } + + if (status & IPC_IRQ_E2A_DBG) + { + ipc_host_dbg_handler(env); + } + + if (status & IPC_IRQ_E2A_TBTT_PRIM) + { + env->cb.prim_tbtt_ind(env->pthis); + } + + if (status & IPC_IRQ_E2A_TBTT_SEC) + { + env->cb.sec_tbtt_ind(env->pthis); + } +} + +/** + 
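+ * Push an App->Emb message: the message is copied word by word into the
+ * shared msg_a2e_buf, the caller's buffer pointer is stored in
+ * env->msga2e_hostid, and IPC_IRQ_A2E_MSG is raised towards the embedded
+ * CPU. Only one A2E message may be outstanding at a time (see the
+ * ASSERT_ERR on msga2e_hostid below).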
 ******************************************************************************
+ */
+int ipc_host_msg_push(struct ipc_host_env_tag *env, void *msg_buf, uint16_t len)
+{
+    int i;
+    uint32_t *src, *dst;
+
+    REG_SW_SET_PROFILING(env->pthis, SW_PROF_IPC_MSGPUSH);
+
+    ASSERT_ERR(!env->msga2e_hostid);
+    ASSERT_ERR(round_up(len, 4) <= sizeof(env->shared->msg_a2e_buf.msg));
+
+    // Copy the message into the IPC MSG buffer
+#ifdef __KERNEL__
+    src = (uint32_t*)((struct rwnx_cmd *)msg_buf)->a2e_msg;
+#else
+    src = (uint32_t*) msg_buf;
+#endif
+    dst = (uint32_t*)&(env->shared->msg_a2e_buf.msg);
+
+    // Copy the message in the IPC queue
+    for (i=0; i < len; i+=4)
+    {
+        *dst++ = *src++;
+    }
+
+    env->msga2e_hostid = msg_buf;
+
+    // Trigger the irq to send the message to EMB
+    ipc_app2emb_trigger_set(env->shared, IPC_IRQ_A2E_MSG);
+
+    REG_SW_CLEAR_PROFILING(env->pthis, SW_PROF_IPC_MSGPUSH);
+
+    return (0);
+}
+
+/**
+ ******************************************************************************
+ */
+void ipc_host_enable_irq(struct ipc_host_env_tag *env, uint32_t value)
+{
+    // Enable the handled interrupts
+    ipc_emb2app_unmask_set(env->shared, value);
+}
+
+/**
+ ******************************************************************************
+ */
+void ipc_host_disable_irq(struct ipc_host_env_tag *env, uint32_t value)
+{
+    // Disable the handled interrupts
+    ipc_emb2app_unmask_clear(env->shared, value);
+}
+
+/**
+ ******************************************************************************
+ */
+uint32_t ipc_host_get_status(struct ipc_host_env_tag *env)
+{
+    volatile uint32_t status;
+
+    status = ipc_emb2app_status_get(env->shared);
+
+    return status;
+}
+
+/**
+ ******************************************************************************
+ */
+uint32_t ipc_host_get_rawstatus(struct ipc_host_env_tag *env)
+{
+    volatile uint32_t rawstatus;
+
+    rawstatus = ipc_emb2app_rawstatus_get(env->shared);
+
+    return rawstatus;
+}
+
diff --git a/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/ipc_host.h b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/ipc_host.h
new file mode 100644
index 0000000..c008a31
--- /dev/null
+++ b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/ipc_host.h
@@ -0,0 +1,508 @@
+/**
+ ******************************************************************************
+ *
+ * @file ipc_host.h
+ *
+ * @brief IPC module.
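+ *
+ * A minimal host-side usage sketch (illustrative only: the my_* names,
+ * queue_idx and shared_ram are placeholders provided by the driver, not
+ * symbols defined by this package):
+ *
+ * @code
+ * static struct ipc_host_env_tag ipc_env;
+ * static struct ipc_host_cb_tag ipc_cb = {
+ *     .send_data_cfm = my_send_data_cfm,  // all callbacks must be valid:
+ *     .recv_data_ind = my_recv_data_ind,  // ipc_host_irq() invokes them
+ *     .recv_msg_ind  = my_recv_msg_ind,   // without NULL checks
+ *     // ... remaining callbacks ...
+ * };
+ *
+ * // At probe time, after mapping the IPC shared RAM:
+ * ipc_host_init(&ipc_env, &ipc_cb,
+ *               (struct ipc_shared_env_tag *)shared_ram, my_hw);
+ * ipc_host_enable_irq(&ipc_env, IPC_IRQ_E2A_ALL);
+ *
+ * // In the host interrupt handler:
+ * uint32_t status = ipc_host_get_status(&ipc_env);
+ * if (status)
+ *     ipc_host_irq(&ipc_env, status);
+ *
+ * // TX path: reserve a descriptor, fill it, then hand it to the firmware
+ * volatile struct txdesc_host *txd = ipc_host_txdesc_get(&ipc_env, queue_idx, 0);
+ * if (txd != NULL) {
+ *     // fill txd->api.host (packet length, addresses, vif/sta ids, ...)
+ *     ipc_host_txdesc_push(&ipc_env, queue_idx, 0, my_host_id);
+ * }
+ * @endcode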
+ * + * Copyright (C) RivieraWaves 2011-2019 + * + ****************************************************************************** + */ +#ifndef _IPC_HOST_H_ +#define _IPC_HOST_H_ + +/* + * INCLUDE FILES + ****************************************************************************** + */ +#include "ipc_shared.h" +#ifndef __KERNEL__ +#include "arch.h" +#else +#include "ipc_compat.h" +#endif + +/* + * ENUMERATION + ****************************************************************************** + */ + +enum ipc_host_desc_status +{ + /// Descriptor is IDLE + IPC_HOST_DESC_IDLE = 0, + /// Data can be forwarded + IPC_HOST_DESC_FORWARD, + /// Data has to be kept in UMAC memory + IPC_HOST_DESC_KEEP, + /// Delete stored packet + IPC_HOST_DESC_DELETE, + /// Update Frame Length status + IPC_HOST_DESC_LEN_UPDATE, +}; + +/** + ****************************************************************************** + * @brief This structure is used to initialize the MAC SW + * + * The WLAN device driver provides functions call-back with this structure + ****************************************************************************** + */ +struct ipc_host_cb_tag +{ + /// WLAN driver call-back function: send_data_cfm + int (*send_data_cfm)(void *pthis, void *host_id); + + /// WLAN driver call-back function: recv_data_ind + uint8_t (*recv_data_ind)(void *pthis, void *host_id); + + /// WLAN driver call-back function: recv_radar_ind + uint8_t (*recv_radar_ind)(void *pthis, void *host_id); + + /// WLAN driver call-back function: recv_unsup_rx_vec_ind + uint8_t (*recv_unsup_rx_vec_ind)(void *pthis, void *host_id); + + /// WLAN driver call-back function: recv_msg_ind + uint8_t (*recv_msg_ind)(void *pthis, void *host_id); + + /// WLAN driver call-back function: recv_msgack_ind + uint8_t (*recv_msgack_ind)(void *pthis, void *host_id); + + /// WLAN driver call-back function: recv_dbg_ind + uint8_t (*recv_dbg_ind)(void *pthis, void *host_id); + + /// WLAN driver call-back function: prim_tbtt_ind + void (*prim_tbtt_ind)(void *pthis); + + /// WLAN driver call-back function: sec_tbtt_ind + void (*sec_tbtt_ind)(void *pthis); + +}; + +/* + * Struct used to store information about host buffers (DMA Address and local pointer) + */ +struct ipc_hostbuf +{ + void *hostid; ///< ptr to hostbuf client (ipc_host client) structure + uint32_t dma_addr; ///< ptr to real hostbuf dma address +}; + +/// Definition of the IPC Host environment structure. 
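+///
+/// TX bookkeeping convention: for each queue/user pair the host keeps a
+/// txdesc_free_idx / txdesc_used_idx pair of monotonically increasing
+/// counters, and the slot actually addressed is (idx & nx_txdesc_cnt_msk[queue]).
+/// A queue is empty when free == used and full when
+/// free == used + nx_txdesc_cnt[queue]; e.g. with NX_TXDESC_CNT0 = 8,
+/// used = 4 and free = 12 means all 8 BK slots are still awaiting confirmation.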
+struct ipc_host_env_tag +{ + /// Structure containing the callback pointers + struct ipc_host_cb_tag cb; + + /// Pointer to the shared environment + struct ipc_shared_env_tag *shared; + + #ifdef CONFIG_RWNX_FULLMAC + // Array used to store the descriptor addresses + struct ipc_hostbuf ipc_host_rxdesc_array[IPC_RXDESC_CNT]; + // Index of the host RX descriptor array (ipc_shared environment) + uint8_t ipc_host_rxdesc_idx; + /// Store the number of RX Descriptors + uint8_t rxdesc_nb; + #endif //(CONFIG_RWNX_FULLMAC) + + /// Fields for Data Rx handling + // Index used for ipc_host_rxbuf_array to point to current buffer + uint8_t ipc_host_rxbuf_idx; + // Store the number of Rx Data buffers + uint32_t rx_bufnb; + // Store the size of the Rx Data buffers + uint32_t rx_bufsz; + + /// Fields for Radar events handling + // Global array used to store the hostid and hostbuf addresses + struct ipc_hostbuf ipc_host_radarbuf_array[IPC_RADARBUF_CNT]; + // Index used for ipc_host_rxbuf_array to point to current buffer + uint8_t ipc_host_radarbuf_idx; + // Store the number of radar event buffers + uint32_t radar_bufnb; + // Store the size of the radar event buffers + uint32_t radar_bufsz; + + ///Fields for Unsupported frame handling + // Global array used to store the hostid and hostbuf addresses + struct ipc_hostbuf ipc_host_unsuprxvecbuf_array[IPC_UNSUPRXVECBUF_CNT]; + // Index used for ipc_host_unsuprxvecbuf_array to point to current buffer + uint8_t ipc_host_unsuprxvecbuf_idx; + // Store the number of unsupported rx vector buffers + uint32_t unsuprxvec_bufnb; + // Store the size of unsupported rx vector buffers + uint32_t unsuprxvec_bufsz; + + // Index used that points to the first free TX desc + uint32_t txdesc_free_idx[IPC_TXQUEUE_CNT][CONFIG_USER_MAX]; + // Index used that points to the first used TX desc + uint32_t txdesc_used_idx[IPC_TXQUEUE_CNT][CONFIG_USER_MAX]; + // Array storing the currently pushed host ids for the BK queue + void *tx_host_id0[CONFIG_USER_MAX][NX_TXDESC_CNT0]; + // Array storing the currently pushed host ids for the BE queue + void *tx_host_id1[CONFIG_USER_MAX][NX_TXDESC_CNT1]; + // Array storing the currently pushed host ids for the VI queue + void *tx_host_id2[CONFIG_USER_MAX][NX_TXDESC_CNT2]; + // Array storing the currently pushed host ids for the VO queue + void *tx_host_id3[CONFIG_USER_MAX][NX_TXDESC_CNT3]; + #if NX_TXQ_CNT == 5 + // Array storing the currently pushed host ids for the BCN queue + void *tx_host_id4[1][NX_TXDESC_CNT4]; + #endif + // Pointer to the different host ids arrays, per IPC queue + void **tx_host_id[IPC_TXQUEUE_CNT][CONFIG_USER_MAX]; + // Pointer to the different TX descriptor arrays, per IPC queue + volatile struct txdesc_host *txdesc[IPC_TXQUEUE_CNT][CONFIG_USER_MAX]; + + /// Fields for Emb->App MSGs handling + // Global array used to store the hostid and hostbuf addresses for msg/ind + struct ipc_hostbuf ipc_host_msgbuf_array[IPC_MSGE2A_BUF_CNT]; + // Index of the MSG E2A buffers array to point to current buffer + uint8_t ipc_host_msge2a_idx; + // Store the number of E2A MSG buffers + uint32_t ipc_e2amsg_bufnb; + // Store the size of the E2A MSG buffers + uint32_t ipc_e2amsg_bufsz; + + /// E2A ACKs of A2E MSGs + uint8_t msga2e_cnt; + void *msga2e_hostid; + + /// Fields for Debug MSGs handling + // Global array used to store the hostid and hostbuf addresses for Debug messages + struct ipc_hostbuf ipc_host_dbgbuf_array[IPC_DBGBUF_CNT]; + // Index of the Debug messages buffers array to point to current buffer + uint8_t ipc_host_dbg_idx; + // Store 
the number of Debug messages buffers + uint32_t ipc_dbg_bufnb; + // Store the size of the Debug messages buffers + uint32_t ipc_dbg_bufsz; + + /// Pointer to the attached object (used in callbacks and register accesses) + void *pthis; +}; + +extern const int nx_txdesc_cnt[]; +extern const int nx_txuser_cnt[]; + +/** + ****************************************************************************** + * @brief Returns the full/not full status of the queue the index of which is + * passed as parameter. + * + * @param[in] env Pointer to the IPC host environment + * @param[in] queue_idx Index of the queue to be checked + * + * @return true if the queue is full, false otherwise + * + ****************************************************************************** + */ +__INLINE bool ipc_host_queue_full(struct ipc_host_env_tag *env, + const int queue_idx) +{ + return (env->txdesc_free_idx[queue_idx] == + (env->txdesc_used_idx[queue_idx] + nx_txdesc_cnt[queue_idx])); +} + +/** + ****************************************************************************** + * @brief Initialize the IPC running on the Application CPU. + * + * This function: + * - initializes the IPC software environments + * - enables the interrupts in the IPC block + * + * @param[in] env Pointer to the IPC host environment + * + * @warning Since this function resets the IPC Shared memory, it must be called + * before the LMAC FW is launched because LMAC sets some init values in IPC + * Shared memory at boot. + * + ****************************************************************************** + */ +void ipc_host_init(struct ipc_host_env_tag *env, + struct ipc_host_cb_tag *cb, + struct ipc_shared_env_tag *shared_env_ptr, + void *pthis); + +/** @addtogroup IPC_TX + * @{ + */ + +/** + ****************************************************************************** + * @brief Retrieve a new free Tx descriptor (host side). + * + * This function returns a pointer to the next Tx descriptor available from the + * queue queue_idx to the host driver. The driver will have to fill it with the + * appropriate endianness and to send it to the + * emb side with ipc_host_txdesc_push(). + * + * This function should only be called once until ipc_host_txdesc_push() is called. + * + * This function will return NULL if the queue is full. + * + * @param[in] env Pointer to the IPC host environment + * @param[in] queue_idx Queue index. The index can be inferred from the + * user priority of the incoming packet. + * @param[in] user_pos User position. If MU-MIMO is not used, this value + * shall be 0. + * @return Pointer to the next Tx descriptor free. This can + * point to the host memory or to shared memory, + * depending on IPC implementation. + * + ****************************************************************************** + */ +volatile struct txdesc_host *ipc_host_txdesc_get(struct ipc_host_env_tag *env, + const int queue_idx, + const int user_pos); + + +/** + ****************************************************************************** + * @brief Push a filled Tx descriptor (host side). + * + * This function sets the next Tx descriptor available by the host side: + * - as used for the host side + * - as available for the emb side. + * The Tx descriptor must be correctly filled before calling this function. + * + * This function may trigger an IRQ to the emb CPU depending on the interrupt + * mitigation policy and on the push count. + * + * @param[in] env Pointer to the IPC host environment + * @param[in] queue_idx Queue index. 
Same value than ipc_host_txdesc_get() + * @param[in] user_pos User position. If MU-MIMO is not used, this value + * shall be 0. + * @param[in] host_id Parameter indicated by the IPC at TX confirmation, + * that allows the driver finding the buffer + * + ****************************************************************************** + */ +void ipc_host_txdesc_push(struct ipc_host_env_tag *env, const int queue_idx, + const int user_pos, void *host_id); + + +/** + ****************************************************************************** + * @brief Check if there are TX frames pending in the TX queues. + * + * @param[in] env Pointer to the IPC host environment + * + * @return true if there are frames pending, false otherwise. + * + ****************************************************************************** + */ +bool ipc_host_tx_frames_pending(struct ipc_host_env_tag *env); + +/** + ****************************************************************************** + * @brief Get and flush a packet from the IPC queue passed as parameter. + * + * @param[in] env Pointer to the IPC host environment + * @param[in] queue_idx Index of the queue to flush + * @param[in] user_pos User position to flush + * + * @return The flushed hostid if there is one, 0 otherwise. + * + ****************************************************************************** + */ +void *ipc_host_tx_flush(struct ipc_host_env_tag *env, const int queue_idx, + const int user_pos); + +/// @} IPC_TX + +/** @addtogroup IPC_RX + * @{ + */ +void ipc_host_patt_addr_push(struct ipc_host_env_tag *env, uint32_t addr); + +/** + ****************************************************************************** + * @brief Push a pre-allocated buffer descriptor for Rx packet (host side) + * + * This function should be called by the host IRQ handler to supply the + * embedded side with new empty buffer. + * + * @param[in] env Pointer to the IPC host environment + * @param[in] hostid Packet ID used by the host (skbuff pointer on Linux) + * @param[in] hostbuf Pointer to the start of the buffer payload in the + * host memory (this may be inferred from the skbuff?) + * The length of this buffer should be predefined + * between host and emb statically (constant needed?). + * + ****************************************************************************** + */ +int ipc_host_rxbuf_push(struct ipc_host_env_tag *env, +#ifdef CONFIG_RWNX_FULLMAC + uint32_t hostid, +#endif + uint32_t hostbuf); + +/** + ****************************************************************************** + * @brief Push a pre-allocated Descriptor + * + * This function should be called by the host IRQ handler to supply the + * embedded side with new empty buffer. + * + * @param[in] env Pointer to the IPC host environment + * @param[in] hostid Address of packet for host + * @param[in] hostbuf Pointer to the start of the buffer payload in the + * host memory. The length of this buffer should be + * predefined between host and emb statically. + * + ****************************************************************************** + */ +int ipc_host_rxdesc_push(struct ipc_host_env_tag *env, void *hostid, + uint32_t hostbuf); + +/** + ****************************************************************************** + * @brief Push a pre-allocated radar event buffer descriptor + * + * This function is called at Init time to initialize all radar event buffers. + * Then each time embedded send a radar event, this function is used to push + * back the same buffer once it has been handled. 
+ * + * @param[in] env Pointer to the IPC host environment + * @param[in] hostid Address of packet for host + * @param[in] hostbuf Pointer to the start of the buffer payload in the + * host memory. The length of this buffer should be + * predefined between host and emb statically. + * + ****************************************************************************** + */ +int ipc_host_radarbuf_push(struct ipc_host_env_tag *env, void *hostid, + uint32_t hostbuf); + +/** + ****************************************************************************** + * @brief Push a pre-allocated unsupported rx vector buffer descriptor + * + * This function is called at Init time to initialize all unsupported rx vector + * buffers. Then each time the embedded sends a unsupported rx vector, this + * function is used to push a new unsupported rx vector buffer. + * + * @param[in] env Pointer to the IPC host environment + * @param[in] hostid Address of packet for host + * @param[in] hostbuf Pointer to the start of the buffer payload in the + * host memory. The length of this buffer should be + * predefined between host and emb statically. + * + ****************************************************************************** + */ +int ipc_host_unsup_rx_vec_buf_push(struct ipc_host_env_tag *env, void *hostid, + uint32_t hostbuf); + +/** + ****************************************************************************** + * @brief Push a pre-allocated buffer descriptor for IPC MSGs (host side) + * + * This function is called at Init time to initialize all Emb2App messages + * buffers. Then each time embedded send a IPC message, this function is used + * to push back the same buffer once it has been handled. + * + * @param[in] env Pointer to the IPC host environment + * @param[in] hostid Address of buffer for host + * @param[in] hostbuf Address of buffer for embedded + * The length of this buffer should be predefined + * between host and emb statically. + * + ****************************************************************************** + */ +int ipc_host_msgbuf_push(struct ipc_host_env_tag *env, void *hostid, + uint32_t hostbuf); + +/** + ****************************************************************************** + * @brief Push a pre-allocated buffer descriptor for Debug messages (host side) + * + * This function is called at Init time to initialize all debug messages. + * Then each time embedded send a debug message, this function is used to push + * back the same buffer once it has been handled. + * + * @param[in] env Pointer to the IPC host environment + * @param[in] hostid Address of buffer for host + * @param[in] hostbuf Address of buffer for embedded + * The length of this buffer should be predefined + * between host and emb statically. + * + ****************************************************************************** + */ +int ipc_host_dbgbuf_push(struct ipc_host_env_tag *env, void *hostid, + uint32_t hostbuf); + +/** + ****************************************************************************** + * @brief Push the pre-allocated logic analyzer and debug information buffer + * + * @param[in] env Pointer to the IPC host environment + * @param[in] infobuf Address of buffer for embedded + * The length of this buffer should be predefined + * between host and emb statically. 
+ * + ****************************************************************************** + */ +void ipc_host_dbginfobuf_push(struct ipc_host_env_tag *env, uint32_t infobuf); + +/// @} IPC_RX + + + +/** @addtogroup IPC_MISC + * @{ + */ + +/** + ****************************************************************************** + * @brief Handle all IPC interrupts on the host side. + * + * The following interrupts should be handled: + * Tx confirmation, Rx buffer requests, Rx packet ready and kernel messages + * + * @param[in] env Pointer to the IPC host environment + * + ****************************************************************************** + */ +void ipc_host_irq(struct ipc_host_env_tag *env, uint32_t status); + +/** + ****************************************************************************** + * @brief Send a message to the embedded side + * + * @param[in] env Pointer to the IPC host environment + * @param[in] msg_buf Pointer to the message buffer + * @param[in] msg_len Length of the message to be transmitted + * + * @return Non-null value on failure + * + ****************************************************************************** + */ +int ipc_host_msg_push(struct ipc_host_env_tag *env, void *msg_buf, uint16_t len); + +/** + ****************************************************************************** + * @brief Enable IPC interrupts + * + * @param[in] env Global ipc_host environment pointer + * @param[in] value Bitfield of the interrupts to enable + * + * @warning After calling this function, IPC interrupts can be triggered at any + * time. Potentially, an interrupt could happen even before returning from the + * function if there is a request pending from the embedded side. + * + ****************************************************************************** + */ +void ipc_host_enable_irq(struct ipc_host_env_tag *env, uint32_t value); +void ipc_host_disable_irq(struct ipc_host_env_tag *env, uint32_t value); + +uint32_t ipc_host_get_status(struct ipc_host_env_tag *env); +uint32_t ipc_host_get_rawstatus(struct ipc_host_env_tag *env); + +/// @} IPC_MISC + + +#endif // _IPC_HOST_H_ diff --git a/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/ipc_shared.h b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/ipc_shared.h new file mode 100644 index 0000000..4860490 --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/ipc_shared.h @@ -0,0 +1,802 @@ +/** + **************************************************************************************** + * + * @file ipc_shared.h + * + * @brief Shared data between both IPC modules. 
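+ *
+ * The struct ipc_shared_env_tag layout defined below must match the
+ * firmware's view of the same memory word for word; struct compatibility_tag
+ * exposes the firmware characteristics (IPC shared size, MSG_API_VER, buffer
+ * and TX queue counts) that are intended for compatibility checking on the
+ * host side.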
+ * + * Copyright (C) RivieraWaves 2011-2019 + * + **************************************************************************************** + */ + +#ifndef _IPC_SHARED_H_ +#define _IPC_SHARED_H_ + +/* + * INCLUDE FILES + **************************************************************************************** + */ +#include "ipc_compat.h" +#include "lmac_mac.h" + +/* + * DEFINES AND MACROS + **************************************************************************************** + */ +#define CO_BIT(pos) (1U<<(pos)) + +#define IPC_TXQUEUE_CNT NX_TXQ_CNT +#define NX_TXDESC_CNT0 8 +#define NX_TXDESC_CNT1 64 +#define NX_TXDESC_CNT2 64 +#define NX_TXDESC_CNT3 32 +#if NX_TXQ_CNT == 5 +#define NX_TXDESC_CNT4 8 +#endif + +/* + * Number of Host buffers available for Data Rx handling (through DMA) + */ +#define IPC_RXBUF_CNT 128 + +/* + * Number of shared descriptors available for Data RX handling + */ +#define IPC_RXDESC_CNT 128 + +/* + * Number of Host buffers available for Radar events handling (through DMA) + */ +#define IPC_RADARBUF_CNT 16 + +/* + * Number of Host buffers available for unsupported Rx vectors handling (through DMA) + */ +#define IPC_UNSUPRXVECBUF_CNT 8 + +/* + * Size of RxVector + */ +#define IPC_RXVEC_SIZE 16 + +/* + * Number of Host buffers available for Emb->App MSGs sending (through DMA) + */ +#ifdef CONFIG_RWNX_FULLMAC +#define IPC_MSGE2A_BUF_CNT 64 +#endif +/* + * Number of Host buffers available for Debug Messages sending (through DMA) + */ +#define IPC_DBGBUF_CNT 32 + +/* + * Length used in MSGs structures + */ +#define IPC_A2E_MSG_BUF_SIZE 127 // size in 4-byte words +#ifdef CONFIG_RWNX_FULLMAC +#define IPC_E2A_MSG_SIZE_BASE 256 // size in 4-byte words +#endif + +#ifdef CONFIG_RWNX_TL4 +#define IPC_E2A_MSG_PARAM_SIZE (IPC_E2A_MSG_SIZE_BASE + (IPC_E2A_MSG_SIZE_BASE / 2)) +#else +#define IPC_E2A_MSG_PARAM_SIZE IPC_E2A_MSG_SIZE_BASE +#endif + +/* + * Debug messages buffers size (in bytes) + */ +#define IPC_DBG_PARAM_SIZE 256 + +/* + * Define used for Rx hostbuf validity. + * This value should appear only when hostbuf was used for a Reception. + */ +#define RX_DMA_OVER_PATTERN 0xAAAAAA00 + +/* + * Define used for MSG buffers validity. + * This value will be written only when a MSG buffer is used for sending from Emb to App. + */ +#define IPC_MSGE2A_VALID_PATTERN 0xADDEDE2A + +/* + * Define used for Debug messages buffers validity. + * This value will be written only when a DBG buffer is used for sending from Emb to App. + */ +#define IPC_DBG_VALID_PATTERN 0x000CACA0 + +/* + * Length of the receive vectors, in bytes + */ +#define DMA_HDR_PHYVECT_LEN 36 + +/* + * Maximum number of payload addresses and lengths present in the descriptor + */ +#define NX_TX_PAYLOAD_MAX 6 + +/* + * Message struct/ID API version + */ +#define MSG_API_VER 15 + +/* + **************************************************************************************** + */ +// c.f LMAC/src/tx/tx_swdesc.h +/// Descriptor filled by the Host +struct hostdesc +{ + /// Pointer to packet payload + //u32_l packet_addr; + /// Size of the payload + u16_l packet_len; + u16_l flags_ext; + +#ifdef CONFIG_RWNX_FULLMAC + /// Address of the status descriptor in host memory (used for confirmation upload) + u32_l status_desc_addr; + /// Destination Address + struct mac_addr eth_dest_addr; + /// Source Address + struct mac_addr eth_src_addr; + /// Ethernet Type + u16_l ethertype; +#else /* ! 
CONFIG_RWNX_FULLMAC */ +#ifdef CONFIG_RWNX_AGG_TX + ///Sequence Number for AMPDU MPDUs - for quick check if it's allowed within window + u16_l sn; +#endif /* CONFIG_RWNX_AGG_TX */ + /// Padding between the buffer control structure and the MPDU in host memory + u8_l padding; +#endif /* CONFIG_RWNX_FULLMAC */ + u8_l ac; + /// Packet TID (0xFF if not a QoS frame) + u8_l tid; + /// Interface Id + u8_l vif_idx; + /// Station Id (0xFF if station is unknown) + u8_l staid; +#ifdef CONFIG_RWNX_MUMIMO_TX + /// MU-MIMO information (GroupId and User Position in the group) - The GroupId + /// is located on bits 0-5 and the User Position on bits 6-7. The GroupId value is set + /// to 63 if MU-MIMO shall not be used + u8_l mumimo_info; +#endif /* CONFIG_RWNX_MUMIMO_TX */ +#ifdef CONFIG_RWNX_FULLMAC + /// TX flags + u16_l flags; +#endif /* CONFIG_RWNX_FULLMAC */ +}; + +/// Descriptor filled by the UMAC +struct umacdesc +{ +#ifdef CONFIG_RWNX_AGG_TX + ///First Sequence Number of the BlockAck window + u16_l sn_win; + /// Flags from UMAC (match tx_hd.macctrlinfo2 format) + u32_l flags; + /// PHY related flags field - rate, GI type, BW type - filled by driver + u32_l phy_flags; +#endif //(CONFIG_RWNX_AGG_TX) +}; + +struct txdesc_api +{ + /// Information provided by Host + struct hostdesc host; +}; + + +struct txdesc_host +{ + u32_l ready; + + /// API of the embedded part + struct txdesc_api api; +}; + +/// Comes from ipc_dma.h +/// Element in the pool of TX DMA bridge descriptors. +struct dma_desc +{ + /** Application subsystem address which is used as source address for DMA payload + * transfer*/ + u32_l src; + /** Points to the start of the embedded data buffer associated with this descriptor. + * This address acts as the destination address for the DMA payload transfer*/ + u32_l dest; + /// Complete length of the buffer in memory + u16_l length; + /// Control word for the DMA engine (e.g. 
for interrupt generation) + u16_l ctrl; + /// Pointer to the next element of the chained list + u32_l next; +}; + +// Comes from la.h +/// Length of the configuration data of a logic analyzer +#define LA_CONF_LEN 10 + +/// Structure containing the configuration data of a logic analyzer +struct la_conf_tag +{ + u32_l conf[LA_CONF_LEN]; + u32_l trace_len; + u32_l diag_conf; +}; + +/// Size of a logic analyzer memory +#define LA_MEM_LEN (1024 * 1024) + +/// Type of errors +enum +{ + /// Recoverable error, not requiring any action from Upper MAC + DBG_ERROR_RECOVERABLE = 0, + /// Fatal error, requiring Upper MAC to reset Lower MAC and HW and restart operation + DBG_ERROR_FATAL +}; + +/// Maximum length of the SW diag trace +#define DBG_SW_DIAG_MAX_LEN 1024 + +/// Maximum length of the error trace +#define DBG_ERROR_TRACE_SIZE 256 + +/// Number of MAC diagnostic port banks +#define DBG_DIAGS_MAC_MAX 48 + +/// Number of PHY diagnostic port banks +#define DBG_DIAGS_PHY_MAX 32 + +/// Maximum size of the RX header descriptor information in the debug dump +#define DBG_RHD_MEM_LEN (5 * 1024) + +/// Maximum size of the RX buffer descriptor information in the debug dump +#define DBG_RBD_MEM_LEN (5 * 1024) + +/// Maximum size of the TX header descriptor information in the debug dump +#define DBG_THD_MEM_LEN (10 * 1024) + +/// Structure containing the information about the PHY channel that is used +struct phy_channel_info +{ + /// PHY channel information 1 + u32_l info1; + /// PHY channel information 2 + u32_l info2; +}; + +/// Debug information forwarded to host when an error occurs +struct dbg_debug_info_tag +{ + /// Type of error (0: recoverable, 1: fatal) + u32_l error_type; + /// Pointer to the first RX Header Descriptor chained to the MAC HW + u32_l rhd; + /// Size of the RX header descriptor buffer + u32_l rhd_len; + /// Pointer to the first RX Buffer Descriptor chained to the MAC HW + u32_l rbd; + /// Size of the RX buffer descriptor buffer + u32_l rbd_len; + /// Pointer to the first TX Header Descriptors chained to the MAC HW + u32_l thd[NX_TXQ_CNT]; + /// Size of the TX header descriptor buffer + u32_l thd_len[NX_TXQ_CNT]; + /// MAC HW diag configuration + u32_l hw_diag; + /// Error message + u32_l error[DBG_ERROR_TRACE_SIZE/4]; + /// SW diag configuration length + u32_l sw_diag_len; + /// SW diag configuration + u32_l sw_diag[DBG_SW_DIAG_MAX_LEN/4]; + /// PHY channel information + struct phy_channel_info chan_info; + /// Embedded LA configuration + struct la_conf_tag la_conf; + /// MAC diagnostic port state + u16_l diags_mac[DBG_DIAGS_MAC_MAX]; + /// PHY diagnostic port state + u16_l diags_phy[DBG_DIAGS_PHY_MAX]; + /// MAC HW RX Header descriptor pointer + u32_l rhd_hw_ptr; + /// MAC HW RX Buffer descriptor pointer + u32_l rbd_hw_ptr; +}; + +/// Full debug dump that is forwarded to host in case of error +struct dbg_debug_dump_tag +{ + /// Debug information + struct dbg_debug_info_tag dbg_info; + + /// RX header descriptor memory + u32_l rhd_mem[DBG_RHD_MEM_LEN/4]; + + /// RX buffer descriptor memory + u32_l rbd_mem[DBG_RBD_MEM_LEN/4]; + + /// TX header descriptor memory + u32_l thd_mem[NX_TXQ_CNT][DBG_THD_MEM_LEN/4]; + + /// Logic analyzer memory + u32_l la_mem[LA_MEM_LEN/4]; +}; + + +/// Number of pulses in a radar event structure +#define RADAR_PULSE_MAX 4 + +/// Definition of an array of radar pulses +struct radar_pulse_array_desc +{ + /// Buffer containing the radar pulses + u32_l pulse[RADAR_PULSE_MAX]; + /// Index of the radar detection chain that detected those pulses + u32_l idx; + /// 
Number of valid pulses in the buffer + u32_l cnt; +}; + +/// Bit mapping inside a radar pulse element +struct radar_pulse { + s32_l freq:6; /** Freq (resolution is 2Mhz range is [-Fadc/4 .. Fadc/4]) */ + u32_l fom:4; /** Figure of Merit */ + u32_l len:6; /** Length of the current radar pulse (resolution is 2us) */ + u32_l rep:16; /** Time interval between the previous radar event + and the current one (in us) */ +}; + +/// Definition of a RX vector descriptor +struct rx_vector_desc +{ + /// PHY channel information + struct phy_channel_info phy_info; + + /// RX vector 1 + u32_l rx_vect1[IPC_RXVEC_SIZE/4]; + + /// Used to print a valid rx vector + u32_l pattern; +}; + +/// +struct rxdesc_tag +{ + /// Host Buffer Address + u32_l host_id; + /// Length + u32_l frame_len; + /// Status + u16_l status; +}; + +/** + **************************************************************************************** + * @defgroup IPC IPC + * @ingroup NXMAC + * @brief Inter Processor Communication module. + * + * The IPC module implements the protocol to communicate between the Host CPU + * and the Embedded CPU. + * + * @see http://en.wikipedia.org/wiki/Circular_buffer + * For more information about the ring buffer typical use and difficulties. + **************************************************************************************** + */ + + +/** + **************************************************************************************** + * @addtogroup IPC_TX IPC Tx path + * @ingroup IPC + * @brief IPC Tx path structures and functions + * + * A typical use case of the IPC Tx path API: + * @msc + * hscale = "2"; + * + * a [label=Driver], + * b [label="IPC host"], + * c [label="IPC emb"], + * d [label=Firmware]; + * + * --- [label="Tx descriptor queue example"]; + * a=>a [label="Driver receives a Tx packet from OS"]; + * a=>b [label="ipc_host_txdesc_get()"]; + * a<a [label="Driver fill the descriptor"]; + * a=>b [label="ipc_host_txdesc_push()"]; + * ... [label="(several Tx desc can be pushed)"]; + * b:>c [label="Tx desc queue filled IRQ"]; + * c=>>d [label="EDCA sub-scheduler callback"]; + * c<>d [label="UMAC Tx desc callback"]; + * ... [label="(several Tx desc can be popped)"]; + * d=>d [label="Packets are sent or discarded"]; + * --- [label="Tx confirm queue example"]; + * c<=d [label="ipc_emb_txcfm_push()"]; + * c>>d [label="Request accepted"]; + * ... [label="(several Tx cfm can be pushed)"]; + * b<:c [label="Tx cfm queue filled IRQ"]; + * a<<=b [label="Driver's Tx Confirm callback"]; + * a=>b [label="ipc_host_txcfm_pop()"]; + * a<c [label="ipc_host_rxbuf_push()"]; + * d=>c [label="ipc_host_rxbuf_push()"]; + * d=>c [label="ipc_host_rxbuf_push()"]; + * ... [label="(several Rx buffer are pushed)"]; + * a=>a [label=" Frame is received\n from the medium"]; + * a<a [label=" Firmware fill the buffer\n with received frame"]; + * a<c [label="Rx desc queue filled IRQ"]; + * c=>>d [label="Driver Rx packet callback"]; + * c<=d [label="ipc_host_rxdesc_pop()"]; + * d=>d [label="Rx packet is handed \nover to the OS "]; + * ... [label="(several Rx desc can be poped)"]; + * --- [label="Rx buffer request exemple"]; + * b:>c [label="Low Rx buffer count IRQ"]; + * a<>d [label="Driver Rx buffer callback"]; + * d=>c [label="ipc_host_rxbuf_push()"]; + * d=>c [label="ipc_host_rxbuf_push()"]; + * d=>c [label="ipc_host_rxbuf_push()"]; + * ... 
[label="(several Rx buffer are pushed)"]; + * @endmsc + * + * @addtogroup IPC_RX + * @{ + **************************************************************************************** + */ + +/// @} IPC_RX + + + +/** + **************************************************************************************** + * @defgroup IPC_MISC IPC Misc + * @ingroup IPC + * @brief IPC miscellaneous functions + **************************************************************************************** + */ +/** IPC header structure. This structure is stored at the beginning of every IPC message. + * @warning This structure's size must NOT exceed 4 bytes in length. + */ +struct ipc_header +{ + /// IPC message type. + u16_l type; + /// IPC message size in number of bytes. + u16_l size; +}; + +struct ipc_msg_elt +{ + /// Message header (alignment forced on word size, see allocation in shared env). + struct ipc_header header __ALIGN4; +}; + +/// Message structure for MSGs from Emb to App +struct ipc_e2a_msg +{ + u16_l id; ///< Message id. + u16_l dummy_dest_id; + u16_l dummy_src_id; + u16_l param_len; ///< Parameter embedded struct length. + u32_l pattern; ///< Used to stamp a valid MSG buffer + u32_l param[IPC_E2A_MSG_PARAM_SIZE]; ///< Parameter embedded struct. Must be word-aligned. +}; + +/// Message structure for Debug messages from Emb to App +struct ipc_dbg_msg +{ + u32_l string[IPC_DBG_PARAM_SIZE/4]; ///< Debug string + u32_l pattern; ///< Used to stamp a valid buffer +}; + +/// Message structure for MSGs from App to Emb. +/// Actually a sub-structure will be used when filling the messages. +struct ipc_a2e_msg +{ + u32_l dummy_word; // used to cope with kernel message structure + u32_l msg[IPC_A2E_MSG_BUF_SIZE]; // body of the msg +}; + +struct ipc_shared_rx_buf +{ + /// < ptr to hostbuf client (ipc_host client) structure + u32_l hostid; + /// < ptr to real hostbuf dma address + u32_l dma_addr; +}; + +struct ipc_shared_rx_desc +{ + /// DMA Address + u32_l dma_addr; +}; + +/// Structure containing FW characteristics for compatibility checking +struct compatibility_tag { + /// Size of IPC shared memory + u16_l ipc_shared_size; + /// Message struct/ID API version + u16_l msg_api; + /// Version of IPC shared + u8_l ipc_shared_version; + /// Number of host buffers available for Emb->App MSGs sending + u8_l msge2a_buf_cnt; + /// Number of host buffers available for Debug Messages sending + u8_l dbgbuf_cnt; + /// Number of host buffers available for Radar events handling + u8_l radarbuf_cnt; + /// Number of host buffers available for unsupported Rx vectors handling + u8_l unsuprxvecbuf_cnt; + /// Number of shared descriptors available for Data RX handling + u8_l rxdesc_cnt; + /// Number of host buffers available for Data Rx handling + u8_l rxbuf_cnt; + /// Number of descriptors in BK TX queue (power of 2, min 4, max 64) + u8_l bk_txq; + /// Number of descriptors in BE TX queue (power of 2, min 4, max 64) + u8_l be_txq; + /// Number of descriptors in VI TX queue (power of 2, min 4, max 64) + u8_l vi_txq; + /// Number of descriptors in VO TX queue (power of 2, min 4, max 64) + u8_l vo_txq; + /// Number of descriptors in BCN TX queue (power of 2, min 4, max 64) + u8_l bcn_txq; +}; + +/* + * TYPE and STRUCT DEFINITIONS + **************************************************************************************** + */ + + +// Indexes are defined in the MIB shared structure +struct ipc_shared_env_tag +{ + volatile struct compatibility_tag comp_info; //FW characteristics + + volatile struct ipc_a2e_msg msg_a2e_buf; // room for 
MSG to be sent from App to Emb + + // Fields for MSGs sending from Emb to App + volatile struct ipc_e2a_msg msg_e2a_buf; // room to build the MSG to be DMA Xferred + volatile struct dma_desc msg_dma_desc; // DMA descriptor for Emb->App MSGs Xfers + volatile u32_l msg_e2a_hostbuf_addr [IPC_MSGE2A_BUF_CNT]; // buffers @ for DMA Xfers + + // Fields for Debug MSGs sending from Emb to App + volatile struct ipc_dbg_msg dbg_buf; // room to build the MSG to be DMA Xferred + volatile struct dma_desc dbg_dma_desc; // DMA descriptor for Emb->App MSGs Xfers + volatile u32_l dbg_hostbuf_addr [IPC_DBGBUF_CNT]; // buffers @ for MSGs DMA Xfers + volatile u32_l la_dbginfo_addr; // Host buffer address for the debug information + volatile u32_l pattern_addr; + volatile u32_l radarbuf_hostbuf [IPC_RADARBUF_CNT]; // buffers @ for Radar Events + volatile u32_l unsuprxvecbuf_hostbuf [IPC_UNSUPRXVECBUF_CNT]; // buffers @ for unsupported Rx vectors + volatile struct txdesc_host txdesc0[CONFIG_USER_MAX][NX_TXDESC_CNT0]; + volatile struct txdesc_host txdesc1[CONFIG_USER_MAX][NX_TXDESC_CNT1]; + volatile struct txdesc_host txdesc2[CONFIG_USER_MAX][NX_TXDESC_CNT2]; + volatile struct txdesc_host txdesc3[CONFIG_USER_MAX][NX_TXDESC_CNT3]; + #if NX_TXQ_CNT == 5 + volatile struct txdesc_host txdesc4[1][NX_TXDESC_CNT4]; + #endif + #ifdef CONFIG_RWNX_FULLMAC + // RX Descriptors Array + volatile struct ipc_shared_rx_desc host_rxdesc[IPC_RXDESC_CNT]; + // RX Buffers Array + volatile struct ipc_shared_rx_buf host_rxbuf[IPC_RXBUF_CNT]; + #else + // buffers @ for Data Rx + volatile u32_l host_rxbuf[IPC_RXBUF_CNT]; + #endif /* CONFIG_RWNX_FULLMAC */ + + u32_l buffered[NX_REMOTE_STA_MAX][TID_MAX]; + + volatile uint16_t trace_pattern; + volatile uint32_t trace_start; + volatile uint32_t trace_end; + volatile uint32_t trace_size; + volatile uint32_t trace_offset; + volatile uint32_t trace_nb_compo; + volatile uint32_t trace_offset_compo; +}; + +extern struct ipc_shared_env_tag ipc_shared_env; + + +/* + * TYPE and STRUCT DEFINITIONS + **************************************************************************************** + */ + +// IRQs from app to emb +/// Interrupts bits used for the TX descriptors of the AC queues +#ifdef CONFIG_RWNX_MUMIMO_TX +#ifdef CONFIG_RWNX_OLD_IPC +#error "MU-MIMO cannot be compiled for old IPC" +#endif +/// Interrupts bits used +#if CONFIG_USER_MAX > 3 +#define IPC_IRQ_A2E_USER_MSK 0xF +#elif CONFIG_USER_MAX > 2 +#define IPC_IRQ_A2E_USER_MSK 0x7 +#else +#define IPC_IRQ_A2E_USER_MSK 0x3 +#endif + +/// Offset of the interrupts for AC0 +#define IPC_IRQ_A2E_AC0_OFT 8 +/// Mask of the interrupts for AC0 +#define IPC_IRQ_A2E_AC0_MSK (IPC_IRQ_A2E_USER_MSK << IPC_IRQ_A2E_AC0_OFT) +/// Offset of the interrupts for AC1 +#define IPC_IRQ_A2E_AC1_OFT (IPC_IRQ_A2E_AC0_OFT + CONFIG_USER_MAX) +/// Mask of the interrupts for AC1 +#define IPC_IRQ_A2E_AC1_MSK (IPC_IRQ_A2E_USER_MSK << IPC_IRQ_A2E_AC1_OFT) +/// Offset of the interrupts for AC2 +#define IPC_IRQ_A2E_AC2_OFT (IPC_IRQ_A2E_AC1_OFT + CONFIG_USER_MAX) +/// Mask of the interrupts for AC2 +#define IPC_IRQ_A2E_AC2_MSK (IPC_IRQ_A2E_USER_MSK << IPC_IRQ_A2E_AC2_OFT) +/// Offset of the interrupts for AC3 +#define IPC_IRQ_A2E_AC3_OFT (IPC_IRQ_A2E_AC2_OFT + CONFIG_USER_MAX) +/// Mask of the interrupts for AC3 +#define IPC_IRQ_A2E_AC3_MSK (IPC_IRQ_A2E_USER_MSK << IPC_IRQ_A2E_AC3_OFT) +/// Offset of the interrupts for BCN +#define IPC_IRQ_A2E_BCN_OFT (IPC_IRQ_A2E_AC3_OFT + CONFIG_USER_MAX) +/// Mask of the interrupts for BCN +#define IPC_IRQ_A2E_BCN_MSK 
CO_BIT(IPC_IRQ_A2E_BCN_OFT) + +#define IPC_IRQ_A2E_AC_TXDESC (IPC_IRQ_A2E_AC0_MSK | IPC_IRQ_A2E_AC1_MSK | \ + IPC_IRQ_A2E_AC2_MSK | IPC_IRQ_A2E_AC3_MSK) + +/// Interrupts bits used for the TX descriptors of the BCN queue +#if NX_TXQ_CNT < 5 +#define IPC_IRQ_A2E_BCN_TXDESC 0 +#else +#define IPC_IRQ_A2E_BCN_TXDESC (0x01 << IPC_IRQ_A2E_BCN_OFT) +#endif + +/// IPC TX descriptor interrupt mask +#define IPC_IRQ_A2E_TXDESC (IPC_IRQ_A2E_AC_TXDESC | IPC_IRQ_A2E_BCN_TXDESC) +#else +/// IPC TX descriptor interrupt mask +#define IPC_IRQ_A2E_TXDESC 0xFF00 +#endif + +#define IPC_IRQ_A2E_TXDESC_FIRSTBIT (8) +#define IPC_IRQ_A2E_RXBUF_BACK CO_BIT(5) +#define IPC_IRQ_A2E_RXDESC_BACK CO_BIT(4) + +#define IPC_IRQ_A2E_MSG CO_BIT(1) +#define IPC_IRQ_A2E_DBG CO_BIT(0) + +#define IPC_IRQ_A2E_ALL (IPC_IRQ_A2E_TXDESC|IPC_IRQ_A2E_MSG|IPC_IRQ_A2E_DBG) + +// IRQs from emb to app +#define IPC_IRQ_E2A_TXCFM_POS 7 + +#ifdef CONFIG_RWNX_MUMIMO_TX +#ifdef CONFIG_RWNX_OLD_IPC +#error "MU-MIMO cannot be compiled for old IPC" +#endif +/// Interrupts bits used +#if CONFIG_USER_MAX > 3 +#define IPC_IRQ_E2A_USER_MSK 0xF +#elif CONFIG_USER_MAX > 2 +#define IPC_IRQ_E2A_USER_MSK 0x7 +#else +#define IPC_IRQ_E2A_USER_MSK 0x3 +#endif + +/// Offset of the interrupts for AC0 +#define IPC_IRQ_E2A_AC0_OFT IPC_IRQ_E2A_TXCFM_POS +/// Mask of the interrupts for AC0 +#define IPC_IRQ_E2A_AC0_MSK (IPC_IRQ_E2A_USER_MSK << IPC_IRQ_E2A_AC0_OFT) +/// Offset of the interrupts for AC1 +#define IPC_IRQ_E2A_AC1_OFT (IPC_IRQ_E2A_AC0_OFT + CONFIG_USER_MAX) +/// Mask of the interrupts for AC1 +#define IPC_IRQ_E2A_AC1_MSK (IPC_IRQ_E2A_USER_MSK << IPC_IRQ_E2A_AC1_OFT) +/// Offset of the interrupts for AC2 +#define IPC_IRQ_E2A_AC2_OFT (IPC_IRQ_E2A_AC1_OFT + CONFIG_USER_MAX) +/// Mask of the interrupts for AC2 +#define IPC_IRQ_E2A_AC2_MSK (IPC_IRQ_E2A_USER_MSK << IPC_IRQ_E2A_AC2_OFT) +/// Offset of the interrupts for AC3 +#define IPC_IRQ_E2A_AC3_OFT (IPC_IRQ_E2A_AC2_OFT + CONFIG_USER_MAX) +/// Mask of the interrupts for AC3 +#define IPC_IRQ_E2A_AC3_MSK (IPC_IRQ_E2A_USER_MSK << IPC_IRQ_E2A_AC3_OFT) +/// Offset of the interrupts for BCN +#define IPC_IRQ_E2A_BCN_OFT (IPC_IRQ_E2A_AC3_OFT + CONFIG_USER_MAX) +/// Mask of the interrupts for BCN +#define IPC_IRQ_E2A_BCN_MSK CO_BIT(IPC_IRQ_E2A_BCN_OFT) + +#define IPC_IRQ_E2A_AC_TXCFM (IPC_IRQ_E2A_AC0_MSK | IPC_IRQ_E2A_AC1_MSK | \ + IPC_IRQ_E2A_AC2_MSK | IPC_IRQ_E2A_AC3_MSK) + +/// Interrupts bits used for the TX descriptors of the BCN queue +#if NX_TXQ_CNT < 5 +#define IPC_IRQ_E2A_BCN_TXCFM 0 +#else +#define IPC_IRQ_E2A_BCN_TXCFM (0x01 << IPC_IRQ_E2A_BCN_OFT) +#endif + +/// IPC TX descriptor interrupt mask +#define IPC_IRQ_E2A_TXCFM (IPC_IRQ_E2A_AC_TXCFM | IPC_IRQ_E2A_BCN_TXCFM) + +#else + +#define IPC_IRQ_E2A_TXCFM ((1 << NX_TXQ_CNT) - 1 ) << IPC_IRQ_E2A_TXCFM_POS + +#endif /* CONFIG_RWNX_MUMIMO_TX */ + +#define IPC_IRQ_E2A_UNSUP_RX_VEC CO_BIT(7) +#define IPC_IRQ_E2A_RADAR CO_BIT(6) +#define IPC_IRQ_E2A_TBTT_SEC CO_BIT(5) +#define IPC_IRQ_E2A_TBTT_PRIM CO_BIT(4) +#define IPC_IRQ_E2A_RXDESC CO_BIT(3) +#define IPC_IRQ_E2A_MSG_ACK CO_BIT(2) +#define IPC_IRQ_E2A_MSG CO_BIT(1) +#define IPC_IRQ_E2A_DBG CO_BIT(0) + +#define IPC_IRQ_E2A_ALL ( IPC_IRQ_E2A_TXCFM \ + | IPC_IRQ_E2A_RXDESC \ + | IPC_IRQ_E2A_MSG_ACK \ + | IPC_IRQ_E2A_MSG \ + | IPC_IRQ_E2A_DBG \ + | IPC_IRQ_E2A_TBTT_PRIM \ + | IPC_IRQ_E2A_TBTT_SEC \ + | IPC_IRQ_E2A_RADAR \ + | IPC_IRQ_E2A_UNSUP_RX_VEC) + +// FLAGS for RX desc +#define IPC_RX_FORWARD CO_BIT(1) +#define IPC_RX_INTRABSS CO_BIT(0) + + +// IPC message TYPE +enum +{ + IPC_MSG_NONE = 0, + IPC_MSG_WRAP, + 
IPC_MSG_KMSG, + + IPC_DBG_STRING, + +}; + +#endif // _IPC_SHARED_H_ + diff --git a/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/lmac_mac.h b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/lmac_mac.h new file mode 100644 index 0000000..1d90007 --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/lmac_mac.h @@ -0,0 +1,589 @@ +/** + **************************************************************************************** + * + * @file lmac_mac_types.h + * + * @brief MAC related definitions. + * + * Adapted from mac_types.h to used lmac_types.h instead of standard types + * eg: perl -pi -e '$_ =~ s/uint(\d{1,2})_t/u$1_l/g; \ + * $_ =~ s/int(\d{1,2})_t/s$1_l/g; \ + * $_ =~ s/CO_BIT/BIT/g;' lmac_mac.h + * + * Copyright (C) RivieraWaves 2011-2019 + * + **************************************************************************************** + */ + +#ifndef LMAC_MAC_H_ +#define LMAC_MAC_H_ + +#include "lmac_types.h" + +/// Interface types +enum mac_vif_type +{ + /// ESS STA interface + VIF_STA, + /// IBSS STA interface + VIF_IBSS, + /// AP interface + VIF_AP, + /// Mesh Point interface + VIF_MESH_POINT, + /// Monitor interface + VIF_MONITOR, + /// Unknown type + VIF_UNKNOWN +}; + +/// MAC address length in bytes. +#define MAC_ADDR_LEN 6 + +/// MAC address structure. +struct mac_addr +{ + /// Array of 16-bit words that make up the MAC address. + u16_l array[MAC_ADDR_LEN/2]; +}; + +/// SSID maximum length. +#define MAC_SSID_LEN 32 + +/// SSID. +struct mac_ssid +{ + /// Actual length of the SSID. + u8_l length; + /// Array containing the SSID name. + u8_l array[MAC_SSID_LEN]; +}; + +/// BSS type +enum mac_bss_type +{ + INFRASTRUCTURE_MODE = 1, + INDEPENDENT_BSS_MODE, + ANY_BSS_MODE +}; + +/// Channel Band +enum mac_chan_band +{ + /// 2.4GHz Band + PHY_BAND_2G4, + /// 5GHz band + PHY_BAND_5G, + /// Number of bands + PHY_BAND_MAX, +}; + +/// Operating Channel Bandwidth +enum mac_chan_bandwidth +{ + /// 20MHz BW + PHY_CHNL_BW_20, + /// 40MHz BW + PHY_CHNL_BW_40, + /// 80MHz BW + PHY_CHNL_BW_80, + /// 160MHz BW + PHY_CHNL_BW_160, + /// 80+80MHz BW + PHY_CHNL_BW_80P80, + /// Reserved BW + PHY_CHNL_BW_OTHER, +}; + +/// max number of channels in the 2.4 GHZ band +#define MAC_DOMAINCHANNEL_24G_MAX 14 + +/// max number of channels in the 5 GHZ band +#define MAC_DOMAINCHANNEL_5G_MAX 28 + +/// Channel Flag +enum mac_chan_flags +{ + /// Cannot initiate radiation on this channel + CHAN_NO_IR = BIT(0), + /// Channel is not allowed + CHAN_DISABLED = BIT(1), + /// Radar detection required on this channel + CHAN_RADAR = BIT(2), +}; + +/// Primary Channel definition +struct mac_chan_def +{ + /// Frequency of the channel (in MHz) + u16_l freq; + /// RF band (@ref mac_chan_band) + u8_l band; + /// Additional information (@ref mac_chan_flags) + u8_l flags; + /// Max transmit power allowed on this channel (dBm) + s8_l tx_power; +}; + +/// Operating Channel +struct mac_chan_op +{ + /// Band (@ref mac_chan_band) + u8_l band; + /// Channel type (@ref mac_chan_bandwidth) + u8_l type; + /// Frequency for Primary 20MHz channel (in MHz) + u16_l prim20_freq; + /// Frequency center of the contiguous channel or center of Primary 80+80 (in MHz) + u16_l center1_freq; + /// Frequency center of the non-contiguous secondary 80+80 (in MHz) + u16_l center2_freq; + /// Max transmit power allowed on this channel (dBm) + s8_l tx_power; + /// Additional information (@ref mac_chan_flags) + u8_l flags; +}; + +/// Cipher suites (order is important as it is used by MACHW) +enum mac_cipher_suite +{ + /// 00-0F-AC 1 + MAC_CIPHER_WEP40 = 0, 
+ /// 00-0F-AC 2 + MAC_CIPHER_TKIP = 1, + /// 00-0F-AC 4 + MAC_CIPHER_CCMP = 2, + /// 00-0F-AC 5 + MAC_CIPHER_WEP104 = 3, + /// 00-14-72 1 + MAC_CIPHER_WPI_SMS4 = 4, + /// 00-0F-AC 6 (aka AES_CMAC) + MAC_CIPHER_BIP_CMAC_128 = 5, + + // following cipher are not supported by MACHW + /// 00-0F-AC 08 + MAC_CIPHER_GCMP_128, + /// 00-0F-AC 09 + MAC_CIPHER_GCMP_256, + /// 00-0F-AC 10 + MAC_CIPHER_CCMP_256, + /// 00-0F-AC 11 + MAC_CIPHER_BIP_GMAC_128, + /// 00-0F-AC 12 + MAC_CIPHER_BIP_GMAC_256, + /// 00-0F-AC 13 + MAC_CIPHER_BIP_CMAC_256, + + MAC_CIPHER_INVALID = 0xFF +}; + +/// Authentication and Key Management suite +enum mac_akm_suite +{ + /// No security + MAC_AKM_NONE, + /// Pre RSN (WEP or WPA) + MAC_AKM_PRE_RSN, + /// 00-0F-AC 1 + MAC_AKM_8021X, + /// 00-0F-AC 2 + MAC_AKM_PSK, + /// 00-0F-AC 3 + MAC_AKM_FT_8021X, + /// 00-0F-AC 4 + MAC_AKM_FT_PSK, + /// 00-0F-AC 5 + MAC_AKM_8021X_SHA256, + /// 00-0F-AC 6 + MAC_AKM_PSK_SHA256, + /// 00-0F-AC 7 + MAC_AKM_TDLS, + /// 00-0F-AC 8 + MAC_AKM_SAE, + /// 00-0F-AC 9 + MAC_AKM_FT_OVER_SAE, + /// 00-0F-AC 11 + MAC_AKM_8021X_SUITE_B, + /// 00-0F-AC 12 + MAC_AKM_8021X_SUITE_B_192, + /// 00-0F-AC 14 + MAC_AKM_FILS_SHA256, + /// 00-0F-AC 15 + MAC_AKM_FILS_SHA384, + /// 00-0F-AC 16 + MAC_AKM_FT_FILS_SHA256, + /// 00-0F-AC 17 + MAC_AKM_FT_FILS_SHA384, + /// 00-0F-AC 18 + MAC_AKM_OWE, + + /// 00-14-72 1 + MAC_AKM_WAPI_CERT, + /// 00-14-72 2 + MAC_AKM_WAPI_PSK, +}; + +/// Scan result element, parsed from beacon or probe response frames. +struct mac_scan_result +{ + /// Scan result is valid + bool valid_flag; + /// Network BSSID. + struct mac_addr bssid; + /// Network name. + struct mac_ssid ssid; + /// Network type (@ref mac_bss_type). + u16_l bsstype; + /// Network channel. + struct mac_chan_def *chan; + /// Network beacon period (in TU). + u16_l beacon_period; + /// Capability information + u16_l cap_info; + /// Supported AKM (bit-field of @ref mac_akm_suite) + u32_l akm; + /// Group cipher (bit-field of @ref mac_cipher_suite) + u16_l group_cipher; + /// Group cipher (bit-field of @ref mac_cipher_suite) + u16_l pairwise_cipher; + /// RSSI of the scanned BSS (in dBm) + s8_l rssi; + ///Multi-BSSID index (0 if this is the reference (i.e. 
transmitted) BSSID) + u8_l mluti_bssid_index; + ///Maximum BSSID indicator + u8_l max_bssid_indicator; +}; + +/// Legacy rate 802.11 definitions +enum mac_legacy_rates +{ + /// DSSS/CCK 1Mbps + MAC_RATE_1MBPS = 2, + /// DSSS/CCK 2Mbps + MAC_RATE_2MBPS = 4, + /// DSSS/CCK 5.5Mbps + MAC_RATE_5_5MBPS = 11, + /// OFDM 6Mbps + MAC_RATE_6MBPS = 12, + /// OFDM 9Mbps + MAC_RATE_9MBPS = 18, + /// DSSS/CCK 11Mbps + MAC_RATE_11MBPS = 22, + /// OFDM 12Mbps + MAC_RATE_12MBPS = 24, + /// OFDM 18Mbps + MAC_RATE_18MBPS = 36, + /// OFDM 24Mbps + MAC_RATE_24MBPS = 48, + /// OFDM 36Mbps + MAC_RATE_36MBPS = 72, + /// OFDM 48Mbps + MAC_RATE_48MBPS = 96, + /// OFDM 54Mbps + MAC_RATE_54MBPS = 108 +}; + +/// BSS Membership Selector definitions +enum mac_bss_membership +{ + /// HT PHY + MAC_BSS_MEMBERSHIP_HT_PHY = 127, + /// VHT PHY + MAC_BSS_MEMBERSHIP_VHT_PHY = 126, +}; + +/// MAC rateset maximum length +#define MAC_RATESET_LEN 12 + +/// Structure containing the legacy rateset of a station +struct mac_rateset +{ + /// Number of legacy rates supported + u8_l length; + /// Array of legacy rates + u8_l array[MAC_RATESET_LEN]; +}; + +/// MAC Security Key maximum length +#define MAC_SEC_KEY_LEN 32 // TKIP keys 256 bits (max length) with MIC keys + +/// Structure defining a security key +struct mac_sec_key +{ + /// Key material length + u8_l length; + /// Key material + u32_l array[MAC_SEC_KEY_LEN/4]; +}; + +/// Access Category enumeration +enum mac_ac +{ + /// Background + AC_BK = 0, + /// Best-effort + AC_BE, + /// Video + AC_VI, + /// Voice + AC_VO, + /// Number of access categories + AC_MAX +}; + +/// Traffic ID enumeration +enum mac_tid +{ + /// TID_0. Mapped to @ref AC_BE as per 802.11 standard. + TID_0, + /// TID_1. Mapped to @ref AC_BK as per 802.11 standard. + TID_1, + /// TID_2. Mapped to @ref AC_BK as per 802.11 standard. + TID_2, + /// TID_3. Mapped to @ref AC_BE as per 802.11 standard. + TID_3, + /// TID_4. Mapped to @ref AC_VI as per 802.11 standard. + TID_4, + /// TID_5. Mapped to @ref AC_VI as per 802.11 standard. + TID_5, + /// TID_6. Mapped to @ref AC_VO as per 802.11 standard. + TID_6, + /// TID_7. Mapped to @ref AC_VO as per 802.11 standard. 
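The TID enum comments above spell out the 802.11 TID-to-access-category mapping. Below is a minimal sketch of that mapping as a lookup helper; the helper is illustrative only (it is not part of the driver source) and assumes the complete enums declared in this header:

#include "lmac_mac.h"

/* Hypothetical helper: map a traffic ID to the access category documented
 * in the enum mac_tid comments (0/3 -> BE, 1/2 -> BK, 4/5 -> VI, 6/7 -> VO). */
static inline enum mac_ac tid_to_ac(enum mac_tid tid)
{
    switch (tid) {
    case TID_1:
    case TID_2:
        return AC_BK;
    case TID_0:
    case TID_3:
        return AC_BE;
    case TID_4:
    case TID_5:
        return AC_VI;
    case TID_6:
    case TID_7:
    default:        /* TID_MGT: internal management traffic, highest priority */
        return AC_VO;
    }
}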
+ TID_7, + /// Non standard Management TID used internally + TID_MGT, + /// Number of TID supported + TID_MAX +}; + +/// MCS bitfield maximum size (in bytes) +#define MAX_MCS_LEN 16 // 16 * 8 = 128 + +/// MAC HT capability information element +struct mac_htcapability +{ + /// HT capability information + u16_l ht_capa_info; + /// A-MPDU parameters + u8_l a_mpdu_param; + /// Supported MCS + u8_l mcs_rate[MAX_MCS_LEN]; + /// HT extended capability information + u16_l ht_extended_capa; + /// Beamforming capability information + u32_l tx_beamforming_capa; + /// Antenna selection capability information + u8_l asel_capa; +}; + +/// MAC VHT capability information element +struct mac_vhtcapability +{ + /// VHT capability information + u32_l vht_capa_info; + /// RX MCS map + u16_l rx_mcs_map; + /// RX highest data rate + u16_l rx_highest; + /// TX MCS map + u16_l tx_mcs_map; + /// TX highest data rate + u16_l tx_highest; +}; + +/// Length (in bytes) of the MAC HE capability field +#define MAC_HE_MAC_CAPA_LEN 6 +/// Length (in bytes) of the PHY HE capability field +#define MAC_HE_PHY_CAPA_LEN 11 +/// Maximum length (in bytes) of the PPE threshold data +#define MAC_HE_PPE_THRES_MAX_LEN 25 + +/// Structure listing the per-NSS, per-BW supported MCS combinations +struct mac_he_mcs_nss_supp +{ + /// per-NSS supported MCS in RX, for BW <= 80MHz + u16_l rx_mcs_80; + /// per-NSS supported MCS in TX, for BW <= 80MHz + u16_l tx_mcs_80; + /// per-NSS supported MCS in RX, for BW = 160MHz + u16_l rx_mcs_160; + /// per-NSS supported MCS in TX, for BW = 160MHz + u16_l tx_mcs_160; + /// per-NSS supported MCS in RX, for BW = 80+80MHz + u16_l rx_mcs_80p80; + /// per-NSS supported MCS in TX, for BW = 80+80MHz + u16_l tx_mcs_80p80; +}; + +/// MAC HE capability information element +struct mac_hecapability +{ + /// MAC HE capabilities + u8_l mac_cap_info[MAC_HE_MAC_CAPA_LEN]; + /// PHY HE capabilities + u8_l phy_cap_info[MAC_HE_PHY_CAPA_LEN]; + /// Supported MCS combinations + struct mac_he_mcs_nss_supp mcs_supp; + /// PPE Thresholds data + u8_l ppe_thres[MAC_HE_PPE_THRES_MAX_LEN]; +}; + +/// Station flags +enum mac_sta_flags +{ + /// Bit indicating that a STA has QoS (WMM) capability + STA_QOS_CAPA = BIT(0), + /// Bit indicating that a STA has HT capability + STA_HT_CAPA = BIT(1), + /// Bit indicating that a STA has VHT capability + STA_VHT_CAPA = BIT(2), + /// Bit indicating that a STA has MFP capability + STA_MFP_CAPA = BIT(3), + /// Bit indicating that the STA included the Operation Notification IE + STA_OPMOD_NOTIF = BIT(4), + /// Bit indicating that a STA has HE capability + STA_HE_CAPA = BIT(5), +}; + +/// Connection flags +enum mac_connection_flags +{ + /// Flag indicating whether the control port is controlled by host or not + CONTROL_PORT_HOST = BIT(0), + /// Flag indicating whether the control port frame shall be sent unencrypted + CONTROL_PORT_NO_ENC = BIT(1), + /// Flag indicating whether HT and VHT shall be disabled or not + DISABLE_HT = BIT(2), + /// Flag indicating whether WPA or WPA2 authentication is in use + WPA_WPA2_IN_USE = BIT(3), + /// Flag indicating whether MFP is in use + MFP_IN_USE = BIT(4), + // Flag indicating Roam + REASSOCIATION = BIT(5), +}; + +#ifdef CONFIG_HE_FOR_OLD_KERNEL +#define IEEE80211_HE_MAC_CAP2_ALL_ACK 0x02 +#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G 0x02 +#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G 0x04 +#define IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD 0x20 +#define IEEE80211_HE_PHY_CAP1_HE_LTF_AND_GI_FOR_HE_PPDUS_0_8US 0x40 +#define 
IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_TX_MAX_NSTS 0x80 +#define IEEE80211_HE_PHY_CAP2_MIDAMBLE_RX_TX_MAX_NSTS 0x01 +#define IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US 0x02 +#define IEEE80211_HE_PHY_CAP2_DOPPLER_RX 0x20 +#define IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ 0x04 +#define IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ 0x08 +#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_16_QAM 0x18 +#define IEEE80211_HE_PHY_CAP3_DCM_MAX_RX_NSS_1 0x00 +#define IEEE80211_HE_PHY_CAP3_RX_HE_MU_PPDU_FROM_NON_AP_STA 0x40 +#define IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE 0x01 +#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_4 0x0c +#define IEEE80211_HE_PHY_CAP5_NG16_SU_FEEDBACK 0x40 +#define IEEE80211_HE_PHY_CAP5_NG16_MU_FEEDBACK 0x80 +#define IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_42_SU 0x01 +#define IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_75_MU 0x02 +#define IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMER_FB 0x04 +#define IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMER_FB 0x08 +#define IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT 0x80 +#define IEEE80211_HE_PHY_CAP6_PARTIAL_BANDWIDTH_DL_MUMIMO 0x40 +#define IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI 0x04 +#define IEEE80211_HE_PHY_CAP8_20MHZ_IN_40MHZ_HE_PPDU_IN_2G 0x02 +#define IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB 0x10 +#define IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB 0x20 + +struct ieee80211_he_cap_elem { + u8 mac_cap_info[6]; + u8 phy_cap_info[11]; +} __packed; + +struct ieee80211_he_mcs_nss_supp { + __le16 rx_mcs_80; + __le16 tx_mcs_80; + __le16 rx_mcs_160; + __le16 tx_mcs_160; + __le16 rx_mcs_80p80; + __le16 tx_mcs_80p80; +} __packed; + +#define IEEE80211_HE_PPE_THRES_MAX_LEN 25 + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0) +#define WLAN_EID_EXTENSION 255 +/* Element ID Extensions for Element ID 255 */ + +enum ieee80211_eid_ext { + WLAN_EID_EXT_ASSOC_DELAY_INFO = 1, + WLAN_EID_EXT_FILS_REQ_PARAMS = 2, + WLAN_EID_EXT_FILS_KEY_CONFIRM = 3, + WLAN_EID_EXT_FILS_SESSION = 4, + WLAN_EID_EXT_FILS_HLP_CONTAINER = 5, + WLAN_EID_EXT_FILS_IP_ADDR_ASSIGN = 6, + WLAN_EID_EXT_KEY_DELIVERY = 7, + WLAN_EID_EXT_FILS_WRAPPED_DATA = 8, + WLAN_EID_EXT_FILS_PUBLIC_KEY = 12, + WLAN_EID_EXT_FILS_NONCE = 13, + WLAN_EID_EXT_FUTURE_CHAN_GUIDANCE = 14, + +}; + +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0) +#define WLAN_EID_EXT_HE_CAPABILITY 35 +#define WLAN_EID_EXT_HE_OPERATION 36 +#define WLAN_EID_EXT_UORA 37 +#define WLAN_EID_EXT_HE_MU_EDCA 38 +#define WLAN_EID_EXT_HE_SPR 39 +#define WLAN_EID_EXT_NDP_FEEDBACK_REPORT_PARAMSET 41 +#define WLAN_EID_EXT_BSS_COLOR_CHG_ANN 42 +#define WLAN_EID_EXT_QUIET_TIME_PERIOD_SETUP 43 +#define WLAN_EID_EXT_ESS_REPORT 45 +#define WLAN_EID_EXT_OPS 46 +#define WLAN_EID_EXT_HE_BSS_LOAD 47 +#define WLAN_EID_EXT_MAX_CHANNEL_SWITCH_TIME 52 +#define WLAN_EID_EXT_MULTIPLE_BSSID_CONFIGURATION 55 +#define WLAN_EID_EXT_NON_INHERITANCE 56 +#define WLAN_EID_EXT_KNOWN_BSSID 57 +#define WLAN_EID_EXT_SHORT_SSID_LIST 58 +#define WLAN_EID_EXT_HE_6GHZ_CAPA 59 +#define WLAN_EID_EXT_UL_MU_POWER_CAPA 60 +#define WLAN_EID_EXT_EHT_OPERATION 106 +#define WLAN_EID_EXT_EHT_MULTI_LINK 107 +#define WLAN_EID_EXT_EHT_CAPABILITY 108 + +#endif + +struct ieee80211_sta_he_cap { + bool has_he; + struct ieee80211_he_cap_elem he_cap_elem; + struct ieee80211_he_mcs_nss_supp he_mcs_nss_supp; + u8 ppe_thres[IEEE80211_HE_PPE_THRES_MAX_LEN]; +}; + +struct ieee80211_sband_iftype_data { + u16 types_mask; + struct ieee80211_sta_he_cap he_cap; +}; +#endif +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0) || 
defined(CONFIG_VHT_FOR_OLD_KERNEL) +struct ieee80211_vht_mcs_info { + __le16 rx_mcs_map; + __le16 rx_highest; + __le16 tx_mcs_map; + __le16 tx_highest; +} __packed; + +struct ieee80211_vht_cap { + __le32 vht_cap_info; + struct ieee80211_vht_mcs_info supp_mcs; +}; +#define WLAN_EID_VHT_CAPABILITY 191 + +struct ieee80211_sta_vht_cap { + bool vht_supported; + u32 cap; /* use IEEE80211_VHT_CAP_ */ + struct ieee80211_vht_mcs_info vht_mcs; +}; +#endif +#endif // LMAC_MAC_H_ diff --git a/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/lmac_msg.h b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/lmac_msg.h new file mode 100644 index 0000000..40ce7f8 --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/lmac_msg.h @@ -0,0 +1,3468 @@ +/** + **************************************************************************************** + * + * @file lmac_msg.h + * + * @brief Main definitions for message exchanges with LMAC + * + * Copyright (C) RivieraWaves 2011-2019 + * + **************************************************************************************** + */ + +#ifndef LMAC_MSG_H_ +#define LMAC_MSG_H_ + +/* + * INCLUDE FILES + **************************************************************************************** + */ +// for MAC related elements (mac_addr, mac_ssid...) +#include "lmac_mac.h" + +/* + **************************************************************************************** + */ +///////////////////////////////////////////////////////////////////////////////// +// COMMUNICATION WITH LMAC LAYER +///////////////////////////////////////////////////////////////////////////////// +/* Task identifiers for communication between LMAC and DRIVER */ +enum +{ + TASK_NONE = (u8_l) -1, + + // MAC Management task. + TASK_MM = 0, + // DEBUG task + TASK_DBG, + /// SCAN task + TASK_SCAN, + /// TDLS task + TASK_TDLS, + /// SCANU task + TASK_SCANU, + /// ME task + TASK_ME, + /// SM task + TASK_SM, + /// APM task + TASK_APM, + /// BAM task + TASK_BAM, + /// MESH task + TASK_MESH, + /// RXU task + TASK_RXU, + /// RM task + TASK_RM, +#if defined CONFIG_RWNX_FULLMAC || defined CONFIG_RWNX_FHOST + // This is used to define the last task that is running on the EMB processor + TASK_LAST_EMB = TASK_RM, +#else +#error "Need to define SOFTMAC or FULLMAC" +#endif + // nX API task + TASK_API, + TASK_MAX, +}; + + +/// For MAC HW States copied from "hal_machw.h" +enum +{ + /// MAC HW IDLE State. + HW_IDLE = 0, + /// MAC HW RESERVED State. + HW_RESERVED, + /// MAC HW DOZE State. + HW_DOZE, + /// MAC HW ACTIVE State. + HW_ACTIVE +}; + +/// Power Save mode setting +enum mm_ps_mode_state +{ + MM_PS_MODE_OFF, + MM_PS_MODE_ON, + MM_PS_MODE_ON_DYN, +}; + +/// Status/error codes used in the MAC software. +enum +{ + CO_OK, + CO_FAIL, + CO_EMPTY, + CO_FULL, + CO_BAD_PARAM, + CO_NOT_FOUND, + CO_NO_MORE_ELT_AVAILABLE, + CO_NO_ELT_IN_USE, + CO_BUSY, + CO_OP_IN_PROGRESS, +}; + +/// Remain on channel operation codes +enum mm_remain_on_channel_op +{ + MM_ROC_OP_START = 0, + MM_ROC_OP_CANCEL, +}; + +#define DRV_TASK_ID 100 + +/// Message Identifier. The number of messages is limited to 0xFFFF. +/// The message ID is divided in two parts: +/// - bits[15..10] : task index (no more than 64 tasks supported). +/// - bits[9..0] : message index (no more that 1024 messages per task). +typedef u16 lmac_msg_id_t; + +typedef u16 lmac_task_id_t; + +/// Build the first message ID of a task. 
+#define LMAC_FIRST_MSG(task) ((lmac_msg_id_t)((task) << 10)) + +#define MSG_T(msg) ((lmac_task_id_t)((msg) >> 10)) +#define MSG_I(msg) ((msg) & ((1<<10)-1)) + +/// Message structure. +struct lmac_msg +{ + lmac_msg_id_t id; ///< Message id. + lmac_task_id_t dest_id; ///< Destination kernel identifier. + lmac_task_id_t src_id; ///< Source kernel identifier. + u16 param_len; ///< Parameter embedded struct length. + u32 param[]; ///< Parameter embedded struct. Must be word-aligned. +}; + +/// List of messages related to the task. +enum mm_msg_tag +{ + /// RESET Request. + MM_RESET_REQ = LMAC_FIRST_MSG(TASK_MM), + /// RESET Confirmation. + MM_RESET_CFM, + /// START Request. + MM_START_REQ, + /// START Confirmation. + MM_START_CFM, + /// Read Version Request. + MM_VERSION_REQ, + /// Read Version Confirmation. + MM_VERSION_CFM, + /// ADD INTERFACE Request. + MM_ADD_IF_REQ, + /// ADD INTERFACE Confirmation. + MM_ADD_IF_CFM, + /// REMOVE INTERFACE Request. + MM_REMOVE_IF_REQ, + /// REMOVE INTERFACE Confirmation. + MM_REMOVE_IF_CFM, + /// STA ADD Request. + MM_STA_ADD_REQ, + /// STA ADD Confirm. + MM_STA_ADD_CFM, + /// STA DEL Request. + MM_STA_DEL_REQ, + /// STA DEL Confirm. + MM_STA_DEL_CFM, + /// RX FILTER CONFIGURATION Request. + MM_SET_FILTER_REQ, + /// RX FILTER CONFIGURATION Confirmation. + MM_SET_FILTER_CFM, + /// CHANNEL CONFIGURATION Request. + MM_SET_CHANNEL_REQ, + /// CHANNEL CONFIGURATION Confirmation. + MM_SET_CHANNEL_CFM, + /// DTIM PERIOD CONFIGURATION Request. + MM_SET_DTIM_REQ, + /// DTIM PERIOD CONFIGURATION Confirmation. + MM_SET_DTIM_CFM, + /// BEACON INTERVAL CONFIGURATION Request. + MM_SET_BEACON_INT_REQ, + /// BEACON INTERVAL CONFIGURATION Confirmation. + MM_SET_BEACON_INT_CFM, + /// BASIC RATES CONFIGURATION Request. + MM_SET_BASIC_RATES_REQ, + /// BASIC RATES CONFIGURATION Confirmation. + MM_SET_BASIC_RATES_CFM, + /// BSSID CONFIGURATION Request. + MM_SET_BSSID_REQ, + /// BSSID CONFIGURATION Confirmation. + MM_SET_BSSID_CFM, + /// EDCA PARAMETERS CONFIGURATION Request. + MM_SET_EDCA_REQ, + /// EDCA PARAMETERS CONFIGURATION Confirmation. + MM_SET_EDCA_CFM, + /// ABGN MODE CONFIGURATION Request. + MM_SET_MODE_REQ, + /// ABGN MODE CONFIGURATION Confirmation. + MM_SET_MODE_CFM, + /// Request setting the VIF active state (i.e associated or AP started) + MM_SET_VIF_STATE_REQ, + /// Confirmation of the @ref MM_SET_VIF_STATE_REQ message. + MM_SET_VIF_STATE_CFM, + /// SLOT TIME PARAMETERS CONFIGURATION Request. + MM_SET_SLOTTIME_REQ, + /// SLOT TIME PARAMETERS CONFIGURATION Confirmation. + MM_SET_SLOTTIME_CFM, + /// Power Mode Change Request. + MM_SET_IDLE_REQ, + /// Power Mode Change Confirm. + MM_SET_IDLE_CFM, + /// KEY ADD Request. + MM_KEY_ADD_REQ, + /// KEY ADD Confirm. + MM_KEY_ADD_CFM, + /// KEY DEL Request. + MM_KEY_DEL_REQ, + /// KEY DEL Confirm. + MM_KEY_DEL_CFM, + /// Block Ack agreement info addition + MM_BA_ADD_REQ, + /// Block Ack agreement info addition confirmation + MM_BA_ADD_CFM, + /// Block Ack agreement info deletion + MM_BA_DEL_REQ, + /// Block Ack agreement info deletion confirmation + MM_BA_DEL_CFM, + /// Indication of the primary TBTT to the upper MAC. Upon the reception of this + // message the upper MAC has to push the beacon(s) to the beacon transmission queue. + MM_PRIMARY_TBTT_IND, + /// Indication of the secondary TBTT to the upper MAC. Upon the reception of this + // message the upper MAC has to push the beacon(s) to the beacon transmission queue. 
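The identifier layout described above (task index in bits 15..10, per-task message index in bits 9..0) together with the flexible param[] array is the whole host/LMAC message convention. The following is a host-side sketch of building one such message, assuming kernel context; the helper name, the allocation style and the dest_id choice are illustrative and not taken from the driver's .c files:

#include <linux/slab.h>
#include "lmac_msg.h"

/* Illustrative only: wrap an MM_SET_DTIM_REQ parameter block in an lmac_msg. */
static struct lmac_msg *build_set_dtim_msg(u8 dtim_period)
{
    struct lmac_msg *msg;
    struct mm_set_dtim_req *req;

    msg = kzalloc(sizeof(*msg) + sizeof(*req), GFP_KERNEL);
    if (!msg)
        return NULL;

    msg->id        = MM_SET_DTIM_REQ;   /* task in bits 15..10, index in bits 9..0 */
    msg->dest_id   = TASK_MM;           /* MAC Management task handles MM_* messages */
    msg->src_id    = DRV_TASK_ID;       /* 100: identifies the host driver as sender */
    msg->param_len = sizeof(*req);

    req = (struct mm_set_dtim_req *)msg->param;
    req->dtim_period = dtim_period;

    /* MSG_T(msg->id) recovers TASK_MM, MSG_I(msg->id) the per-task index. */
    return msg;
}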
+ MM_SECONDARY_TBTT_IND, + /// Request for changing the TX power + MM_SET_POWER_REQ, + /// Confirmation of the TX power change + MM_SET_POWER_CFM, + /// Request to the LMAC to trigger the embedded logic analyzer and forward the debug + /// dump. + MM_DBG_TRIGGER_REQ, + /// Set Power Save mode + MM_SET_PS_MODE_REQ, + /// Set Power Save mode confirmation + MM_SET_PS_MODE_CFM, + /// Request to add a channel context + MM_CHAN_CTXT_ADD_REQ, + /// Confirmation of the channel context addition + MM_CHAN_CTXT_ADD_CFM, + /// Request to delete a channel context + MM_CHAN_CTXT_DEL_REQ, + /// Confirmation of the channel context deletion + MM_CHAN_CTXT_DEL_CFM, + /// Request to link a channel context to a VIF + MM_CHAN_CTXT_LINK_REQ, + /// Confirmation of the channel context link + MM_CHAN_CTXT_LINK_CFM, + /// Request to unlink a channel context from a VIF + MM_CHAN_CTXT_UNLINK_REQ, + /// Confirmation of the channel context unlink + MM_CHAN_CTXT_UNLINK_CFM, + /// Request to update a channel context + MM_CHAN_CTXT_UPDATE_REQ, + /// Confirmation of the channel context update + MM_CHAN_CTXT_UPDATE_CFM, + /// Request to schedule a channel context + MM_CHAN_CTXT_SCHED_REQ, + /// Confirmation of the channel context scheduling + MM_CHAN_CTXT_SCHED_CFM, + /// Request to change the beacon template in LMAC + MM_BCN_CHANGE_REQ, + /// Confirmation of the beacon change + MM_BCN_CHANGE_CFM, + /// Request to update the TIM in the beacon (i.e to indicate traffic bufferized at AP) + MM_TIM_UPDATE_REQ, + /// Confirmation of the TIM update + MM_TIM_UPDATE_CFM, + /// Connection loss indication + MM_CONNECTION_LOSS_IND, + /// Channel context switch indication to the upper layers + MM_CHANNEL_SWITCH_IND, + /// Channel context pre-switch indication to the upper layers + MM_CHANNEL_PRE_SWITCH_IND, + /// Request to remain on channel or cancel remain on channel + MM_REMAIN_ON_CHANNEL_REQ, + /// Confirmation of the (cancel) remain on channel request + MM_REMAIN_ON_CHANNEL_CFM, + /// Remain on channel expired indication + MM_REMAIN_ON_CHANNEL_EXP_IND, + /// Indication of a PS state change of a peer device + MM_PS_CHANGE_IND, + /// Indication that some buffered traffic should be sent to the peer device + MM_TRAFFIC_REQ_IND, + /// Request to modify the STA Power-save mode options + MM_SET_PS_OPTIONS_REQ, + /// Confirmation of the PS options setting + MM_SET_PS_OPTIONS_CFM, + /// Indication of PS state change for a P2P VIF + MM_P2P_VIF_PS_CHANGE_IND, + /// Indication that CSA counter has been updated + MM_CSA_COUNTER_IND, + /// Channel occupation report indication + MM_CHANNEL_SURVEY_IND, + /// Message containing Beamformer Information + MM_BFMER_ENABLE_REQ, + /// Request to Start/Stop/Update NOA - GO Only + MM_SET_P2P_NOA_REQ, + /// Request to Start/Stop/Update Opportunistic PS - GO Only + MM_SET_P2P_OPPPS_REQ, + /// Start/Stop/Update NOA Confirmation + MM_SET_P2P_NOA_CFM, + /// Start/Stop/Update Opportunistic PS Confirmation + MM_SET_P2P_OPPPS_CFM, + /// P2P NoA Update Indication - GO Only + MM_P2P_NOA_UPD_IND, + /// Request to set RSSI threshold and RSSI hysteresis + MM_CFG_RSSI_REQ, + /// Indication that RSSI level is below or above the threshold + MM_RSSI_STATUS_IND, + /// Indication that CSA is done + MM_CSA_FINISH_IND, + /// Indication that CSA is in prorgess (resp. done) and traffic must be stopped (resp. 
restarted) + MM_CSA_TRAFFIC_IND, + /// Request to update the group information of a station + MM_MU_GROUP_UPDATE_REQ, + /// Confirmation of the @ref MM_MU_GROUP_UPDATE_REQ message + MM_MU_GROUP_UPDATE_CFM, + /// Request to initialize the antenna diversity algorithm + MM_ANT_DIV_INIT_REQ, + /// Request to stop the antenna diversity algorithm + MM_ANT_DIV_STOP_REQ, + /// Request to update the antenna switch status + MM_ANT_DIV_UPDATE_REQ, + /// Request to switch the antenna connected to path_0 + MM_SWITCH_ANTENNA_REQ, + /// Indication that a packet loss has occurred + MM_PKTLOSS_IND, + + MM_SET_ARPOFFLOAD_REQ, + MM_SET_ARPOFFLOAD_CFM, + MM_SET_AGG_DISABLE_REQ, + MM_SET_AGG_DISABLE_CFM, + MM_SET_COEX_REQ, + MM_SET_COEX_CFM, + MM_SET_RF_CONFIG_REQ, + MM_SET_RF_CONFIG_CFM, + MM_SET_RF_CALIB_REQ, + MM_SET_RF_CALIB_CFM, + + /// MU EDCA PARAMETERS Configuration Request. + MM_SET_MU_EDCA_REQ, + /// MU EDCA PARAMETERS Configuration Confirmation. + MM_SET_MU_EDCA_CFM, + /// UORA PARAMETERS Configuration Request. + MM_SET_UORA_REQ, + /// UORA PARAMETERS Configuration Confirmation. + MM_SET_UORA_CFM, + /// TXOP RTS THRESHOLD Configuration Request. + MM_SET_TXOP_RTS_THRES_REQ, + /// TXOP RTS THRESHOLD Configuration Confirmation. + MM_SET_TXOP_RTS_THRES_CFM, + /// HE BSS Color Configuration Request. + MM_SET_BSS_COLOR_REQ, + /// HE BSS Color Configuration Confirmation. + MM_SET_BSS_COLOR_CFM, + + MM_GET_MAC_ADDR_REQ, + MM_GET_MAC_ADDR_CFM, + + MM_GET_STA_INFO_REQ, + MM_GET_STA_INFO_CFM, + + MM_SET_TXPWR_IDX_LVL_REQ, + MM_SET_TXPWR_IDX_LVL_CFM, + + MM_SET_TXPWR_OFST_REQ, + MM_SET_TXPWR_OFST_CFM, + + MM_SET_STACK_START_REQ, + MM_SET_STACK_START_CFM, + + MM_APM_STALOSS_IND, + + MM_SET_VENDOR_HWCONFIG_REQ, + MM_SET_VENDOR_HWCONFIG_CFM, + + MM_GET_FW_VERSION_REQ, + MM_GET_FW_VERSION_CFM, + + MM_SET_RESUME_RESTORE_REQ, + MM_SET_RESUME_RESTORE_CFM, + + MM_GET_WIFI_DISABLE_REQ, + MM_GET_WIFI_DISABLE_CFM, + + MM_CFG_RSSI_CFM, + + MM_SET_VENDOR_SWCONFIG_REQ, + MM_SET_VENDOR_SWCONFIG_CFM, + + MM_SET_TXPWR_LVL_ADJ_REQ, + MM_SET_TXPWR_LVL_ADJ_CFM, + + MM_RADAR_DETECT_IND, + + MM_SET_APF_PROG_REQ, + MM_SET_APF_PROG_CFM, + + MM_GET_APF_PROG_REQ, + MM_GET_APF_PROG_CFM, + + MM_SET_TXPWR_PER_STA_REQ, + MM_SET_TXPWR_PER_STA_CFM, + + /// MAX number of messages + MM_MAX, +}; + +/// Interface types +enum +{ + /// ESS STA interface + MM_STA, + /// IBSS STA interface + MM_IBSS, + /// AP interface + MM_AP, + // Mesh Point interface + MM_MESH_POINT, + // Monitor interface + MM_MONITOR, +}; + +///BA agreement types +enum +{ + ///BlockAck agreement for TX + BA_AGMT_TX, + ///BlockAck agreement for RX + BA_AGMT_RX, +}; + +///BA agreement related status +enum +{ + ///Correct BA agreement establishment + BA_AGMT_ESTABLISHED, + ///BA agreement already exists for STA+TID requested, cannot override it (should have been deleted first) + BA_AGMT_ALREADY_EXISTS, + ///Correct BA agreement deletion + BA_AGMT_DELETED, + ///BA agreement for the (STA, TID) doesn't exist so nothing to delete + BA_AGMT_DOESNT_EXIST, +}; + +/// Features supported by LMAC - Positions +enum mm_features +{ + /// Beaconing + MM_FEAT_BCN_BIT = 0, + /// Autonomous Beacon Transmission + MM_FEAT_AUTOBCN_BIT, + /// Scan in LMAC + MM_FEAT_HWSCAN_BIT, + /// Connection Monitoring + MM_FEAT_CMON_BIT, + /// Multi Role + MM_FEAT_MROLE_BIT, + /// Radar Detection + MM_FEAT_RADAR_BIT, + /// Power Save + MM_FEAT_PS_BIT, + /// UAPSD + MM_FEAT_UAPSD_BIT, + /// DPSM + MM_FEAT_DPSM_BIT, + /// A-MPDU + MM_FEAT_AMPDU_BIT, + /// A-MSDU + MM_FEAT_AMSDU_BIT, + /// Channel Context + 
MM_FEAT_CHNL_CTXT_BIT, + /// Packet reordering + MM_FEAT_REORD_BIT, + /// P2P + MM_FEAT_P2P_BIT, + /// P2P Go + MM_FEAT_P2P_GO_BIT, + /// UMAC Present + MM_FEAT_UMAC_BIT, + /// VHT support + MM_FEAT_VHT_BIT, + /// Beamformee + MM_FEAT_BFMEE_BIT, + /// Beamformer + MM_FEAT_BFMER_BIT, + /// WAPI + MM_FEAT_WAPI_BIT, + /// MFP + MM_FEAT_MFP_BIT, + /// Mu-MIMO RX support + MM_FEAT_MU_MIMO_RX_BIT, + /// Mu-MIMO TX support + MM_FEAT_MU_MIMO_TX_BIT, + /// Wireless Mesh Networking + MM_FEAT_MESH_BIT, + /// TDLS support + MM_FEAT_TDLS_BIT, + /// Antenna Diversity support + MM_FEAT_ANT_DIV_BIT, + /// UF support + MM_FEAT_UF_BIT, + /// A-MSDU maximum size (bit0) + MM_AMSDU_MAX_SIZE_BIT0, + /// A-MSDU maximum size (bit1) + MM_AMSDU_MAX_SIZE_BIT1, + /// MON_DATA support + MM_FEAT_MON_DATA_BIT, + /// HE (802.11ax) support + MM_FEAT_HE_BIT, +}; + +/// Maximum number of words in the configuration buffer +#define PHY_CFG_BUF_SIZE 16 + +/// Structure containing the parameters of the PHY configuration +struct phy_cfg_tag +{ + /// Buffer containing the parameters specific for the PHY used + u32_l parameters[PHY_CFG_BUF_SIZE]; +}; + +/// Structure containing the parameters of the Trident PHY configuration +struct phy_trd_cfg_tag +{ + /// MDM type(nxm)(upper nibble) and MDM2RF path mapping(lower nibble) + u8_l path_mapping; + /// TX DC offset compensation + u32_l tx_dc_off_comp; +}; + +/// Structure containing the parameters of the Karst PHY configuration +struct phy_karst_cfg_tag +{ + /// TX IQ mismatch compensation in 2.4GHz + u32_l tx_iq_comp_2_4G[2]; + /// RX IQ mismatch compensation in 2.4GHz + u32_l rx_iq_comp_2_4G[2]; + /// TX IQ mismatch compensation in 5GHz + u32_l tx_iq_comp_5G[2]; + /// RX IQ mismatch compensation in 5GHz + u32_l rx_iq_comp_5G[2]; + /// RF path used by default (0 or 1) + u8_l path_used; +}; + +/// Structure containing the parameters of the @ref MM_START_REQ message +struct mm_start_req +{ + /// PHY configuration + struct phy_cfg_tag phy_cfg; + /// UAPSD timeout + u32_l uapsd_timeout; + /// Local LP clock accuracy (in ppm) + u16_l lp_clk_accuracy; +}; + +/// Structure containing the parameters of the @ref MM_SET_CHANNEL_REQ message +struct mm_set_channel_req +{ + /// Channel information + struct mac_chan_op chan; + /// Index of the RF for which the channel has to be set (0: operating (primary), 1: secondary + /// RF (used for additional radar detection). 
This parameter is reserved if no secondary RF + /// is available in the system + u8_l index; +}; + +/// Structure containing the parameters of the @ref MM_SET_CHANNEL_CFM message +struct mm_set_channel_cfm +{ + /// Radio index to be used in policy table + u8_l radio_idx; + /// TX power configured (in dBm) + s8_l power; +}; + +/// Structure containing the parameters of the @ref MM_SET_DTIM_REQ message +struct mm_set_dtim_req +{ + /// DTIM period + u8_l dtim_period; +}; + +/// Structure containing the parameters of the @ref MM_SET_POWER_REQ message +struct mm_set_power_req +{ + /// Index of the interface for which the parameter is configured + u8_l inst_nbr; + /// TX power (in dBm) + s8_l power; +}; + +/// Structure containing the parameters of the @ref MM_SET_POWER_CFM message +struct mm_set_power_cfm +{ + /// Radio index to be used in policy table + u8_l radio_idx; + /// TX power configured (in dBm) + s8_l power; +}; + +/// Structure containing the parameters of the @ref MM_SET_BEACON_INT_REQ message +struct mm_set_beacon_int_req +{ + /// Beacon interval + u16_l beacon_int; + /// Index of the interface for which the parameter is configured + u8_l inst_nbr; +}; + +/// Structure containing the parameters of the @ref MM_SET_BASIC_RATES_REQ message +struct mm_set_basic_rates_req +{ + /// Basic rate set (as expected by bssBasicRateSet field of Rates MAC HW register) + u32_l rates; + /// Index of the interface for which the parameter is configured + u8_l inst_nbr; + /// Band on which the interface will operate + u8_l band; +}; + +/// Structure containing the parameters of the @ref MM_SET_BSSID_REQ message +struct mm_set_bssid_req +{ + /// BSSID to be configured in HW + struct mac_addr bssid; + /// Index of the interface for which the parameter is configured + u8_l inst_nbr; +}; + +/// Structure containing the parameters of the @ref MM_SET_FILTER_REQ message +struct mm_set_filter_req +{ + /// RX filter to be put into rxCntrlReg HW register + u32_l filter; +}; + +/// Structure containing the parameters of the @ref MM_ADD_IF_REQ message. +struct mm_add_if_req +{ + /// Type of the interface (AP, STA, ADHOC, ...) + u8_l type; + /// MAC ADDR of the interface to start + struct mac_addr addr; + /// P2P Interface + bool_l p2p; +}; + +/// Structure containing the parameters of the @ref MM_SET_EDCA_REQ message +struct mm_set_edca_req +{ + /// EDCA parameters of the queue (as expected by edcaACxReg HW register) + u32_l ac_param; + /// Flag indicating if UAPSD can be used on this queue + bool_l uapsd; + /// HW queue for which the parameters are configured + u8_l hw_queue; + /// Index of the interface for which the parameters are configured + u8_l inst_nbr; +}; + +/// Structure containing the parameters of the @ref MM_SET_MU_EDCA_REQ message +struct mm_set_mu_edca_req +{ + /// MU EDCA parameters of the different HE queues + u32_l param[AC_MAX]; +}; + +/// Structure containing the parameters of the @ref MM_SET_UORA_REQ message +struct mm_set_uora_req +{ + /// Minimum exponent of OFDMA Contention Window. + u8_l eocw_min; + /// Maximum exponent of OFDMA Contention Window. 
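struct mm_set_channel_req above carries a struct mac_chan_op plus an RF index. A sketch of filling it for a plain 20 MHz channel on 2.4 GHz follows; the frequency and power values are examples and the helper is not part of the driver:

#include "lmac_msg.h"

/* Illustrative only: describe 2.4 GHz channel 6 (2437 MHz), 20 MHz wide,
 * on the operating (primary) RF. */
static void example_fill_set_channel(struct mm_set_channel_req *req)
{
    req->chan.band         = PHY_BAND_2G4;
    req->chan.type         = PHY_CHNL_BW_20;
    req->chan.prim20_freq  = 2437;  /* primary 20 MHz centre, in MHz */
    req->chan.center1_freq = 2437;  /* same as prim20_freq for a 20 MHz channel */
    req->chan.center2_freq = 0;     /* only meaningful for 80+80 MHz */
    req->chan.tx_power     = 20;    /* dBm, example value */
    req->chan.flags        = 0;     /* no CHAN_NO_IR/CHAN_DISABLED/CHAN_RADAR */
    req->index             = 0;     /* 0: operating RF, 1: secondary RF */
}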
+ u8_l eocw_max; +}; + +/// Structure containing the parameters of the @ref MM_SET_TXOP_RTS_THRES_REQ message +struct mm_set_txop_rts_thres_req +{ + /// TXOP RTS threshold + u16_l txop_dur_rts_thres; + /// Index of the interface for which the parameter is configured + u8_l inst_nbr; +}; + +/// Structure containing the parameters of the @ref MM_SET_BSS_COLOR_REQ message +struct mm_set_bss_color_req +{ + /// HE BSS color, formatted as per BSS_COLOR MAC HW register + u32_l bss_color; +}; + +struct mm_set_idle_req +{ + u8_l hw_idle; +}; + +/// Structure containing the parameters of the @ref MM_SET_SLOTTIME_REQ message +struct mm_set_slottime_req +{ + /// Slot time expressed in us + u8_l slottime; +}; + +/// Structure containing the parameters of the @ref MM_SET_MODE_REQ message +struct mm_set_mode_req +{ + /// abgnMode field of macCntrl1Reg register + u8_l abgnmode; +}; + +/// Structure containing the parameters of the @ref MM_SET_VIF_STATE_REQ message +struct mm_set_vif_state_req +{ + /// Association Id received from the AP (valid only if the VIF is of STA type) + u16_l aid; + /// Flag indicating if the VIF is active or not + bool_l active; + /// Interface index + u8_l inst_nbr; +}; + +/// Structure containing the parameters of the @ref MM_ADD_IF_CFM message. +struct mm_add_if_cfm +{ + /// Status of operation (different from 0 if unsuccessful) + u8_l status; + /// Interface index assigned by the LMAC + u8_l inst_nbr; +}; + +/// Structure containing the parameters of the @ref MM_REMOVE_IF_REQ message. +struct mm_remove_if_req +{ + /// Interface index assigned by the LMAC + u8_l inst_nbr; +}; + +/// Structure containing the parameters of the @ref MM_VERSION_CFM message. +struct mm_version_cfm +{ + /// Version of the LMAC FW + u32_l version_lmac; + /// Version1 of the MAC HW (as encoded in version1Reg MAC HW register) + u32_l version_machw_1; + /// Version2 of the MAC HW (as encoded in version2Reg MAC HW register) + u32_l version_machw_2; + /// Version1 of the PHY (depends on actual PHY) + u32_l version_phy_1; + /// Version2 of the PHY (depends on actual PHY) + u32_l version_phy_2; + /// Supported Features + u32_l features; + /// Maximum number of supported stations + u16_l max_sta_nb; + /// Maximum number of supported virtual interfaces + u8_l max_vif_nb; +}; + +/// Structure containing the parameters of the @ref MM_STA_ADD_REQ message. +struct mm_sta_add_req +{ + /// Maximum A-MPDU size, in bytes, for HE frames + u32_l ampdu_size_max_he; + /// Maximum A-MPDU size, in bytes, for VHT frames + u32_l ampdu_size_max_vht; + /// PAID/GID + u32_l paid_gid; + /// Maximum A-MPDU size, in bytes, for HT frames + u16_l ampdu_size_max_ht; + /// MAC address of the station to be added + struct mac_addr mac_addr; + /// A-MPDU spacing, in us + u8_l ampdu_spacing_min; + /// Interface index + u8_l inst_nbr; + /// TDLS station + bool_l tdls_sta; + /// Indicate if the station is TDLS link initiator station + bool_l tdls_sta_initiator; + /// Indicate if the TDLS Channel Switch is allowed + bool_l tdls_chsw_allowed; +}; + +/// Structure containing the parameters of the @ref MM_STA_ADD_CFM message. +struct mm_sta_add_cfm +{ + /// Status of the operation (different from 0 if unsuccessful) + u8_l status; + /// Index assigned by the LMAC to the newly added station + u8_l sta_idx; + /// MAC HW index of the newly added station + u8_l hw_sta_idx; +}; + +/// Structure containing the parameters of the @ref MM_STA_DEL_REQ message. 
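The features word returned in struct mm_version_cfm is a bit-field whose positions come from enum mm_features. Below is a small sketch of reading it, assuming the BIT() macro already used throughout this header; the helper names are hypothetical:

#include "lmac_msg.h"

/* Illustrative only: query LMAC capabilities from an MM_VERSION_CFM payload. */
static inline bool fw_supports_he(const struct mm_version_cfm *ver)
{
    return (ver->features & BIT(MM_FEAT_HE_BIT)) != 0;
}

static inline unsigned int fw_amsdu_max_size_field(const struct mm_version_cfm *ver)
{
    /* A-MSDU maximum size is encoded on two bits (the BIT0/BIT1 positions above);
     * the meaning of the 2-bit value is firmware-defined and not documented here. */
    return (ver->features >> MM_AMSDU_MAX_SIZE_BIT0) & 0x3;
}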
+struct mm_sta_del_req +{ + /// Index of the station to be deleted + u8_l sta_idx; +}; + +/// Structure containing the parameters of the @ref MM_STA_DEL_CFM message. +struct mm_sta_del_cfm +{ + /// Status of the operation (different from 0 if unsuccessful) + u8_l status; +}; + +/// Structure containing the parameters of the SET_POWER_MODE REQ message. +struct mm_setpowermode_req +{ + u8_l mode; + u8_l sta_idx; +}; + +/// Structure containing the parameters of the SET_POWER_MODE CFM message. +struct mm_setpowermode_cfm +{ + u8_l status; +}; + +/// Structure containing the parameters of the @ref MM_KEY_ADD REQ message. +struct mm_key_add_req +{ + /// Key index (valid only for default keys) + u8_l key_idx; + /// STA index (valid only for pairwise or mesh group keys) + u8_l sta_idx; + /// Key material + struct mac_sec_key key; + /// Cipher suite (WEP64, WEP128, TKIP, CCMP) + u8_l cipher_suite; + /// Index of the interface for which the key is set (valid only for default keys or mesh group keys) + u8_l inst_nbr; + /// A-MSDU SPP parameter + u8_l spp; + /// Indicate if provided key is a pairwise key or not + bool_l pairwise; +}; + +/// Structure containing the parameters of the @ref MM_KEY_ADD_CFM message. +struct mm_key_add_cfm +{ + /// Status of the operation (different from 0 if unsuccessful) + u8_l status; + /// HW index of the key just added + u8_l hw_key_idx; +}; + +/// Structure containing the parameters of the @ref MM_KEY_DEL_REQ message. +struct mm_key_del_req +{ + /// HW index of the key to be deleted + u8_l hw_key_idx; +}; + +/// Structure containing the parameters of the @ref MM_BA_ADD_REQ message. +struct mm_ba_add_req +{ + ///Type of agreement (0: TX, 1: RX) + u8_l type; + ///Index of peer station with which the agreement is made + u8_l sta_idx; + ///TID for which the agreement is made with peer station + u8_l tid; + ///Buffer size - number of MPDUs that can be held in its buffer per TID + u8_l bufsz; + /// Start sequence number negotiated during BA setup - the one in first aggregated MPDU counts more + u16_l ssn; +}; + +/// Structure containing the parameters of the @ref MM_BA_ADD_CFM message. +struct mm_ba_add_cfm +{ + ///Index of peer station for which the agreement is being confirmed + u8_l sta_idx; + ///TID for which the agreement is being confirmed + u8_l tid; + /// Status of ba establishment + u8_l status; + u8_l alligned; +}; + +/// Structure containing the parameters of the @ref MM_BA_DEL_REQ message. +struct mm_ba_del_req +{ + ///Type of agreement (0: TX, 1: RX) + u8_l type; + ///Index of peer station for which the agreement is being deleted + u8_l sta_idx; + ///TID for which the agreement is being deleted + u8_l tid; +}; + +/// Structure containing the parameters of the @ref MM_BA_DEL_CFM message. 
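struct mm_ba_add_req above is how the host asks the LMAC to establish a BlockAck agreement. A sketch of an RX-side request follows; the buffer size and the helper itself are example choices, not taken from the driver:

#include "lmac_msg.h"

/* Illustrative only: request an RX BlockAck agreement (type BA_AGMT_RX). */
static void example_fill_ba_add(struct mm_ba_add_req *req, u8 sta_idx, u8 tid, u16 ssn)
{
    req->type    = BA_AGMT_RX;  /* agreement for reception */
    req->sta_idx = sta_idx;     /* peer station the agreement is made with */
    req->tid     = tid;         /* TID covered by the agreement */
    req->bufsz   = 64;          /* MPDUs the reorder buffer holds for this TID */
    req->ssn     = ssn;         /* start sequence number negotiated at BA setup */
}

The LMAC answers with MM_BA_ADD_CFM; its status field presumably takes the BA_AGMT_* codes listed earlier, e.g. BA_AGMT_ALREADY_EXISTS when an agreement for the same STA and TID was never deleted.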
+struct mm_ba_del_cfm +{ + ///Index of peer station for which the agreement deletion is being confirmed + u8_l sta_idx; + ///TID for which the agreement deletion is being confirmed + u8_l tid; + /// Status of ba deletion + u8_l status; +}; + +/// Structure containing the parameters of the @ref MM_CHAN_CTXT_ADD_REQ message +struct mm_chan_ctxt_add_req +{ + /// Operating channel + struct mac_chan_op chan; +}; + +/// Structure containing the parameters of the @ref MM_CHAN_CTXT_ADD_REQ message +struct mm_chan_ctxt_add_cfm +{ + /// Status of the addition + u8_l status; + /// Index of the new channel context + u8_l index; +}; + + +/// Structure containing the parameters of the @ref MM_CHAN_CTXT_DEL_REQ message +struct mm_chan_ctxt_del_req +{ + /// Index of the new channel context to be deleted + u8_l index; +}; + + +/// Structure containing the parameters of the @ref MM_CHAN_CTXT_LINK_REQ message +struct mm_chan_ctxt_link_req +{ + /// VIF index + u8_l vif_index; + /// Channel context index + u8_l chan_index; + /// Indicate if this is a channel switch (unlink current ctx first if true) + u8_l chan_switch; +}; + +/// Structure containing the parameters of the @ref MM_CHAN_CTXT_UNLINK_REQ message +struct mm_chan_ctxt_unlink_req +{ + /// VIF index + u8_l vif_index; +}; + +/// Structure containing the parameters of the @ref MM_CHAN_CTXT_UPDATE_REQ message +struct mm_chan_ctxt_update_req +{ + /// Channel context index + u8_l chan_index; + /// New channel information + struct mac_chan_op chan; +}; + +/// Structure containing the parameters of the @ref MM_CHAN_CTXT_SCHED_REQ message +struct mm_chan_ctxt_sched_req +{ + /// VIF index + u8_l vif_index; + /// Channel context index + u8_l chan_index; + /// Type of the scheduling request (0: normal scheduling, 1: derogatory + /// scheduling) + u8_l type; +}; + +/// Structure containing the parameters of the @ref MM_CHANNEL_SWITCH_IND message +struct mm_channel_switch_ind +{ + /// Index of the channel context we will switch to + u8_l chan_index; + /// Indicate if the switch has been triggered by a Remain on channel request + bool_l roc; + /// VIF on which remain on channel operation has been started (if roc == 1) + u8_l vif_index; + /// Indicate if the switch has been triggered by a TDLS Remain on channel request + bool_l roc_tdls; +}; + +/// Structure containing the parameters of the @ref MM_CHANNEL_PRE_SWITCH_IND message +struct mm_channel_pre_switch_ind +{ + /// Index of the channel context we will switch to + u8_l chan_index; +}; + +/// Structure containing the parameters of the @ref MM_CONNECTION_LOSS_IND message. +struct mm_connection_loss_ind +{ + /// VIF instance number + u8_l inst_nbr; +}; + + +/// Structure containing the parameters of the @ref MM_DBG_TRIGGER_REQ message. +struct mm_dbg_trigger_req +{ + /// Error trace to be reported by the LMAC + char error[64]; +}; + +/// Structure containing the parameters of the @ref MM_SET_PS_MODE_REQ message. +struct mm_set_ps_mode_req +{ + /// Power Save is activated or deactivated + u8_l new_state; +}; + +/// Structure containing the parameters of the @ref MM_BCN_CHANGE_REQ message. 
+#define BCN_MAX_CSA_CPT 2 +struct mm_bcn_change_req +{ + /// Pointer, in host memory, to the new beacon template + u32_l bcn_ptr; + /// Length of the beacon template + u16_l bcn_len; + /// Offset of the TIM IE in the beacon + u16_l tim_oft; + /// Length of the TIM IE + u8_l tim_len; + /// Index of the VIF for which the beacon is updated + u8_l inst_nbr; + /// Offset of CSA (channel switch announcement) counters (0 means no counter) + u8_l csa_oft[BCN_MAX_CSA_CPT]; +}; + + +/// Structure containing the parameters of the @ref MM_TIM_UPDATE_REQ message. +struct mm_tim_update_req +{ + /// Association ID of the STA the bit of which has to be updated (0 for BC/MC traffic) + u16_l aid; + /// Flag indicating the availability of data packets for the given STA + u8_l tx_avail; + /// Index of the VIF for which the TIM is updated + u8_l inst_nbr; +}; + +/// Structure containing the parameters of the @ref MM_REMAIN_ON_CHANNEL_REQ message. +struct mm_remain_on_channel_req +{ + /// Operation Code + u8_l op_code; + /// VIF Index + u8_l vif_index; + /// Band (2.4GHz or 5GHz) + u8_l band; + /// Channel type: 20,40,80,160 or 80+80 MHz + u8_l type; + /// Frequency for Primary 20MHz channel (in MHz) + u16_l prim20_freq; + /// Frequency for Center of the contiguous channel or center of Primary 80+80 + u16_l center1_freq; + /// Frequency for Center of the non-contiguous secondary 80+80 + u16_l center2_freq; + /// Duration (in ms) + u32_l duration_ms; + /// TX power (in dBm) + s8_l tx_power; +}; + +/// Structure containing the parameters of the @ref MM_REMAIN_ON_CHANNEL_CFM message +struct mm_remain_on_channel_cfm +{ + /// Operation Code + u8_l op_code; + /// Status of the operation + u8_l status; + /// Channel Context index + u8_l chan_ctxt_index; +}; + +/// Structure containing the parameters of the @ref MM_REMAIN_ON_CHANNEL_EXP_IND message +struct mm_remain_on_channel_exp_ind +{ + /// VIF Index + u8_l vif_index; + /// Channel Context index + u8_l chan_ctxt_index; +}; + +/// Structure containing the parameters of the @ref MM_SET_UAPSD_TMR_REQ message. +struct mm_set_uapsd_tmr_req +{ + /// action: Start or Stop the timer + u8_l action; + /// timeout value, in milliseconds + u32_l timeout; +}; + +/// Structure containing the parameters of the @ref MM_SET_UAPSD_TMR_CFM message. +struct mm_set_uapsd_tmr_cfm +{ + /// Status of the operation (different from 0 if unsuccessful) + u8_l status; +}; + + +/// Structure containing the parameters of the @ref MM_PS_CHANGE_IND message +struct mm_ps_change_ind +{ + /// Index of the peer device that is switching its PS state + u8_l sta_idx; + /// New PS state of the peer device (0: active, 1: sleeping) + u8_l ps_state; +}; + +/// Structure containing the parameters of the @ref MM_P2P_VIF_PS_CHANGE_IND message +struct mm_p2p_vif_ps_change_ind +{ + /// Index of the P2P VIF that is switching its PS state + u8_l vif_index; + /// New PS state of the P2P VIF interface (0: active, 1: sleeping) + u8_l ps_state; +}; + +/// Structure containing the parameters of the @ref MM_TRAFFIC_REQ_IND message +struct mm_traffic_req_ind +{ + /// Index of the peer device that needs traffic + u8_l sta_idx; + /// Number of packets that need to be sent (if 0, all buffered traffic shall be sent and + /// if set to @ref PS_SP_INTERRUPTED, it means that current service period has been interrupted) + u8_l pkt_cnt; + /// Flag indicating if the traffic request concerns U-APSD queues or not + bool_l uapsd; +}; + +/// Structure containing the parameters of the @ref MM_SET_PS_OPTIONS_REQ message. 
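struct mm_remain_on_channel_req drives the remain-on-channel machinery (start or cancel, per enum mm_remain_on_channel_op). A sketch of a start request follows; all numeric values are examples and the helper is not part of the driver:

#include "lmac_msg.h"

/* Illustrative only: stay on 2.4 GHz channel 1 (2412 MHz) for 200 ms. */
static void example_fill_roc_start(struct mm_remain_on_channel_req *req, u8 vif_index)
{
    req->op_code      = MM_ROC_OP_START;
    req->vif_index    = vif_index;
    req->band         = PHY_BAND_2G4;
    req->type         = PHY_CHNL_BW_20;
    req->prim20_freq  = 2412;
    req->center1_freq = 2412;
    req->center2_freq = 0;
    req->duration_ms  = 200;   /* MM_REMAIN_ON_CHANNEL_EXP_IND follows once elapsed */
    req->tx_power     = 20;    /* dBm, example value */
}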
+struct mm_set_ps_options_req +{ + /// VIF Index + u8_l vif_index; + /// Listen interval (0 if wake up shall be based on DTIM period) + u16_l listen_interval; + /// Flag indicating if we shall listen the BC/MC traffic or not + bool_l dont_listen_bc_mc; +}; + +/// Structure containing the parameters of the @ref MM_CSA_COUNTER_IND message +struct mm_csa_counter_ind +{ + /// Index of the VIF + u8_l vif_index; + /// Updated CSA counter value + u8_l csa_count; +}; + +/// Structure containing the parameters of the @ref MM_CHANNEL_SURVEY_IND message +struct mm_channel_survey_ind +{ + /// Frequency of the channel + u16_l freq; + /// Noise in dbm + s8_l noise_dbm; + /// Amount of time spent of the channel (in ms) + u32_l chan_time_ms; + /// Amount of time the primary channel was sensed busy + u32_l chan_time_busy_ms; +}; + +/// Structure containing the parameters of the @ref MM_BFMER_ENABLE_REQ message. +struct mm_bfmer_enable_req +{ + /** + * Address of the beamforming report space allocated in host memory + * (Valid only if vht_su_bfmee is true) + */ + u32_l host_bfr_addr; + /** + * Size of the beamforming report space allocated in host memory. This space should + * be twice the maximum size of the expected beamforming reports as the FW will + * divide it in two in order to be able to upload a new report while another one is + * used in transmission + */ + u16_l host_bfr_size; + /// AID + u16_l aid; + /// Station Index + u8_l sta_idx; + /// Maximum number of spatial streams the station can receive + u8_l rx_nss; + /** + * Indicate if peer STA is MU Beamformee (VHT) capable + * (Valid only if vht_su_bfmee is true) + */ + bool_l vht_mu_bfmee; +}; + +/// Structure containing the parameters of the @ref MM_SET_P2P_NOA_REQ message. +struct mm_set_p2p_noa_req +{ + /// VIF Index + u8_l vif_index; + /// Allocated NOA Instance Number - Valid only if count = 0 + u8_l noa_inst_nb; + /// Count + u8_l count; + /// Indicate if NoA can be paused for traffic reason + bool_l dyn_noa; + /// Duration (in us) + u32_l duration_us; + /// Interval (in us) + u32_l interval_us; + /// Start Time offset from next TBTT (in us) + u32_l start_offset; +}; + +#ifdef AICWF_ARP_OFFLOAD +struct mm_set_arpoffload_en_req +{ + u32_l ipaddr; + u8_l enable; + u8_l vif_idx; +}; + +struct mm_set_arpoffload_en_cfm +{ + u8_l status; +}; +#endif + +struct mm_set_agg_disable_req +{ + u8_l disable; + u8_l staidx; + u8_l disable_rx; +}; + +struct mm_set_coex_req +{ + u8_l bt_on; + u8_l disable_coexnull; + u8_l enable_nullcts; + u8_l enable_periodic_timer; + u8_l coex_timeslot_set; + u32_l coex_timeslot[2]; +}; + +#if 0 +struct mm_set_rf_config_req +{ + u8_l def_band; + u8_l config_type; + u16_l offset; + u16_l len; + u16_l set; + u32_l rx_gain_24g[48][4]; + u32_l rx_gain_5g[32][4]; + u32_l tx_gain[32]; +}; +#endif +struct mm_set_rf_config_req +{ + u8_l table_sel; + u8_l table_ofst; + u8_l table_num; + u8_l deft_page; + u32_l data[64]; +}; + +typedef struct +{ + u32_l magic_num; /*“GWCR” or ’SWCR”*/ + u32_l info_flag; + u32_l calib_flag; + u32_l reserved0; + u32_l res_data[536/sizeof(u32_l)]; +}wf_rf_calib_res_drv_t; + +#define DRIVER_GET_WIFI_CALRES_MAGIC_NUM 0x52435747 +#define DRIVER_SET_WIFI_CALRES_MAGIC_NUM 0x52435753 + +struct mm_set_rf_calib_req +{ + u32_l cal_cfg_24g; + u32_l cal_cfg_5g; + u32_l param_alpha; + u32_l bt_calib_en; + u32_l bt_calib_param; + u8_l xtal_cap; + u8_l xtal_cap_fine; + +}; + +struct mm_set_rf_calib_req_v2 +{ + u32_l cal_cfg_24g; + u32_l cal_cfg_5g; + u32_l param_alpha; + u32_l bt_calib_en; + u32_l bt_calib_param; 
+ u8_l xtal_cap; + u8_l xtal_cap_fine; + u8_l reserved0[2]; + wf_rf_calib_res_drv_t cal_res; + +}; + +struct mm_set_rf_calib_cfm +{ + u32_l rxgain_24g_addr; + u32_l rxgain_5g_addr; + u32_l txgain_24g_addr; + u32_l txgain_5g_addr; +}; + +struct mm_set_rf_calib_cfm_v2 +{ + u32_l rxgain_24g_addr; + u32_l rxgain_5g_addr; + u32_l txgain_24g_addr; + u32_l txgain_5g_addr; + wf_rf_calib_res_drv_t cal_res; +}; + +struct mm_get_mac_addr_req +{ + u32_l get; +}; + +struct mm_get_mac_addr_cfm +{ + u8_l mac_addr[6]; +}; + +struct mm_get_sta_info_req +{ + u8_l sta_idx; +}; + +struct mm_get_sta_info_cfm +{ + u32_l rate_info; + u32_l txfailed; + u8 rssi; + u8 reserved[3]; + u32_l chan_time; + u32_l chan_busy_time; + u32_l ack_fail_stat; + u32_l ack_succ_stat; + u32_l chan_tx_busy_time; +}; + +typedef struct +{ + u8_l enable; + u8_l dsss; + u8_l ofdmlowrate_2g4; + u8_l ofdm64qam_2g4; + u8_l ofdm256qam_2g4; + u8_l ofdm1024qam_2g4; + u8_l ofdmlowrate_5g; + u8_l ofdm64qam_5g; + u8_l ofdm256qam_5g; + u8_l ofdm1024qam_5g; +} txpwr_lvl_conf_t; + +typedef struct +{ + u8_l enable; + s8_l pwrlvl_11b_11ag_2g4[12]; + s8_l pwrlvl_11n_11ac_2g4[10]; + s8_l pwrlvl_11ax_2g4[12]; +} txpwr_lvl_conf_v2_t; + +typedef struct +{ + u8_l enable; + s8_l pwrlvl_11b_11ag_2g4[12]; + s8_l pwrlvl_11n_11ac_2g4[10]; + s8_l pwrlvl_11ax_2g4[12]; + s8_l pwrlvl_11a_5g[12]; + s8_l pwrlvl_11n_11ac_5g[10]; + s8_l pwrlvl_11ax_5g[12]; +} txpwr_lvl_conf_v3_t; + +typedef struct +{ + u8_l enable; + s8_l pwrlvl_11b_11ag_2g4[12]; + s8_l pwrlvl_11n_11ac_2g4[10]; + s8_l pwrlvl_11ax_2g4[12]; + s8_l pwrlvl_11a_5g[8]; + s8_l pwrlvl_11n_11ac_5g[10]; + s8_l pwrlvl_11ax_5g[12]; + s8_l pwrlvl_11a_6g[8]; + s8_l pwrlvl_11n_11ac_6g[10]; + s8_l pwrlvl_11ax_6g[12]; +} txpwr_lvl_conf_v4_t; + +typedef struct +{ + u8_l enable; + s8_l pwrlvl_adj_tbl_2g4[3]; + s8_l pwrlvl_adj_tbl_5g[6]; +} txpwr_lvl_adj_conf_t; + +struct mm_set_txpwr_lvl_req +{ + union { + txpwr_lvl_conf_t txpwr_lvl; + txpwr_lvl_conf_v2_t txpwr_lvl_v2; + txpwr_lvl_conf_v3_t txpwr_lvl_v3; + txpwr_lvl_conf_v4_t txpwr_lvl_v4; + }; +}; + +struct mm_set_txpwr_lvl_adj_req +{ + txpwr_lvl_adj_conf_t txpwr_lvl_adj; +}; + +typedef struct +{ + u8_l loss_enable_2g4; + s8_l loss_value_2g4; + u8_l loss_enable_5g; + s8_l loss_value_5g; +} txpwr_loss_conf_t; + +typedef struct +{ + int8_t enable; + int8_t dsss; + int8_t ofdmlowrate_2g4; + int8_t ofdm64qam_2g4; + int8_t ofdm256qam_2g4; + int8_t ofdm1024qam_2g4; + int8_t ofdmlowrate_5g; + int8_t ofdm64qam_5g; + int8_t ofdm256qam_5g; + int8_t ofdm1024qam_5g; + +} txpwr_idx_conf_t; + +struct mm_set_txpwr_idx_req +{ + txpwr_idx_conf_t txpwr_idx; +}; + +typedef struct +{ + int8_t enable; + int8_t chan_1_4; + int8_t chan_5_9; + int8_t chan_10_13; + int8_t chan_36_64; + int8_t chan_100_120; + int8_t chan_122_140; + int8_t chan_142_165; +} txpwr_ofst_conf_t; + +/* + * pwrofst2x_tbl_2g4[3][3]: + * +---------------+----------+----------+----------+ + * | RateTyp\ChGrp | CH_1_4 | CH_5_9 | CH_10_13 | + * +---------------+----------+----------+----------+ + * | DSSS | [0][0] | [0][1] | [0][2] | + * +---------------+----------+----------+----------+ + * | OFDM_HIGHRATE | [1][0] | [1][1] | [1][2] | + * +---------------+----------+----------+----------+ + * | OFDM_LOWRATE | [2][0] | [2][1] | [2][2] | + * +---------------+----------+----------+----------+ + * pwrofst2x_tbl_5g[3][6]: + * +---------------+--------------+--------------+----------------+----------------+----------------+----------------+ + * | RateTyp\ChGrp | CH_42(36~50) | CH_58(51~64) | CH_106(98~114) | CH_122(115~130)| 
CH_138(131~146)| CH_155(147~166)| + * +---------------+--------------+--------------+----------------+----------------+----------------+----------------+ + * | OFDM_LOWRATE | [0][0] | [0][1] | [0][2] | [0][3] | [0][4] | [0][5] | + * +---------------+--------------+--------------+----------------+----------------+----------------+----------------+ + * | OFDM_HIGHRATE | [1][0] | [1][1] | [1][2] | [1][3] | [1][4] | [1][5] | + * +---------------+--------------+--------------+----------------+----------------+----------------+----------------+ + * | OFDM_MIDRATE | [2][0] | [2][1] | [2][2] | [2][3] | [2][4] | [2][5] | + * +---------------+--------------+--------------+----------------+----------------+----------------+----------------+ + */ + +typedef struct +{ + u8_l enable; + s8_l pwrofst2x_tbl_2g4[3][3]; + s8_l pwrofst2x_tbl_5g[3][6]; +} txpwr_ofst2x_conf_t; + +/* + * pwrofst2x_v2_tbl_2g4_ant0/1[3][3]: + * +---------------+----------+---------------+--------------+ + * | ChGrp\RateTyp | DSSS | OFDM_HIGHRATE | OFDM_LOWRATE | + * +---------------+----------+---------------+--------------+ + * | CH_1_4 | [0][0] | [0][1] | Reserved | + * +---------------+----------+---------------+--------------+ + * | CH_5_9 | [1][0] | [1][1] | Reserved | + * +---------------+----------+---------------+--------------+ + * | CH_10_13 | [2][0] | [2][1] | Reserved | + * +---------------+----------+---------------+--------------+ + * pwrofst2x_v2_tbl_5g_ant0/1[6][3]: + * +-----------------+---------------+--------------+--------------+ + * | ChGrp\RateTyp | OFDM_HIGHRATE | OFDM_LOWRATE | OFDM_MIDRATE | + * +-----------------+---------------+--------------+--------------+ + * | CH_42(36~50) | [0][0] | Reserved | Reserved | + * +-----------------+---------------+--------------+--------------+ + * | CH_58(51~64) | [1][0] | Reserved | Reserved | + * +-----------------+---------------+--------------+--------------+ + * | CH_106(98~114) | [2][0] | Reserved | Reserved | + * +-----------------+---------------+--------------+--------------+ + * | CH_122(115~130) | [3][0] | Reserved | Reserved | + * +-----------------+---------------+--------------+--------------+ + * | CH_138(131~146) | [4][0] | Reserved | Reserved | + * +-----------------+---------------+--------------+--------------+ + * | CH_155(147~166) | [5][0] | Reserved | Reserved | + * +-----------------+---------------+--------------+--------------+ + */ + +typedef struct +{ + u8_l enable; + u8_l pwrofst_flags; + s8_l pwrofst2x_tbl_2g4_ant0[3][3]; + s8_l pwrofst2x_tbl_2g4_ant1[3][3]; + s8_l pwrofst2x_tbl_5g_ant0[6][3]; + s8_l pwrofst2x_tbl_5g_ant1[6][3]; + s8_l pwrofst2x_tbl_6g_ant0[15]; + s8_l pwrofst2x_tbl_6g_ant1[15]; +} txpwr_ofst2x_conf_v2_t; + +struct mm_set_txpwr_ofst_req +{ + union { + txpwr_ofst_conf_t txpwr_ofst; + txpwr_ofst2x_conf_t txpwr_ofst2x; + txpwr_ofst2x_conf_v2_t txpwr_ofst2x_v2; + }; +}; + +typedef struct +{ + u8_l enable; + u8_l xtal_cap; + u8_l xtal_cap_fine; +} xtal_cap_conf_t; + + +struct mm_set_stack_start_req +{ + u8_l is_stack_start; + u8_l efuse_valid; + u8_l set_vendor_info; + u8_l fwtrace_redir; +}; + +struct mm_set_stack_start_cfm +{ + u8_l is_5g_support; + u8_l vendor_info; +}; + +/// Structure containing the parameters of the @ref MM_SET_P2P_OPPPS_REQ message. +struct mm_set_p2p_oppps_req +{ + /// VIF Index + u8_l vif_index; + /// CTWindow + u8_l ctwindow; +}; + +/// Structure containing the parameters of the @ref MM_SET_P2P_NOA_CFM message. 
+struct mm_set_p2p_noa_cfm +{ + /// Request status + u8_l status; +}; + +/// Structure containing the parameters of the @ref MM_SET_P2P_OPPPS_CFM message. +struct mm_set_p2p_oppps_cfm +{ + /// Request status + u8_l status; +}; + +/// Structure containing the parameters of the @ref MM_P2P_NOA_UPD_IND message. +struct mm_p2p_noa_upd_ind +{ + /// VIF Index + u8_l vif_index; + /// NOA Instance Number + u8_l noa_inst_nb; + /// NoA Type + u8_l noa_type; + /// Count + u8_l count; + /// Duration (in us) + u32_l duration_us; + /// Interval (in us) + u32_l interval_us; + /// Start Time + u32_l start_time; +}; + +/// Structure containing the parameters of the @ref MM_CFG_RSSI_REQ message +struct mm_cfg_rssi_req +{ + /// Index of the VIF + u8_l vif_index; + /// RSSI threshold + s8_l rssi_thold; + /// RSSI hysteresis + u8_l rssi_hyst; +}; + +/// Structure containing the parameters of the @ref MM_RSSI_STATUS_IND message +struct mm_rssi_status_ind +{ + /// Index of the VIF + u8_l vif_index; + /// Status of the RSSI + bool_l rssi_status; + /// Current RSSI + s8_l rssi; +}; + +/// Structure containing the parameters of the @ref MM_PKTLOSS_IND message +struct mm_pktloss_ind +{ + /// Index of the VIF + u8_l vif_index; + /// Address of the STA for which there is a packet loss + struct mac_addr mac_addr; + /// Number of packets lost + u32 num_packets; +}; + +/// Structure containing the parameters of the @ref MM_CSA_FINISH_IND message +struct mm_csa_finish_ind +{ + /// Index of the VIF + u8_l vif_index; + /// Status of the operation + u8_l status; + /// New channel ctx index + u8_l chan_idx; +}; + +/// Structure containing the parameters of the @ref MM_CSA_TRAFFIC_IND message +struct mm_csa_traffic_ind +{ + /// Index of the VIF + u8_l vif_index; + /// Is tx traffic enable or disable + bool_l enable; +}; + +/// Structure containing the parameters of the @ref MM_MU_GROUP_UPDATE_REQ message. +/// Size allocated for the structure depends of the number of group +struct mm_mu_group_update_req +{ + /// Station index + u8_l sta_idx; + /// Number of groups the STA belongs to + u8_l group_cnt; + /// Group information + struct + { + /// Group Id + u8_l group_id; + /// User position + u8_l user_pos; + } groups[0]; +}; + +/////////////////////////////////////////////////////////////////////////////// +/////////// For Scan messages +/////////////////////////////////////////////////////////////////////////////// +enum scan_msg_tag +{ + /// Scanning start Request. + SCAN_START_REQ = LMAC_FIRST_MSG(TASK_SCAN), + /// Scanning start Confirmation. + SCAN_START_CFM, + /// End of scanning indication. 
+ SCAN_DONE_IND, + /// Cancel scan request + SCAN_CANCEL_REQ, + /// Cancel scan confirmation + SCAN_CANCEL_CFM, + + /// MAX number of messages + SCAN_MAX, +}; + +/// Maximum number of SSIDs in a scan request +#define SCAN_SSID_MAX 3 + +/// Maximum number of channels in a scan request +#define SCAN_CHANNEL_MAX (MAC_DOMAINCHANNEL_24G_MAX + MAC_DOMAINCHANNEL_5G_MAX) + +/// Maximum length of the ProbeReq IEs (SoftMAC mode) +#define SCAN_MAX_IE_LEN 300 + +/// Maximum number of PHY bands supported +#define SCAN_BAND_MAX 2 + +/// Structure containing the parameters of the @ref SCAN_START_REQ message +struct scan_start_req +{ + /// List of channel to be scanned + struct mac_chan_def chan[SCAN_CHANNEL_MAX]; + /// List of SSIDs to be scanned + struct mac_ssid ssid[SCAN_SSID_MAX]; + /// BSSID to be scanned + struct mac_addr bssid; + /// Pointer (in host memory) to the additional IEs that need to be added to the ProbeReq + /// (following the SSID element) + u32_l add_ies; + /// Length of the additional IEs + u16_l add_ie_len; + /// Index of the VIF that is scanning + u8_l vif_idx; + /// Number of channels to scan + u8_l chan_cnt; + /// Number of SSIDs to scan for + u8_l ssid_cnt; + /// no CCK - For P2P frames not being sent at CCK rate in 2GHz band. + bool no_cck; +}; + +/// Structure containing the parameters of the @ref SCAN_START_CFM message +struct scan_start_cfm +{ + /// Status of the request + u8_l status; +}; + +/// Structure containing the parameters of the @ref SCAN_CANCEL_REQ message +struct scan_cancel_req +{ +}; + +/// Structure containing the parameters of the @ref SCAN_START_CFM message +struct scan_cancel_cfm +{ + /// Status of the request + u8_l status; +}; + +/////////////////////////////////////////////////////////////////////////////// +/////////// For Scanu messages +/////////////////////////////////////////////////////////////////////////////// +/// Messages that are logically related to the task. +enum +{ + /// Scan request from host. + SCANU_START_REQ = LMAC_FIRST_MSG(TASK_SCANU), + /// Scanning start Confirmation. + SCANU_START_CFM, + /// Join request + SCANU_JOIN_REQ, + /// Join confirmation. + SCANU_JOIN_CFM, + /// Scan result indication. + SCANU_RESULT_IND, + /// Fast scan request from any other module. + SCANU_FAST_REQ, + /// Confirmation of fast scan request. + SCANU_FAST_CFM, + + SCANU_VENDOR_IE_REQ, + SCANU_VENDOR_IE_CFM, + SCANU_START_CFM_ADDTIONAL, + SCANU_CANCEL_REQ, + SCANU_CANCEL_CFM, + + /// MAX number of messages + SCANU_MAX, +}; + +/// Maximum length of the additional ProbeReq IEs (FullMAC mode) +#define SCANU_MAX_IE_LEN 200 + +/// Structure containing the parameters of the @ref SCANU_START_REQ message +struct scanu_start_req +{ + /// List of channel to be scanned + struct mac_chan_def chan[SCAN_CHANNEL_MAX]; + /// List of SSIDs to be scanned + struct mac_ssid ssid[SCAN_SSID_MAX]; + /// BSSID to be scanned (or WILDCARD BSSID if no BSSID is searched in particular) + struct mac_addr bssid; + /// Address (in host memory) of the additional IEs that need to be added to the ProbeReq + /// (following the SSID element) + u32_l add_ies; + /// Length of the additional IEs + u16_l add_ie_len; + /// Index of the VIF that is scanning + u8_l vif_idx; + /// Number of channels to scan + u8_l chan_cnt; + /// Number of SSIDs to scan for + u8_l ssid_cnt; + /// no CCK - For P2P frames not being sent at CCK rate in 2GHz band. 
+ bool no_cck; + /// Scan duration, in us + u32_l duration; +}; + +struct scanu_vendor_ie_req +{ + u16_l add_ie_len; + u8_l vif_idx; + u8_l ie[256]; +}; + +/// Structure containing the parameters of the @ref SCANU_START_CFM message +struct scanu_start_cfm +{ + /// Index of the VIF that was scanning + u8_l vif_idx; + /// Status of the request + u8_l status; + /// Number of scan results available + u8_l result_cnt; +}; + +/// Parameters of the @SCANU_RESULT_IND message +struct scanu_result_ind +{ + /// Length of the frame + u16_l length; + /// Frame control field of the frame. + u16_l framectrl; + /// Center frequency on which we received the packet + u16_l center_freq; + /// PHY band + u8_l band; + /// Index of the station that sent the frame. 0xFF if unknown. + u8_l sta_idx; + /// Index of the VIF that received the frame. 0xFF if unknown. + u8_l inst_nbr; + /// RSSI of the received frame. + s8_l rssi; + /// Frame payload. + u32_l payload[]; +}; + +/// Structure containing the parameters of the message. +struct scanu_fast_req +{ + /// The SSID to scan in the channel. + struct mac_ssid ssid; + /// BSSID. + struct mac_addr bssid; + /// Probe delay. + u16_l probe_delay; + /// Minimum channel time. + u16_l minch_time; + /// Maximum channel time. + u16_l maxch_time; + /// The channel number to scan. + u16_l ch_nbr; +}; + +/////////////////////////////////////////////////////////////////////////////// +/////////// For ME messages +/////////////////////////////////////////////////////////////////////////////// +/// Messages that are logically related to the task. +enum +{ + /// Configuration request from host. + ME_CONFIG_REQ = LMAC_FIRST_MSG(TASK_ME), + /// Configuration confirmation. + ME_CONFIG_CFM, + /// Configuration request from host. + ME_CHAN_CONFIG_REQ, + /// Configuration confirmation. + ME_CHAN_CONFIG_CFM, + /// Set control port state for a station. + ME_SET_CONTROL_PORT_REQ, + /// Control port setting confirmation. + ME_SET_CONTROL_PORT_CFM, + /// TKIP MIC failure indication. 
+ ME_TKIP_MIC_FAILURE_IND, + /// Add a station to the FW (AP mode) + ME_STA_ADD_REQ, + /// Confirmation of the STA addition + ME_STA_ADD_CFM, + /// Delete a station from the FW (AP mode) + ME_STA_DEL_REQ, + /// Confirmation of the STA deletion + ME_STA_DEL_CFM, + /// Indication of a TX RA/TID queue credit update + ME_TX_CREDITS_UPDATE_IND, + /// Request indicating to the FW that there is traffic buffered on host + ME_TRAFFIC_IND_REQ, + /// Confirmation that the @ref ME_TRAFFIC_IND_REQ has been executed + ME_TRAFFIC_IND_CFM, + /// Request of RC statistics to a station + ME_RC_STATS_REQ, + /// RC statistics confirmation + ME_RC_STATS_CFM, + /// RC fixed rate request + ME_RC_SET_RATE_REQ, + /// Configure monitor interface + ME_CONFIG_MONITOR_REQ, + /// Configure monitor interface response + ME_CONFIG_MONITOR_CFM, + /// Setting power Save mode request from host + ME_SET_PS_MODE_REQ, + /// Set power Save mode confirmation + ME_SET_PS_MODE_CFM, + /// Setting Low Power level request from host + ME_SET_LP_LEVEL_REQ, + /// Set Low Power level confirmation + ME_SET_LP_LEVEL_CFM, + /// MAX number of messages + ME_MAX, +}; + +/// Structure containing the parameters of the @ref ME_START_REQ message +struct me_config_req +{ + /// HT Capabilities + struct mac_htcapability ht_cap; + /// VHT Capabilities + struct mac_vhtcapability vht_cap; + /// HE capabilities + struct mac_hecapability he_cap; + /// Lifetime of packets sent under a BlockAck agreement (expressed in TUs) + u16_l tx_lft; + /// Maximum supported BW + u8_l phy_bw_max; + /// Boolean indicating if HT is supported or not + bool_l ht_supp; + /// Boolean indicating if VHT is supported or not + bool_l vht_supp; + /// Boolean indicating if HE is supported or not + bool_l he_supp; + /// Boolean indicating if HE OFDMA UL is enabled or not + bool_l he_ul_on; + /// Boolean indicating if PS mode shall be enabled or not + bool_l ps_on; + /// Boolean indicating if Antenna Diversity shall be enabled or not + bool_l ant_div_on; + /// Boolean indicating if Dynamic PS mode shall be used or not + bool_l dpsm; +}; + +/// Structure containing the parameters of the @ref ME_CHAN_CONFIG_REQ message +struct me_chan_config_req +{ + /// List of 2.4GHz supported channels + struct mac_chan_def chan2G4[MAC_DOMAINCHANNEL_24G_MAX]; + /// List of 5GHz supported channels + struct mac_chan_def chan5G[MAC_DOMAINCHANNEL_5G_MAX]; + /// Number of 2.4GHz channels in the list + u8_l chan2G4_cnt; + /// Number of 5GHz channels in the list + u8_l chan5G_cnt; +}; + +/// Structure containing the parameters of the @ref ME_SET_CONTROL_PORT_REQ message +struct me_set_control_port_req +{ + /// Index of the station for which the control port is opened + u8_l sta_idx; + /// Control port state + bool_l control_port_open; +}; + +/// Structure containing the parameters of the @ref ME_TKIP_MIC_FAILURE_IND message +struct me_tkip_mic_failure_ind +{ + /// Address of the sending STA + struct mac_addr addr; + /// TSC value + u64_l tsc; + /// Boolean indicating if the packet was a group or unicast one (true if group) + bool_l ga; + /// Key Id + u8_l keyid; + /// VIF index + u8_l vif_idx; +}; + +/// Structure containing the parameters of the @ref ME_STA_ADD_REQ message +struct me_sta_add_req +{ + /// MAC address of the station to be added + struct mac_addr mac_addr; + /// Supported legacy rates + struct mac_rateset rate_set; + /// HT Capabilities + struct mac_htcapability ht_cap; + /// VHT Capabilities + struct mac_vhtcapability vht_cap; + /// HE capabilities + struct mac_hecapability he_cap; + /// Flags 
giving additional information about the station (@ref mac_sta_flags) + u32_l flags; + /// Association ID of the station + u16_l aid; + /// Bit field indicating which queues have U-APSD enabled + u8_l uapsd_queues; + /// Maximum size, in frames, of a APSD service period + u8_l max_sp_len; + /// Operation mode information (valid if bit @ref STA_OPMOD_NOTIF is + /// set in the flags) + u8_l opmode; + /// Index of the VIF the station is attached to + u8_l vif_idx; + /// Whether the the station is TDLS station + bool_l tdls_sta; + /// Indicate if the station is TDLS link initiator station + bool_l tdls_sta_initiator; + /// Indicate if the TDLS Channel Switch is allowed + bool_l tdls_chsw_allowed; +}; + +/// Structure containing the parameters of the @ref ME_STA_ADD_CFM message +struct me_sta_add_cfm +{ + /// Station index + u8_l sta_idx; + /// Status of the station addition + u8_l status; + /// PM state of the station + u8_l pm_state; + u8_l alligned; +}; + +/// Structure containing the parameters of the @ref ME_STA_DEL_REQ message. +struct me_sta_del_req +{ + /// Index of the station to be deleted + u8_l sta_idx; + /// Whether the the station is TDLS station + bool_l tdls_sta; +}; + +/// Structure containing the parameters of the @ref ME_TX_CREDITS_UPDATE_IND message. +struct me_tx_credits_update_ind +{ + /// Index of the station for which the credits are updated + u8_l sta_idx; + /// TID for which the credits are updated + u8_l tid; + /// Offset to be applied on the credit count + s8_l credits; +}; + +/// Structure containing the parameters of the @ref ME_TRAFFIC_IND_REQ message. +struct me_traffic_ind_req +{ + /// Index of the station for which UAPSD traffic is available on host + u8_l sta_idx; + /// Flag indicating the availability of UAPSD packets for the given STA + u8_l tx_avail; + /// Indicate if traffic is on uapsd-enabled queues + bool_l uapsd; +}; + +struct mm_apm_staloss_ind +{ + u8_l sta_idx; + u8_l vif_idx; + u8_l mac_addr[6]; +}; + +enum vendor_hwconfig_tag{ + ACS_TXOP_REQ = 0, + CHANNEL_ACCESS_REQ, + MAC_TIMESCALE_REQ, + CCA_THRESHOLD_REQ, + BWMODE_REQ, + CHIP_TEMP_GET_REQ, + AP_PS_LEVEL_SET_REQ, + CUSTOMIZED_FREQ_REQ, + WAKEUP_INFO_REQ, + KEEPALIVE_PKT_REQ, +}; + +enum vendor_hwconfig_tag_x2{ + ACS_TXOP_REQ_X2 = 0, + CHANNEL_ACCESS_REQ_X2, + MAC_TIMESCALE_REQ_X2, + CCA_THRESHOLD_REQ_X2, + BWMODE_REQ_X2, + CHIP_TEMP_GET_REQ_X2, + STBC_MCS_SET_REQ_X2, + MAX_AGG_TX_CNT_REQ_X2, + MAX_BW_MCS_THRESH_SET_REQ_X2, + DCM_FORCE_EN_REQ_X2, + AUTO_CCA_EN_REQ_X2, + NSS_1T2R_REQ_X2, + ON_AIR_DUTY_CYCLE_REQ_X2, +}; + + +enum { + BWMODE20M = 0, + BWMODE10M, + BWMODE5M, +}; + +struct mm_set_acs_txop_req +{ + u32_l hwconfig_id; + u16_l txop_bk; + u16_l txop_be; + u16_l txop_vi; + u16_l txop_vo; +}; + +struct mm_set_channel_access_req +{ + u32_l hwconfig_id; + u32_l edca[4]; + u8_l vif_idx; + u8_l retry_cnt; + u8_l rts_en; + u8_l long_nav_en; + u8_l cfe_en; + u8_l rc_retry_cnt[3]; + s8_l ccademod_th; + u8_l remove_1m2m; +}; + +struct mm_set_mac_timescale_req +{ + u32_l hwconfig_id; + u8_l sifsA_time; + u8_l sifsB_time; + u8_l slot_time; + u8_l rx_startdelay_ofdm; + u8_l rx_startdelay_long; + u8_l rx_startdelay_short; +}; + +struct mm_set_cca_threshold_req +{ + u32_l hwconfig_id; + u8_l auto_cca_en; + s8_l cca20p_rise_th; + s8_l cca20s_rise_th; + s8_l cca20p_fall_th; + s8_l cca20s_fall_th; + +}; + +struct mm_set_bwmode_req +{ + u32_l hwconfig_id; + u8_l bwmode; +}; + +struct mm_get_chip_temp_req +{ + u32_l hwconfig_id; +}; + +struct mm_get_chip_temp_cfm +{ + /// Temp degree val + s8_l degree; +}; + 
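The vendor hardware-configuration messages above all follow the same tagging convention: the first field of every request is a hwconfig_id taken from enum vendor_hwconfig_tag, telling the firmware which request layout follows, and get-style requests are answered with the corresponding *_cfm structure. The sketch below only illustrates that convention; it is not part of the driver sources, and send_hwconfig() is a hypothetical stand-in for the driver's real firmware messaging path.

/* Illustrative sketch only: tag each request with its vendor_hwconfig_tag value.
 * send_hwconfig() is a hypothetical transport helper, not a driver API. */
extern int send_hwconfig(const void *req, unsigned int req_len,
                         void *cfm, unsigned int cfm_len);

static int example_set_bwmode_and_read_temp(s8_l *degree_out)
{
    struct mm_set_bwmode_req bw_req = {
        .hwconfig_id = BWMODE_REQ,   /* tag selects the request layout */
        .bwmode      = BWMODE20M,    /* request 20MHz operation */
    };
    struct mm_get_chip_temp_req temp_req = { .hwconfig_id = CHIP_TEMP_GET_REQ };
    struct mm_get_chip_temp_cfm temp_cfm = { 0 };
    int ret;

    /* Set-style request: no payload comes back, only a status. */
    ret = send_hwconfig(&bw_req, sizeof(bw_req), NULL, 0);
    if (ret)
        return ret;

    /* Get-style request: the answer arrives in the matching _cfm structure. */
    ret = send_hwconfig(&temp_req, sizeof(temp_req), &temp_cfm, sizeof(temp_cfm));
    if (ret == 0)
        *degree_out = temp_cfm.degree;  /* chip temperature reading */
    return ret;
}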
+struct mm_get_stbc_msc_req +{ + u32_l hwconfig_id; + u8_l enable; + u8_l mcs_thresh; +}; + +struct mm_set_max_tx_agg_cnt_req +{ + u32_l hwconfig_id; + u8_l enale; + u8_l mcs_thresh; + u8_l max_agg_cnt[AC_MAX]; +}; + +struct mm_set_max_bw_mcs_thresh_req +{ + u32_l hwconfig_id; + u8_l enale; + u8_l max_bw_mcs_thresh; +}; + +struct mm_set_dcm_force_en_req +{ + u32_l hwconfig_id; + u8_l enable; +}; + + +struct mm_set_auto_cca_en_req +{ + u32_l hwconfig_id; + u8_l enable; + int8_t max_cca_thresh; + u8_l default_cca_set; + int8_t default_cca_thresh; +}; + +struct mm_set_nss_1t2r_req +{ + u32_l hwconfig_id; + u8_l enable; +}; + +struct mm_set_on_air_duty_cycle_req +{ + u32_l hwconfig_id; + u8_l enable; + u8_l percent;//10 means 10%, 1-99 +}; + + +struct mm_set_vendor_hwconfig_cfm +{ + u32_l hwconfig_id; + union { + struct mm_get_chip_temp_cfm chip_temp_cfm; + }; +}; + +struct mm_set_customized_freq_req +{ + u32_l hwconfig_id; + u16_l raw_freq[4]; + u16_l map_freq[4]; +}; + +struct mm_set_keepalive_req +{ + u32_l hwconfig_id; + u16_l code; + u16_l length; + u32_l intv; + u8_l payload[]; +}; + +struct mm_set_txop_req +{ + u16_l txop_bk; + u16_l txop_be; + u16_l txop_vi; + u16_l txop_vo; + u8_l long_nav_en; + u8_l cfe_en; +}; + +struct mm_set_vendor_trx_param_req +{ + u32_l edca[4]; + u8_l vif_idx; + u8_l retry_cnt; +}; + +struct mm_get_fw_version_cfm +{ + u8_l fw_version_len; + u8_l fw_version[63]; +}; + +struct mm_get_wifi_disable_cfm +{ + u8_l wifi_disable; +}; + +enum vendor_swconfig_tag +{ + BCN_CFG_REQ = 0, + TEMP_COMP_SET_REQ, + TEMP_COMP_GET_REQ, + EXT_FLAGS_SET_REQ, + EXT_FLAGS_GET_REQ, + EXT_FLAGS_MASK_SET_REQ, +}; + +enum vendor_swconfig_tag_x2 +{ + BCN_CFG_REQ_X2 = 0, + TEMP_COMP_SET_REQ_X2, + TEMP_COMP_GET_REQ_X2, + EXT_FLAGS_SET_REQ_X2, + EXT_FLAGS_GET_REQ_X2, + EXT_FLAGS_MASK_SET_REQ_X2, + TWO_ANT_RSSI_GET_REQ_X2, +}; + + +struct mm_set_bcn_cfg_req +{ + /// Ignore or not bcn tim bcmc bit + bool_l tim_bcmc_ignored_enable; +}; + +struct mm_set_bcn_cfg_cfm +{ + /// Request status + bool_l tim_bcmc_ignored_status; +}; + +struct mm_set_temp_comp_req +{ + /// Enable or not temp comp + u8_l enable; + u8_l reserved[3]; + u32_l tmr_period_ms; +}; + +struct mm_set_temp_comp_cfm +{ + /// Request status + u8_l status; +}; + +struct mm_get_temp_comp_cfm +{ + /// Request status + u8_l status; + /// Temp degree val + s8_l degree; +}; + +struct mm_set_ext_flags_req +{ + u32_l user_flags; +}; + +struct mm_set_ext_flags_cfm +{ + u32_l user_flags; +}; + +struct mm_get_ext_flags_cfm +{ + u32_l user_flags; +}; + +struct mm_mask_set_ext_flags_req +{ + u32_l user_flags_mask; + u32_l user_flags_val; +}; + +struct mm_mask_set_ext_flags_cfm +{ + u32_l user_flags; +}; + +struct mm_set_vendor_swconfig_req +{ + u32_l swconfig_id; + union { + struct mm_set_bcn_cfg_req bcn_cfg_req; + struct mm_set_temp_comp_req temp_comp_set_req; + struct mm_set_ext_flags_req ext_flags_set_req; + struct mm_mask_set_ext_flags_req ext_flags_mask_set_req; + }; +}; + +struct mm_set_vendor_swconfig_cfm +{ + u32_l swconfig_id; + union { + struct mm_set_bcn_cfg_cfm bcn_cfg_cfm; + struct mm_set_temp_comp_cfm temp_comp_set_cfm; + struct mm_get_temp_comp_cfm temp_comp_get_cfm; + struct mm_set_ext_flags_cfm ext_flags_set_cfm; + struct mm_get_ext_flags_cfm ext_flags_get_cfm; + struct mm_mask_set_ext_flags_cfm ext_flags_mask_set_cfm; + }; +}; + +struct mm_set_txpwr_lvl_per_sta_req +{ + u8_l sta_idx; + s8_l tx_pwr_offset; +}; + +/// Structure containing the parameters of the @ref ME_RC_STATS_REQ message. 
+struct me_rc_stats_req +{ + /// Index of the station for which the RC statistics are requested + u8_l sta_idx; +}; + +/// Structure containing the rate control statistics +struct rc_rate_stats +{ + /// Number of attempts (per sampling interval) + u16_l attempts; + /// Number of success (per sampling interval) + u16_l success; + /// Estimated probability of success (EWMA) + u16_l probability; + /// Rate configuration of the sample + u16_l rate_config; + union + { + struct { + /// Number of times the sample has been skipped (per sampling interval) + u8_l sample_skipped; + /// Whether the old probability is available + bool_l old_prob_available; + /// Whether the rate can be used in the retry chain + bool_l rate_allowed; + }; + struct { + /// RU size and UL length received in the latest HE trigger frame + u16_l ru_and_length; + }; + }; +}; + +/// Number of RC samples +#define RC_MAX_N_SAMPLE 10 +/// Index of the HE statistics element in the table +#define RC_HE_STATS_IDX RC_MAX_N_SAMPLE + +/// Structure containing the parameters of the @ref ME_RC_STATS_CFM message. +struct me_rc_stats_cfm +{ + /// Index of the station for which the RC statistics are provided + u8_l sta_idx; + /// Number of samples used in the RC algorithm + u16_l no_samples; + /// Number of MPDUs transmitted (per sampling interval) + u16_l ampdu_len; + /// Number of AMPDUs transmitted (per sampling interval) + u16_l ampdu_packets; + /// Average number of MPDUs in each AMPDU frame (EWMA) + u32_l avg_ampdu_len; + // Current step 0 of the retry chain + u8_l sw_retry_step; + /// Trial transmission period + u8_l sample_wait; + /// Retry chain steps + u16_l retry_step_idx[4]; + /// RC statistics - Max number of RC samples, plus one for the HE TB statistics + struct rc_rate_stats rate_stats[RC_MAX_N_SAMPLE + 1]; + /// Throughput - Max number of RC samples, plus one for the HE TB statistics + u32_l tp[RC_MAX_N_SAMPLE + 1]; +}; + +/// Structure containing the parameters of the @ref ME_RC_SET_RATE_REQ message. +struct me_rc_set_rate_req +{ + /// Index of the station for which the fixed rate is set + u8_l sta_idx; + /// Rate configuration to be set + u16_l fixed_rate_cfg; +}; + +/// Structure containing the parameters of the @ref ME_CONFIG_MONITOR_REQ message. +struct me_config_monitor_req +{ + /// Channel to configure + struct mac_chan_op chan; + /// Is channel data valid + bool_l chan_set; + /// Enable report of unsupported HT frames + bool_l uf; + /// Enable auto-reply as the mac_addr matches + bool_l auto_reply; +}; + +/// Structure containing the parameters of the @ref ME_CONFIG_MONITOR_CFM message. +struct me_config_monitor_cfm +{ + /// Channel context index + u8_l chan_index; + /// Channel parameters + struct mac_chan_op chan; +}; + +/// Structure containing the parameters of the @ref ME_SET_PS_MODE_REQ message. +struct me_set_ps_mode_req +{ + /// Power Save is activated or deactivated + u8_l ps_state; +}; + +/// Structure containing the parameters of the @ref ME_SET_LP_LEVEL_REQ message. 
+struct me_set_lp_level_req +{ + /// Low Power level + u8_l lp_level; +}; + + +/////////////////////////////////////////////////////////////////////////////// +/////////// For SM messages +/////////////////////////////////////////////////////////////////////////////// +/// Message API of the SM task +enum sm_msg_tag +{ + /// Request to connect to an AP + SM_CONNECT_REQ = LMAC_FIRST_MSG(TASK_SM), + /// Confirmation of connection + SM_CONNECT_CFM, + /// Indicates that the SM associated to the AP + SM_CONNECT_IND, + /// Request to disconnect + SM_DISCONNECT_REQ, + /// Confirmation of disconnection + SM_DISCONNECT_CFM, + /// Indicates that the SM disassociated the AP + SM_DISCONNECT_IND, + /// Request to start external authentication + SM_EXTERNAL_AUTH_REQUIRED_IND, + /// Response to external authentication request + SM_EXTERNAL_AUTH_REQUIRED_RSP, + /// Request to update assoc elements after FT over the air authentication + SM_FT_AUTH_IND, + /// Response to FT authentication with updated assoc elements + SM_FT_AUTH_RSP, + + SM_RSP_TIMEOUT_IND, + + SM_COEX_TS_TIMEOUT_IND, + + SM_EXTERNAL_AUTH_REQUIRED_RSP_CFM, + + /// MAX number of messages + SM_MAX, +}; + +/// Structure containing the parameters of @ref SM_CONNECT_REQ message. +struct sm_connect_req +{ + /// SSID to connect to + struct mac_ssid ssid; + /// BSSID to connect to (if not specified, set this field to WILDCARD BSSID) + struct mac_addr bssid; + /// Channel on which we have to connect (if not specified, set -1 in the chan.freq field) + struct mac_chan_def chan; + /// Connection flags (see @ref mac_connection_flags) + u32_l flags; + /// Control port Ethertype (in network endianness) + u16_l ctrl_port_ethertype; + /// Length of the association request IEs + u16_l ie_len; + /// Listen interval to be used for this connection + u16_l listen_interval; + /// Flag indicating if the we have to wait for the BC/MC traffic after beacon or not + bool_l dont_wait_bcmc; + /// Authentication type + u8_l auth_type; + /// UAPSD queues (bit0: VO, bit1: VI, bit2: BE, bit3: BK) + u8_l uapsd_queues; + /// VIF index + u8_l vif_idx; + /// Buffer containing the additional information elements to be put in the + /// association request + u32_l ie_buf[64]; +}; + +/// Structure containing the parameters of the @ref SM_CONNECT_CFM message. +struct sm_connect_cfm +{ + /// Status. If 0, it means that the connection procedure will be performed and that + /// a subsequent @ref SM_CONNECT_IND message will be forwarded once the procedure is + /// completed + u8_l status; +}; + +#define SM_ASSOC_IE_LEN 800 +/// Structure containing the parameters of the @ref SM_CONNECT_IND message. 
+struct sm_connect_ind +{ + /// Status code of the connection procedure + u16_l status_code; + /// BSSID + struct mac_addr bssid; + /// Flag indicating if the indication refers to an internal roaming or from a host request + bool_l roamed; + /// Index of the VIF for which the association process is complete + u8_l vif_idx; + /// Index of the STA entry allocated for the AP + u8_l ap_idx; + /// Index of the LMAC channel context the connection is attached to + u8_l ch_idx; + /// Flag indicating if the AP is supporting QoS + bool_l qos; + /// ACM bits set in the AP WMM parameter element + u8_l acm; + /// Length of the AssocReq IEs + u16_l assoc_req_ie_len; + /// Length of the AssocRsp IEs + u16_l assoc_rsp_ie_len; + /// IE buffer + u32_l assoc_ie_buf[SM_ASSOC_IE_LEN/4]; + + u16_l aid; + u8_l band; + u16_l center_freq; + u8_l width; + u32_l center_freq1; + u32_l center_freq2; + + /// EDCA parameters + u32_l ac_param[AC_MAX]; +}; + +/// Structure containing the parameters of the @ref SM_DISCONNECT_REQ message. +struct sm_disconnect_req +{ + /// Reason of the deauthentication. + u16_l reason_code; + /// Index of the VIF. + u8_l vif_idx; +}; + +/// Structure containing the parameters of SM_ASSOCIATION_IND the message +struct sm_association_ind +{ + // MAC ADDR of the STA + struct mac_addr me_mac_addr; +}; + + +/// Structure containing the parameters of the @ref SM_DISCONNECT_IND message. +struct sm_disconnect_ind +{ + /// Reason of the disconnection. + u16_l reason_code; + /// Index of the VIF. + u8_l vif_idx; + /// FT over DS is ongoing + bool_l ft_over_ds; + u8_l reassoc; +}; + +/// Structure containing the parameters of the @ref SM_EXTERNAL_AUTH_REQUIRED_IND +struct sm_external_auth_required_ind +{ + /// Index of the VIF. + u8_l vif_idx; + /// SSID to authenticate to + struct mac_ssid ssid; + /// BSSID to authenticate to + struct mac_addr bssid; + /// AKM suite of the respective authentication + u32_l akm; +}; + +/// Structure containing the parameters of the @ref SM_EXTERNAL_AUTH_REQUIRED_RSP +struct sm_external_auth_required_rsp +{ + /// Index of the VIF. + u8_l vif_idx; + /// Authentication status + u16_l status; +}; + +/////////////////////////////////////////////////////////////////////////////// +/////////// For APM messages +/////////////////////////////////////////////////////////////////////////////// +/// Message API of the APM task +enum apm_msg_tag +{ + /// Request to start the AP. + APM_START_REQ = LMAC_FIRST_MSG(TASK_APM), + /// Confirmation of the AP start. + APM_START_CFM, + /// Request to stop the AP. + APM_STOP_REQ, + /// Confirmation of the AP stop. + APM_STOP_CFM, + /// Request to start CAC + APM_START_CAC_REQ, + /// Confirmation of the CAC start + APM_START_CAC_CFM, + /// Request to stop CAC + APM_STOP_CAC_REQ, + /// Confirmation of the CAC stop + APM_STOP_CAC_CFM, + + APM_SET_BEACON_IE_REQ, + APM_SET_BEACON_IE_CFM, + /// MAX number of messages + APM_MAX, +}; + +/// Structure containing the parameters of the @ref APM_START_REQ message. 
+struct apm_start_req +{ + /// Basic rate set + struct mac_rateset basic_rates; + /// Control channel on which we have to enable the AP + struct mac_chan_def chan; + /// Center frequency of the first segment + u32_l center_freq1; + /// Center frequency of the second segment (only in 80+80 configuration) + u32_l center_freq2; + /// Width of channel + u8_l ch_width; + /// Address, in host memory, to the beacon template + u32_l bcn_addr; + /// Length of the beacon template + u16_l bcn_len; + /// Offset of the TIM IE in the beacon + u16_l tim_oft; + /// Beacon interval + u16_l bcn_int; + /// Flags (@ref mac_connection_flags) + u32_l flags; + /// Control port Ethertype + u16_l ctrl_port_ethertype; + /// Length of the TIM IE + u8_l tim_len; + /// Index of the VIF for which the AP is started + u8_l vif_idx; +}; + +struct apm_set_bcn_ie_req +{ + u8_l vif_idx; + u16_l bcn_ie_len; + u8_l bcn_ie[512]; +}; + +/// Structure containing the parameters of the @ref APM_START_CFM message. +struct apm_start_cfm +{ + /// Status of the AP starting procedure + u8_l status; + /// Index of the VIF for which the AP is started + u8_l vif_idx; + /// Index of the channel context attached to the VIF + u8_l ch_idx; + /// Index of the STA used for BC/MC traffic + u8_l bcmc_idx; +}; + +/// Structure containing the parameters of the @ref APM_STOP_REQ message. +struct apm_stop_req +{ + /// Index of the VIF for which the AP has to be stopped + u8_l vif_idx; +}; + +/// Structure containing the parameters of the @ref APM_START_CAC_REQ message. +struct apm_start_cac_req +{ + /// Control channel on which we have to start the CAC + struct mac_chan_op chan; + /// Center frequency of the first segment + //u32_l center_freq1; + /// Center frequency of the second segment (only in 80+80 configuration) + //u32_l center_freq2; + /// Width of channel + //u8_l ch_width; + /// Index of the VIF for which the CAC is started + u8_l vif_idx; +}; + +/// Structure containing the parameters of the @ref APM_START_CAC_CFM message. +struct apm_start_cac_cfm +{ + /// Status of the CAC starting procedure + u8_l status; + /// Index of the channel context attached to the VIF for CAC + u8_l ch_idx; +}; + +/// Structure containing the parameters of the @ref APM_STOP_CAC_REQ message. +struct apm_stop_cac_req +{ + /// Index of the VIF for which the CAC has to be stopped + u8_l vif_idx; +}; + +/////////////////////////////////////////////////////////////////////////////// +/////////// For MESH messages +/////////////////////////////////////////////////////////////////////////////// + +/// Maximum length of the Mesh ID +#define MESH_MESHID_MAX_LEN (32) + +/// Message API of the MESH task +enum mesh_msg_tag +{ + /// Request to start the MP + MESH_START_REQ = LMAC_FIRST_MSG(TASK_MESH), + /// Confirmation of the MP start. + MESH_START_CFM, + + /// Request to stop the MP. + MESH_STOP_REQ, + /// Confirmation of the MP stop. 
+ MESH_STOP_CFM, + + // Request to update the MP + MESH_UPDATE_REQ, + /// Confirmation of the MP update + MESH_UPDATE_CFM, + + /// Request information about a given link + MESH_PEER_INFO_REQ, + /// Response to the MESH_PEER_INFO_REQ message + MESH_PEER_INFO_CFM, + + /// Request automatic establishment of a path with a given mesh STA + MESH_PATH_CREATE_REQ, + /// Confirmation to the MESH_PATH_CREATE_REQ message + MESH_PATH_CREATE_CFM, + + /// Request a path update (delete path, modify next hop mesh STA) + MESH_PATH_UPDATE_REQ, + /// Confirmation to the MESH_PATH_UPDATE_REQ message + MESH_PATH_UPDATE_CFM, + + /// Indication from Host that the indicated Mesh Interface is a proxy for an external STA + MESH_PROXY_ADD_REQ, + + /// Indicate that a connection has been established or lost + MESH_PEER_UPDATE_IND, + /// Notification that a connection has been established or lost (when MPM handled by userspace) + MESH_PEER_UPDATE_NTF = MESH_PEER_UPDATE_IND, + + /// Indicate that a path is now active or inactive + MESH_PATH_UPDATE_IND, + /// Indicate that proxy information have been updated + MESH_PROXY_UPDATE_IND, + + /// MAX number of messages + MESH_MAX, +}; + +/// Structure containing the parameters of the @ref MESH_START_REQ message. +struct mesh_start_req +{ + /// Basic rate set + struct mac_rateset basic_rates; + /// Control channel on which we have to enable the AP + struct mac_chan_def chan; + /// Center frequency of the first segment + u32_l center_freq1; + /// Center frequency of the second segment (only in 80+80 configuration) + u32_l center_freq2; + /// Width of channel + u8_l ch_width; + /// DTIM Period + u8_l dtim_period; + /// Beacon Interval + u16_l bcn_int; + /// Index of the VIF for which the MP is started + u8_l vif_index; + /// Length of the Mesh ID + u8_l mesh_id_len; + /// Mesh ID + u8_l mesh_id[MESH_MESHID_MAX_LEN]; + /// Address of the IEs to download + u32_l ie_addr; + /// Length of the provided IEs + u8_l ie_len; + /// Indicate if Mesh Peering Management (MPM) protocol is handled in userspace + bool_l user_mpm; + /// Indicate if Mesh Point is using authentication + bool_l is_auth; + /// Indicate which authentication method is used + u8_l auth_id; +}; + +/// Structure containing the parameters of the @ref MESH_START_CFM message. +struct mesh_start_cfm +{ + /// Status of the MP starting procedure + u8_l status; + /// Index of the VIF for which the MP is started + u8_l vif_idx; + /// Index of the channel context attached to the VIF + u8_l ch_idx; + /// Index of the STA used for BC/MC traffic + u8_l bcmc_idx; +}; + +/// Structure containing the parameters of the @ref MESH_STOP_REQ message. +struct mesh_stop_req +{ + /// Index of the VIF for which the MP has to be stopped + u8_l vif_idx; +}; + +/// Structure containing the parameters of the @ref MESH_STOP_CFM message. +struct mesh_stop_cfm +{ + /// Index of the VIF for which the MP has to be stopped + u8_l vif_idx; + /// Status + u8_l status; +}; + +/// Bit fields for mesh_update_req message's flags value +enum mesh_update_flags_bit +{ + /// Root Mode + MESH_UPDATE_FLAGS_ROOT_MODE_BIT = 0, + /// Gate Mode + MESH_UPDATE_FLAGS_GATE_MODE_BIT, + /// Mesh Forwarding + MESH_UPDATE_FLAGS_MESH_FWD_BIT, + /// Local Power Save Mode + MESH_UPDATE_FLAGS_LOCAL_PSM_BIT, +}; + +/// Structure containing the parameters of the @ref MESH_UPDATE_REQ message. 
+struct mesh_update_req +{ + /// Flags, indicate fields which have been updated + u8_l flags; + /// VIF Index + u8_l vif_idx; + /// Root Mode + u8_l root_mode; + /// Gate Announcement + bool_l gate_announ; + /// Mesh Forwarding + bool_l mesh_forward; + /// Local PS Mode + u8_l local_ps_mode; +}; + +/// Structure containing the parameters of the @ref MESH_UPDATE_CFM message. +struct mesh_update_cfm +{ + /// Status + u8_l status; +}; + +/// Structure containing the parameters of the @ref MESH_PEER_INFO_REQ message. +struct mesh_peer_info_req +{ + ///Index of the station allocated for the peer + u8_l sta_idx; +}; + +/// Structure containing the parameters of the @ref MESH_PEER_INFO_CFM message. +struct mesh_peer_info_cfm +{ + /// Response status + u8_l status; + /// Index of the station allocated for the peer + u8_l sta_idx; + /// Local Link ID + u16_l local_link_id; + /// Peer Link ID + u16_l peer_link_id; + /// Local PS Mode + u8_l local_ps_mode; + /// Peer PS Mode + u8_l peer_ps_mode; + /// Non-peer PS Mode + u8_l non_peer_ps_mode; + /// Link State + u8_l link_state; +}; + +/// Structure containing the parameters of the @ref MESH_PATH_CREATE_REQ message. +struct mesh_path_create_req +{ + /// Index of the interface on which path has to be created + u8_l vif_idx; + /// Indicate if originator MAC Address is provided + bool_l has_orig_addr; + /// Path Target MAC Address + struct mac_addr tgt_mac_addr; + /// Originator MAC Address + struct mac_addr orig_mac_addr; +}; + +/// Structure containing the parameters of the @ref MESH_PATH_CREATE_CFM message. +struct mesh_path_create_cfm +{ + /// Confirmation status + u8_l status; + /// VIF Index + u8_l vif_idx; +}; + +/// Structure containing the parameters of the @ref MESH_PATH_UPDATE_REQ message. +struct mesh_path_update_req +{ + /// Indicate if path must be deleted + bool_l delete; + /// Index of the interface on which path has to be created + u8_l vif_idx; + /// Path Target MAC Address + struct mac_addr tgt_mac_addr; + /// Next Hop MAC Address + struct mac_addr nhop_mac_addr; +}; + +/// Structure containing the parameters of the @ref MESH_PATH_UPDATE_CFM message. +struct mesh_path_update_cfm +{ + /// Confirmation status + u8_l status; + /// VIF Index + u8_l vif_idx; +}; + +/// Structure containing the parameters of the @ref MESH_PROXY_ADD_REQ message. +struct mesh_proxy_add_req +{ + /// VIF Index + u8_l vif_idx; + /// MAC Address of the External STA + struct mac_addr ext_sta_addr; +}; + +/// Structure containing the parameters of the @ref MESH_PROXY_UPDATE_IND +struct mesh_proxy_update_ind +{ + /// Indicate if proxy information has been added or deleted + bool_l delete; + /// Indicate if we are a proxy for the external STA + bool_l local; + /// VIF Index + u8_l vif_idx; + /// MAC Address of the External STA + struct mac_addr ext_sta_addr; + /// MAC Address of the proxy (only valid if local is false) + struct mac_addr proxy_mac_addr; +}; + +/// Structure containing the parameters of the @ref MESH_PEER_UPDATE_IND message. +struct mesh_peer_update_ind +{ + /// Indicate if connection has been established or lost + bool_l estab; + /// VIF Index + u8_l vif_idx; + /// STA Index + u8_l sta_idx; + /// Peer MAC Address + struct mac_addr peer_addr; +}; + +/// Structure containing the parameters of the @ref MESH_PEER_UPDATE_NTF message. +struct mesh_peer_update_ntf +{ + /// VIF Index + u8_l vif_idx; + /// STA Index + u8_l sta_idx; + /// Mesh Link State + u8_l state; +}; + +/// Structure containing the parameters of the @ref MESH_PATH_UPDATE_IND message. 
+struct mesh_path_update_ind +{ + /// Indicate if path is deleted or not + bool_l delete; + /// Indicate if path is towards an external STA (not part of MBSS) + bool_l ext_sta; + /// VIF Index + u8_l vif_idx; + /// Path Index + u8_l path_idx; + /// Target MAC Address + struct mac_addr tgt_mac_addr; + /// External STA MAC Address (only if ext_sta is true) + struct mac_addr ext_sta_mac_addr; + /// Next Hop STA Index + u8_l nhop_sta_idx; +}; + +/////////////////////////////////////////////////////////////////////////////// +/////////// For Debug messages +/////////////////////////////////////////////////////////////////////////////// + +/// Messages related to Debug Task +enum dbg_msg_tag +{ + /// Memory read request + DBG_MEM_READ_REQ = LMAC_FIRST_MSG(TASK_DBG), + /// Memory read confirm + DBG_MEM_READ_CFM, + /// Memory write request + DBG_MEM_WRITE_REQ, + /// Memory write confirm + DBG_MEM_WRITE_CFM, + /// Module filter request + DBG_SET_MOD_FILTER_REQ, + /// Module filter confirm + DBG_SET_MOD_FILTER_CFM, + /// Severity filter request + DBG_SET_SEV_FILTER_REQ, + /// Severity filter confirm + DBG_SET_SEV_FILTER_CFM, + /// LMAC/MAC HW fatal error indication + DBG_ERROR_IND, + /// Request to get system statistics + DBG_GET_SYS_STAT_REQ, + /// COnfirmation of system statistics + DBG_GET_SYS_STAT_CFM, + /// Memory block write request + DBG_MEM_BLOCK_WRITE_REQ, + /// Memory block write confirm + DBG_MEM_BLOCK_WRITE_CFM, + /// Start app request + DBG_START_APP_REQ, + /// Start app confirm + DBG_START_APP_CFM, + /// Start npc request + DBG_START_NPC_REQ, + /// Start npc confirm + DBG_START_NPC_CFM, + /// Memory mask write request + DBG_MEM_MASK_WRITE_REQ, + /// Memory mask write confirm + DBG_MEM_MASK_WRITE_CFM, + + DBG_RFTEST_CMD_REQ, + DBG_RFTEST_CMD_CFM, + DBG_BINDING_REQ, + DBG_BINDING_CFM, + DBG_BINDING_IND, + + DBG_CUSTOM_MSG_REQ, + DBG_CUSTOM_MSG_CFM, + DBG_CUSTOM_MSG_IND, + + DBG_GPIO_WRITE_REQ, + DBG_GPIO_WRITE_CFM, + DBG_GPIO_READ_REQ, + DBG_GPIO_READ_CFM, + DBG_GPIO_INIT_REQ, + DBG_GPIO_INIT_CFM, + + /// EF usrdata read request + DBG_EF_USRDATA_READ_REQ, + /// EF usrdata read confirm + DBG_EF_USRDATA_READ_CFM, + /// Memory block read request + DBG_MEM_BLOCK_READ_REQ, + /// Memory block read confirm + DBG_MEM_BLOCK_READ_CFM, + + DBG_PWM_INIT_REQ, + DBG_PWM_INIT_CFM, + DBG_PWM_DEINIT_REQ, + DBG_PWM_DEINIT_CFM, + + /// Max number of Debug messages + DBG_MAX, +}; + +/// Structure containing the parameters of the @ref DBG_MEM_READ_REQ message. +struct dbg_mem_read_req +{ + u32_l memaddr; +}; + +/// Structure containing the parameters of the @ref DBG_MEM_READ_CFM message. +struct dbg_mem_read_cfm +{ + u32_l memaddr; + u32_l memdata; +}; + +/// Structure containing the parameters of the @ref DBG_MEM_WRITE_REQ message. +struct dbg_mem_write_req +{ + u32_l memaddr; + u32_l memdata; +}; + +/// Structure containing the parameters of the @ref DBG_MEM_WRITE_CFM message. +struct dbg_mem_write_cfm +{ + u32_l memaddr; + u32_l memdata; +}; + +/// Structure containing the parameters of the @ref DBG_MEM_MASK_WRITE_REQ message. +struct dbg_mem_mask_write_req +{ + u32_l memaddr; + u32_l memmask; + u32_l memdata; +}; + +/// Structure containing the parameters of the @ref DBG_MEM_MASK_WRITE_CFM message. 
+struct dbg_mem_mask_write_cfm +{ + u32_l memaddr; + u32_l memdata; +}; + +struct dbg_rftest_cmd_req +{ + u32_l cmd; + u32_l argc; + u8_l argv[30]; +}; + +struct dbg_rftest_cmd_cfm +{ + u32_l rftest_result[32]; +}; + +struct dbg_gpio_write_req { + uint8_t gpio_idx; + uint8_t gpio_val; +}; + +struct dbg_gpio_read_req { + uint8_t gpio_idx; +}; + +struct dbg_gpio_read_cfm { + uint8_t gpio_idx; + uint8_t gpio_val; +}; + +struct dbg_gpio_init_req { + uint8_t gpio_idx; + uint8_t gpio_dir; //1 output, 0 input; + uint8_t gpio_val; //for output, 1 high, 0 low; +}; + +/// Structure containing the parameters of the @ref DBG_SET_MOD_FILTER_REQ message. +struct dbg_set_mod_filter_req +{ + /// Bit field indicating for each module if the traces are enabled or not + u32_l mod_filter; +}; + +/// Structure containing the parameters of the @ref DBG_SEV_MOD_FILTER_REQ message. +struct dbg_set_sev_filter_req +{ + /// Bit field indicating the severity threshold for the traces + u32_l sev_filter; +}; + +/// Structure containing the parameters of the @ref DBG_GET_SYS_STAT_CFM message. +struct dbg_get_sys_stat_cfm +{ + /// Time spent in CPU sleep since last reset of the system statistics + u32_l cpu_sleep_time; + /// Time spent in DOZE since last reset of the system statistics + u32_l doze_time; + /// Total time spent since last reset of the system statistics + u32_l stats_time; +}; + +/// Structure containing the parameters of the @ref DBG_MEM_BLOCK_WRITE_REQ message. +struct dbg_mem_block_write_req +{ + u32_l memaddr; + u32_l memsize; + u32_l memdata[512 / sizeof(u32_l)];//1024 for 8801 +}; + +/// Structure containing the parameters of the @ref DBG_MEM_BLOCK_WRITE_CFM message. +struct dbg_mem_block_write_cfm +{ + u32_l wstatus; +}; + +/// Structure containing the parameters of the @ref DBG_MEM_BLOCK_READ_REQ message. +struct dbg_mem_block_read_req +{ + u32_l memaddr; + u32_l memsize; +}; + +/// Structure containing the parameters of the @ref DBG_MEM_BLOCK_READ_CFM message. +struct dbg_mem_block_read_cfm +{ + u32_l memaddr; + u32_l memsize; + u32_l memdata[1024 / sizeof(u32_l)]; +}; + +/// Structure containing the parameters of the @ref DBG_START_APP_REQ message. +struct dbg_start_app_req +{ + u32_l bootaddr; + u32_l boottype; +}; + +/// Structure containing the parameters of the @ref DBG_START_APP_CFM message. +struct dbg_start_app_cfm +{ + u32_l bootstatus; +}; + +enum { + HOST_START_APP_AUTO = 1, + HOST_START_APP_CUSTOM, +//#ifdef CONFIG_USB_BT + HOST_START_APP_REBOOT, +//#endif // (CONFIG_USB_BT) + HOST_START_APP_FNCALL = 4, + HOST_START_APP_DUMMY = 5, +}; + + + +/////////////////////////////////////////////////////////////////////////////// +/////////// For TDLS messages +/////////////////////////////////////////////////////////////////////////////// + +/// List of messages related to the task. +enum tdls_msg_tag +{ + /// TDLS channel Switch Request. + TDLS_CHAN_SWITCH_REQ = LMAC_FIRST_MSG(TASK_TDLS), + /// TDLS channel switch confirmation. + TDLS_CHAN_SWITCH_CFM, + /// TDLS channel switch indication. + TDLS_CHAN_SWITCH_IND, + /// TDLS channel switch to base channel indication. + TDLS_CHAN_SWITCH_BASE_IND, + /// TDLS cancel channel switch request. + TDLS_CANCEL_CHAN_SWITCH_REQ, + /// TDLS cancel channel switch confirmation. + TDLS_CANCEL_CHAN_SWITCH_CFM, + /// TDLS peer power save indication. + TDLS_PEER_PS_IND, + /// TDLS peer traffic indication request. + TDLS_PEER_TRAFFIC_IND_REQ, + /// TDLS peer traffic indication confirmation. 
+ TDLS_PEER_TRAFFIC_IND_CFM, + /// MAX number of messages + TDLS_MAX +}; + +/// Structure containing the parameters of the @ref TDLS_CHAN_SWITCH_REQ message +struct tdls_chan_switch_req +{ + /// Index of the VIF + u8_l vif_index; + /// STA Index + u8_l sta_idx; + /// MAC address of the TDLS station + struct mac_addr peer_mac_addr; + bool_l initiator; + /// Band (2.4GHz or 5GHz) + u8_l band; + /// Channel type: 20,40,80,160 or 80+80 MHz + u8_l type; + /// Frequency for Primary 20MHz channel (in MHz) + u16_l prim20_freq; + /// Frequency for Center of the contiguous channel or center of Primary 80+80 + u16_l center1_freq; + /// Frequency for Center of the non-contiguous secondary 80+80 + u16_l center2_freq; + /// TX power (in dBm) + s8_l tx_power; + /// Operating class + u8_l op_class; +}; + +/// Structure containing the parameters of the @ref TDLS_CANCEL_CHAN_SWITCH_REQ message +struct tdls_cancel_chan_switch_req +{ + /// Index of the VIF + u8_l vif_index; + /// STA Index + u8_l sta_idx; + /// MAC address of the TDLS station + struct mac_addr peer_mac_addr; +}; + + +/// Structure containing the parameters of the @ref TDLS_CHAN_SWITCH_CFM message +struct tdls_chan_switch_cfm +{ + /// Status of the operation + u8_l status; +}; + +/// Structure containing the parameters of the @ref TDLS_CANCEL_CHAN_SWITCH_CFM message +struct tdls_cancel_chan_switch_cfm +{ + /// Status of the operation + u8_l status; +}; + +/// Structure containing the parameters of the @ref TDLS_CHAN_SWITCH_IND message +struct tdls_chan_switch_ind +{ + /// VIF Index + u8_l vif_index; + /// Channel Context Index + u8_l chan_ctxt_index; + /// Status of the operation + u8_l status; +}; + +/// Structure containing the parameters of the @ref TDLS_CHAN_SWITCH_BASE_IND message +struct tdls_chan_switch_base_ind +{ + /// VIF Index + u8_l vif_index; + /// Channel Context index + u8_l chan_ctxt_index; +}; + +/// Structure containing the parameters of the @ref TDLS_PEER_PS_IND message +struct tdls_peer_ps_ind +{ + /// VIF Index + u8_l vif_index; + /// STA Index + u8_l sta_idx; + /// MAC ADDR of the TDLS STA + struct mac_addr peer_mac_addr; + /// Flag to indicate if the TDLS peer is going to sleep + bool ps_on; +}; + +/// Structure containing the parameters of the @ref TDLS_PEER_TRAFFIC_IND_REQ message +struct tdls_peer_traffic_ind_req +{ + /// VIF Index + u8_l vif_index; + /// STA Index + u8_l sta_idx; + // MAC ADDR of the TDLS STA + struct mac_addr peer_mac_addr; + /// Dialog token + u8_l dialog_token; + /// TID of the latest MPDU transmitted over the TDLS direct link to the TDLS STA + u8_l last_tid; + /// Sequence number of the latest MPDU transmitted over the TDLS direct link + /// to the TDLS STA + u16_l last_sn; +}; + +/// Structure containing the parameters of the @ref TDLS_PEER_TRAFFIC_IND_CFM message +struct tdls_peer_traffic_ind_cfm +{ + /// Status of the operation + u8_l status; +}; + +struct mm_set_wakeup_info_req { + u32_l hwconfig_id; + u16_l code; + u16_l offset; + u16_l length; + u8_l mask_and_pattern[]; + }; +#endif // LMAC_MSG_H_ diff --git a/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/lmac_types.h b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/lmac_types.h new file mode 100755 index 0000000..83b1122 --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/lmac_types.h @@ -0,0 +1,62 @@ +/** + **************************************************************************************** + * + * @file co_types.h + * + * @brief This file replaces the need to include stdint or stdbool typical headers, + * which may not be 
available in all toolchains, and adds new types + * + * Copyright (C) RivieraWaves 2009-2019 + * + * $Rev: $ + * + **************************************************************************************** + */ + +#ifndef _LMAC_INT_H_ +#define _LMAC_INT_H_ + + +/** + **************************************************************************************** + * @addtogroup CO_INT + * @ingroup COMMON + * @brief Common integer standard types (removes use of stdint) + * + * @{ + **************************************************************************************** + */ + + +/* + * DEFINES + **************************************************************************************** + */ + +#include +#include +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0) +#include +#else +#include +#endif + +#ifdef CONFIG_RWNX_TL4 +typedef uint16_t u8_l; +typedef int16_t s8_l; +typedef uint16_t bool_l; +#else +typedef uint8_t u8_l; +typedef int8_t s8_l; +typedef bool bool_l; +#endif +typedef uint16_t u16_l; +typedef int16_t s16_l; +typedef uint32_t u32_l; +typedef int32_t s32_l; +typedef uint64_t u64_l; + + + +/// @} CO_INT +#endif // _LMAC_INT_H_ diff --git a/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/md5.c b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/md5.c new file mode 100644 index 0000000..3d7b653 --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/md5.c @@ -0,0 +1,161 @@ +#include +#include "md5.h" + +unsigned char PADDING[]={0x80,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, + 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, + 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, + 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}; + +void MD5Init(MD5_CTX *context) +{ + context->count[0] = 0; + context->count[1] = 0; + context->state[0] = 0x67452301; + context->state[1] = 0xEFCDAB89; + context->state[2] = 0x98BADCFE; + context->state[3] = 0x10325476; +} +void MD5Update(MD5_CTX *context,unsigned char *input,unsigned int inputlen) +{ + unsigned int i = 0,index = 0,partlen = 0; + index = (context->count[0] >> 3) & 0x3F; + partlen = 64 - index; + context->count[0] += inputlen << 3; + if(context->count[0] < (inputlen << 3)) + context->count[1]++; + context->count[1] += inputlen >> 29; + + if(inputlen >= partlen) + { + memcpy(&context->buffer[index],input,partlen); + MD5Transform(context->state,context->buffer); + for(i = partlen;i+64 <= inputlen;i+=64) + MD5Transform(context->state,&input[i]); + index = 0; + } + else + { + i = 0; + } + memcpy(&context->buffer[index],&input[i],inputlen-i); +} +void MD5Final(MD5_CTX *context,unsigned char digest[16]) +{ + unsigned int index = 0,padlen = 0; + unsigned char bits[8]; + index = (context->count[0] >> 3) & 0x3F; + padlen = (index < 56)?(56-index):(120-index); + MD5Encode(bits,context->count,8); + MD5Update(context,PADDING,padlen); + MD5Update(context,bits,8); + MD5Encode(digest,context->state,16); +} +void MD5Encode(unsigned char *output,unsigned int *input,unsigned int len) +{ + unsigned int i = 0,j = 0; + while(j < len) + { + output[j] = input[i] & 0xFF; + output[j+1] = (input[i] >> 8) & 0xFF; + output[j+2] = (input[i] >> 16) & 0xFF; + output[j+3] = (input[i] >> 24) & 0xFF; + i++; + j+=4; + } +} +void MD5Decode(unsigned int *output,unsigned char *input,unsigned int len) +{ + unsigned int i = 0,j = 0; + while(j < len) + { + output[i] = (input[j]) | + (input[j+1] << 8) | + (input[j+2] << 16) | + (input[j+3] << 24); + i++; + j+=4; + } +} +void MD5Transform(unsigned int state[4],unsigned char block[64]) +{ + unsigned int a = state[0]; + unsigned int b = state[1]; + unsigned int c = state[2]; + unsigned int d = state[3]; 
+ unsigned int x[64]; + MD5Decode(x,block,64); + FF(a, b, c, d, x[ 0], 7, 0xd76aa478); /* 1 */ + FF(d, a, b, c, x[ 1], 12, 0xe8c7b756); /* 2 */ + FF(c, d, a, b, x[ 2], 17, 0x242070db); /* 3 */ + FF(b, c, d, a, x[ 3], 22, 0xc1bdceee); /* 4 */ + FF(a, b, c, d, x[ 4], 7, 0xf57c0faf); /* 5 */ + FF(d, a, b, c, x[ 5], 12, 0x4787c62a); /* 6 */ + FF(c, d, a, b, x[ 6], 17, 0xa8304613); /* 7 */ + FF(b, c, d, a, x[ 7], 22, 0xfd469501); /* 8 */ + FF(a, b, c, d, x[ 8], 7, 0x698098d8); /* 9 */ + FF(d, a, b, c, x[ 9], 12, 0x8b44f7af); /* 10 */ + FF(c, d, a, b, x[10], 17, 0xffff5bb1); /* 11 */ + FF(b, c, d, a, x[11], 22, 0x895cd7be); /* 12 */ + FF(a, b, c, d, x[12], 7, 0x6b901122); /* 13 */ + FF(d, a, b, c, x[13], 12, 0xfd987193); /* 14 */ + FF(c, d, a, b, x[14], 17, 0xa679438e); /* 15 */ + FF(b, c, d, a, x[15], 22, 0x49b40821); /* 16 */ + + /* Round 2 */ + GG(a, b, c, d, x[ 1], 5, 0xf61e2562); /* 17 */ + GG(d, a, b, c, x[ 6], 9, 0xc040b340); /* 18 */ + GG(c, d, a, b, x[11], 14, 0x265e5a51); /* 19 */ + GG(b, c, d, a, x[ 0], 20, 0xe9b6c7aa); /* 20 */ + GG(a, b, c, d, x[ 5], 5, 0xd62f105d); /* 21 */ + GG(d, a, b, c, x[10], 9, 0x2441453); /* 22 */ + GG(c, d, a, b, x[15], 14, 0xd8a1e681); /* 23 */ + GG(b, c, d, a, x[ 4], 20, 0xe7d3fbc8); /* 24 */ + GG(a, b, c, d, x[ 9], 5, 0x21e1cde6); /* 25 */ + GG(d, a, b, c, x[14], 9, 0xc33707d6); /* 26 */ + GG(c, d, a, b, x[ 3], 14, 0xf4d50d87); /* 27 */ + GG(b, c, d, a, x[ 8], 20, 0x455a14ed); /* 28 */ + GG(a, b, c, d, x[13], 5, 0xa9e3e905); /* 29 */ + GG(d, a, b, c, x[ 2], 9, 0xfcefa3f8); /* 30 */ + GG(c, d, a, b, x[ 7], 14, 0x676f02d9); /* 31 */ + GG(b, c, d, a, x[12], 20, 0x8d2a4c8a); /* 32 */ + + /* Round 3 */ + HH(a, b, c, d, x[ 5], 4, 0xfffa3942); /* 33 */ + HH(d, a, b, c, x[ 8], 11, 0x8771f681); /* 34 */ + HH(c, d, a, b, x[11], 16, 0x6d9d6122); /* 35 */ + HH(b, c, d, a, x[14], 23, 0xfde5380c); /* 36 */ + HH(a, b, c, d, x[ 1], 4, 0xa4beea44); /* 37 */ + HH(d, a, b, c, x[ 4], 11, 0x4bdecfa9); /* 38 */ + HH(c, d, a, b, x[ 7], 16, 0xf6bb4b60); /* 39 */ + HH(b, c, d, a, x[10], 23, 0xbebfbc70); /* 40 */ + HH(a, b, c, d, x[13], 4, 0x289b7ec6); /* 41 */ + HH(d, a, b, c, x[ 0], 11, 0xeaa127fa); /* 42 */ + HH(c, d, a, b, x[ 3], 16, 0xd4ef3085); /* 43 */ + HH(b, c, d, a, x[ 6], 23, 0x4881d05); /* 44 */ + HH(a, b, c, d, x[ 9], 4, 0xd9d4d039); /* 45 */ + HH(d, a, b, c, x[12], 11, 0xe6db99e5); /* 46 */ + HH(c, d, a, b, x[15], 16, 0x1fa27cf8); /* 47 */ + HH(b, c, d, a, x[ 2], 23, 0xc4ac5665); /* 48 */ + + /* Round 4 */ + II(a, b, c, d, x[ 0], 6, 0xf4292244); /* 49 */ + II(d, a, b, c, x[ 7], 10, 0x432aff97); /* 50 */ + II(c, d, a, b, x[14], 15, 0xab9423a7); /* 51 */ + II(b, c, d, a, x[ 5], 21, 0xfc93a039); /* 52 */ + II(a, b, c, d, x[12], 6, 0x655b59c3); /* 53 */ + II(d, a, b, c, x[ 3], 10, 0x8f0ccc92); /* 54 */ + II(c, d, a, b, x[10], 15, 0xffeff47d); /* 55 */ + II(b, c, d, a, x[ 1], 21, 0x85845dd1); /* 56 */ + II(a, b, c, d, x[ 8], 6, 0x6fa87e4f); /* 57 */ + II(d, a, b, c, x[15], 10, 0xfe2ce6e0); /* 58 */ + II(c, d, a, b, x[ 6], 15, 0xa3014314); /* 59 */ + II(b, c, d, a, x[13], 21, 0x4e0811a1); /* 60 */ + II(a, b, c, d, x[ 4], 6, 0xf7537e82); /* 61 */ + II(d, a, b, c, x[11], 10, 0xbd3af235); /* 62 */ + II(c, d, a, b, x[ 2], 15, 0x2ad7d2bb); /* 63 */ + II(b, c, d, a, x[ 9], 21, 0xeb86d391); /* 64 */ + state[0] += a; + state[1] += b; + state[2] += c; + state[3] += d; +} diff --git a/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/md5.h b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/md5.h new file mode 100644 index 0000000..6ed5c0f --- /dev/null +++ 
b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/md5.h @@ -0,0 +1,48 @@ +#ifndef MD5_H +#define MD5_H + +typedef struct +{ + unsigned int count[2]; + unsigned int state[4]; + unsigned char buffer[64]; +}MD5_CTX; + + +#define F(x,y,z) ((x & y) | (~x & z)) +#define G(x,y,z) ((x & z) | (y & ~z)) +#define H(x,y,z) (x^y^z) +#define I(x,y,z) (y ^ (x | ~z)) +#define ROTATE_LEFT(x,n) ((x << n) | (x >> (32-n))) +#define FF(a,b,c,d,x,s,ac) \ + { \ + a += F(b,c,d) + x + ac; \ + a = ROTATE_LEFT(a,s); \ + a += b; \ + } +#define GG(a,b,c,d,x,s,ac) \ + { \ + a += G(b,c,d) + x + ac; \ + a = ROTATE_LEFT(a,s); \ + a += b; \ + } +#define HH(a,b,c,d,x,s,ac) \ + { \ + a += H(b,c,d) + x + ac; \ + a = ROTATE_LEFT(a,s); \ + a += b; \ + } +#define II(a,b,c,d,x,s,ac) \ + { \ + a += I(b,c,d) + x + ac; \ + a = ROTATE_LEFT(a,s); \ + a += b; \ + } +void MD5Init(MD5_CTX *context); +void MD5Update(MD5_CTX *context,unsigned char *input,unsigned int inputlen); +void MD5Final(MD5_CTX *context,unsigned char digest[16]); +void MD5Transform(unsigned int state[4],unsigned char block[64]); +void MD5Encode(unsigned char *output,unsigned int *input,unsigned int len); +void MD5Decode(unsigned int *output,unsigned char *input,unsigned int len); + +#endif diff --git a/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/reg_access.h b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/reg_access.h new file mode 100644 index 0000000..112ef03 --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/reg_access.h @@ -0,0 +1,161 @@ +/** + ****************************************************************************** + * + * @file reg_access.h + * + * @brief Definitions and macros for MAC HW and platform register accesses + * + * Copyright (C) RivieraWaves 2011-2019 + * + ****************************************************************************** + */ + +#ifndef REG_ACCESS_H_ +#define REG_ACCESS_H_ + +/***************************************************************************** + * Addresses within RWNX_ADDR_CPU + *****************************************************************************/ +#define RAM_LMAC_FW_ADDR 0x00150000 + +#define ROM_FMAC_FW_ADDR 0x00010000 +#define RAM_FMAC_FW_ADDR 0x00120000 +#define ROM_FMAC_PATCH_ADDR 0x00180000 +#if defined(CONFIG_DPD) || defined(CONFIG_LOFT_CALIB) +#define ROM_FMAC_CALIB_ADDR 0x00130000 +#endif + + +/***************************************************************************** + * Addresses within RWNX_ADDR_SYSTEM + *****************************************************************************/ +/* Shard RAM */ +#define SHARED_RAM_START_ADDR 0x00000000 + +/* IPC registers */ +#define IPC_REG_BASE_ADDR 0x00800000 + +/* System Controller Registers */ +#define SYSCTRL_SIGNATURE_ADDR 0x00900000 +// old diag register name +#define SYSCTRL_DIAG_CONF_ADDR 0x00900068 +#define SYSCTRL_PHYDIAG_CONF_ADDR 0x00900074 +#define SYSCTRL_RIUDIAG_CONF_ADDR 0x00900078 +// new diag register name +#define SYSCTRL_DIAG_CONF0 0x00900064 +#define SYSCTRL_DIAG_CONF1 0x00900068 +#define SYSCTRL_DIAG_CONF2 0x00900074 +#define SYSCTRL_DIAG_CONF3 0x00900078 +#define SYSCTRL_MISC_CNTL_ADDR 0x009000E0 +#define BOOTROM_ENABLE BIT(4) +#define FPGA_B_RESET BIT(1) +#define SOFT_RESET BIT(0) + +/* MAC platform */ +#define NXMAC_VERSION_1_ADDR 0x00B00004 +#define NXMAC_MU_MIMO_TX_BIT BIT(19) +#define NXMAC_BFMER_BIT BIT(18) +#define NXMAC_BFMEE_BIT BIT(17) +#define NXMAC_MAC_80211MH_FORMAT_BIT BIT(16) +#define NXMAC_COEX_BIT BIT(14) +#define NXMAC_WAPI_BIT BIT(13) +#define NXMAC_TPC_BIT BIT(12) +#define NXMAC_VHT_BIT BIT(11) 
+#define NXMAC_HT_BIT BIT(10) +#define NXMAC_RCE_BIT BIT(8) +#define NXMAC_CCMP_BIT BIT(7) +#define NXMAC_TKIP_BIT BIT(6) +#define NXMAC_WEP_BIT BIT(5) +#define NXMAC_SECURITY_BIT BIT(4) +#define NXMAC_SME_BIT BIT(3) +#define NXMAC_HCCA_BIT BIT(2) +#define NXMAC_EDCA_BIT BIT(1) +#define NXMAC_QOS_BIT BIT(0) + +#define NXMAC_RX_CNTRL_ADDR 0x00B00060 +#define NXMAC_EN_DUPLICATE_DETECTION_BIT BIT(31) +#define NXMAC_ACCEPT_UNKNOWN_BIT BIT(30) +#define NXMAC_ACCEPT_OTHER_DATA_FRAMES_BIT BIT(29) +#define NXMAC_ACCEPT_QO_S_NULL_BIT BIT(28) +#define NXMAC_ACCEPT_QCFWO_DATA_BIT BIT(27) +#define NXMAC_ACCEPT_Q_DATA_BIT BIT(26) +#define NXMAC_ACCEPT_CFWO_DATA_BIT BIT(25) +#define NXMAC_ACCEPT_DATA_BIT BIT(24) +#define NXMAC_ACCEPT_OTHER_CNTRL_FRAMES_BIT BIT(23) +#define NXMAC_ACCEPT_CF_END_BIT BIT(22) +#define NXMAC_ACCEPT_ACK_BIT BIT(21) +#define NXMAC_ACCEPT_CTS_BIT BIT(20) +#define NXMAC_ACCEPT_RTS_BIT BIT(19) +#define NXMAC_ACCEPT_PS_POLL_BIT BIT(18) +#define NXMAC_ACCEPT_BA_BIT BIT(17) +#define NXMAC_ACCEPT_BAR_BIT BIT(16) +#define NXMAC_ACCEPT_OTHER_MGMT_FRAMES_BIT BIT(15) +#define NXMAC_ACCEPT_BFMEE_FRAMES_BIT BIT(14) +#define NXMAC_ACCEPT_ALL_BEACON_BIT BIT(13) +#define NXMAC_ACCEPT_NOT_EXPECTED_BA_BIT BIT(12) +#define NXMAC_ACCEPT_DECRYPT_ERROR_FRAMES_BIT BIT(11) +#define NXMAC_ACCEPT_BEACON_BIT BIT(10) +#define NXMAC_ACCEPT_PROBE_RESP_BIT BIT(9) +#define NXMAC_ACCEPT_PROBE_REQ_BIT BIT(8) +#define NXMAC_ACCEPT_MY_UNICAST_BIT BIT(7) +#define NXMAC_ACCEPT_UNICAST_BIT BIT(6) +#define NXMAC_ACCEPT_ERROR_FRAMES_BIT BIT(5) +#define NXMAC_ACCEPT_OTHER_BSSID_BIT BIT(4) +#define NXMAC_ACCEPT_BROADCAST_BIT BIT(3) +#define NXMAC_ACCEPT_MULTICAST_BIT BIT(2) +#define NXMAC_DONT_DECRYPT_BIT BIT(1) +#define NXMAC_EXC_UNENCRYPTED_BIT BIT(0) + +#define NXMAC_DEBUG_PORT_SEL_ADDR 0x00B00510 +#define NXMAC_SW_SET_PROFILING_ADDR 0x00B08564 +#define NXMAC_SW_CLEAR_PROFILING_ADDR 0x00B08568 + +/* Modem Status */ +#define MDM_HDMCONFIG_ADDR 0x00C00000 + +/* Clock gating configuration */ +#define MDM_MEMCLKCTRL0_ADDR 0x00C00848 +#define MDM_CLKGATEFCTRL0_ADDR 0x00C00874 +#define CRM_CLKGATEFCTRL0_ADDR 0x00940010 + +/* AGC (trident) */ +#define AGC_RWNXAGCCNTL_ADDR 0x00C02060 + +/* LDPC RAM*/ +#define PHY_LDPC_RAM_ADDR 0x00C09000 + +/* FCU (elma )*/ +#define FCU_RWNXFCAGCCNTL_ADDR 0x00C09034 + +/* AGC RAM */ +#define PHY_AGC_UCODE_ADDR 0x00C0A000 + +/* RIU */ +#define RIU_RWNXVERSION_ADDR 0x00C0B000 +#define RIU_RWNXDYNAMICCONFIG_ADDR 0x00C0B008 +#define RIU_AGCMEMBISTSTAT_ADDR 0x00C0B238 +#define RIU_AGCMEMSIGNATURESTAT_ADDR 0x00C0B23C +#define RIU_RWNXAGCCNTL_ADDR 0x00C0B390 + +/* FCU RAM */ +#define PHY_FCU_UCODE_ADDR 0x00C0E000 + +/* RF ITF */ +#define FPGAB_MPIF_SEL_ADDR 0x00C10030 +#define RF_V6_DIAGPORT_CONF1_ADDR 0x00C10010 +#define RF_v6_PHYDIAG_CONF1_ADDR 0x00C10018 + +#define RF_V7_DIAGPORT_CONF1_ADDR 0x00F10010 +#define RF_v7_PHYDIAG_CONF1_ADDR 0x00F10018 + +/***************************************************************************** + * Macros for generated register files + *****************************************************************************/ +/* Macros for IPC registers access (used in reg_ipc_app.h) */ +#define REG_IPC_APP_RD(env, INDEX) \ + (*(volatile u32*)((u8*)env + IPC_REG_BASE_ADDR + 4*(INDEX))) + +#define REG_IPC_APP_WR(env, INDEX, value) \ + (*(volatile u32*)((u8*)env + IPC_REG_BASE_ADDR + 4*(INDEX)) = value) + +#endif /* REG_ACCESS_H_ */ diff --git a/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/reg_ipc_app.h b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/reg_ipc_app.h new file mode 
100644 index 0000000..2a3b545 --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/reg_ipc_app.h @@ -0,0 +1,299 @@ +/** + **************************************************************************************** + * + * @file rwnx_ipc_app.h + * + * @brief IPC module register definitions + * + * Copyright (C) RivieraWaves 2011-2019 + * + **************************************************************************************** + */ + +#ifndef _REG_IPC_APP_H_ +#define _REG_IPC_APP_H_ + +#ifndef __KERNEL__ +#include +#include "arch.h" +#else +#include "ipc_compat.h" +#endif +#include "reg_access.h" + +#define REG_IPC_APP_DECODING_MASK 0x0000007F + +/** + * @brief APP2EMB_TRIGGER register definition + *
+ *   Bits           Field Name   Reset Value
+ *  -----   ------------------   -----------
+ *  31:00      APP2EMB_TRIGGER   0x0
+ * 
+ */ +#define IPC_APP2EMB_TRIGGER_ADDR 0x12000000 +#define IPC_APP2EMB_TRIGGER_OFFSET 0x00000000 +#define IPC_APP2EMB_TRIGGER_INDEX 0x00000000 +#define IPC_APP2EMB_TRIGGER_RESET 0x00000000 + +__INLINE u32 ipc_app2emb_trigger_get(void *env) +{ + return REG_IPC_APP_RD(env, IPC_APP2EMB_TRIGGER_INDEX); +} + +__INLINE void ipc_app2emb_trigger_set(void *env, u32 value) +{ + REG_IPC_APP_WR(env, IPC_APP2EMB_TRIGGER_INDEX, value); +} + +// field definitions +#define IPC_APP2EMB_TRIGGER_MASK ((u32)0xFFFFFFFF) +#define IPC_APP2EMB_TRIGGER_LSB 0 +#define IPC_APP2EMB_TRIGGER_WIDTH ((u32)0x00000020) + +#define IPC_APP2EMB_TRIGGER_RST 0x0 + +__INLINE u32 ipc_app2emb_trigger_getf(void *env) +{ + u32 localVal = REG_IPC_APP_RD(env, IPC_APP2EMB_TRIGGER_INDEX); + ASSERT_ERR((localVal & ~((u32)0xFFFFFFFF)) == 0); + return (localVal >> 0); +} + +__INLINE void ipc_app2emb_trigger_setf(void *env, u32 app2embtrigger) +{ + ASSERT_ERR((((u32)app2embtrigger << 0) & ~((u32)0xFFFFFFFF)) == 0); + REG_IPC_APP_WR(env, IPC_APP2EMB_TRIGGER_INDEX, (u32)app2embtrigger << 0); +} + +/** + * @brief EMB2APP_RAWSTATUS register definition + *
+ *   Bits           Field Name   Reset Value
+ *  -----   ------------------   -----------
+ *  31:00    EMB2APP_RAWSTATUS   0x0
+ * 
+ */ +#define IPC_EMB2APP_RAWSTATUS_ADDR 0x12000004 +#define IPC_EMB2APP_RAWSTATUS_OFFSET 0x00000004 +#define IPC_EMB2APP_RAWSTATUS_INDEX 0x00000001 +#define IPC_EMB2APP_RAWSTATUS_RESET 0x00000000 + +__INLINE u32 ipc_emb2app_rawstatus_get(void *env) +{ + return REG_IPC_APP_RD(env, IPC_EMB2APP_RAWSTATUS_INDEX); +} + +__INLINE void ipc_emb2app_rawstatus_set(void *env, u32 value) +{ + REG_IPC_APP_WR(env, IPC_EMB2APP_RAWSTATUS_INDEX, value); +} + +// field definitions +#define IPC_EMB2APP_RAWSTATUS_MASK ((u32)0xFFFFFFFF) +#define IPC_EMB2APP_RAWSTATUS_LSB 0 +#define IPC_EMB2APP_RAWSTATUS_WIDTH ((u32)0x00000020) + +#define IPC_EMB2APP_RAWSTATUS_RST 0x0 + +__INLINE u32 ipc_emb2app_rawstatus_getf(void *env) +{ + u32 localVal = REG_IPC_APP_RD(env, IPC_EMB2APP_RAWSTATUS_INDEX); + ASSERT_ERR((localVal & ~((u32)0xFFFFFFFF)) == 0); + return (localVal >> 0); +} + +/** + * @brief EMB2APP_ACK register definition + *
+ *   Bits           Field Name   Reset Value
+ *  -----   ------------------   -----------
+ *  31:00          EMB2APP_ACK   0x0
+ * 
+ */ +#define IPC_EMB2APP_ACK_ADDR 0x12000008 +#define IPC_EMB2APP_ACK_OFFSET 0x00000008 +#define IPC_EMB2APP_ACK_INDEX 0x00000002 +#define IPC_EMB2APP_ACK_RESET 0x00000000 + +__INLINE u32 ipc_emb2app_ack_get(void *env) +{ + return REG_IPC_APP_RD(env, IPC_EMB2APP_ACK_INDEX); +} + +__INLINE void ipc_emb2app_ack_clear(void *env, u32 value) +{ + REG_IPC_APP_WR(env, IPC_EMB2APP_ACK_INDEX, value); +} + +// field definitions +#define IPC_EMB2APP_ACK_MASK ((u32)0xFFFFFFFF) +#define IPC_EMB2APP_ACK_LSB 0 +#define IPC_EMB2APP_ACK_WIDTH ((u32)0x00000020) + +#define IPC_EMB2APP_ACK_RST 0x0 + +__INLINE u32 ipc_emb2app_ack_getf(void *env) +{ + u32 localVal = REG_IPC_APP_RD(env, IPC_EMB2APP_ACK_INDEX); + ASSERT_ERR((localVal & ~((u32)0xFFFFFFFF)) == 0); + return (localVal >> 0); +} + +__INLINE void ipc_emb2app_ack_clearf(void *env, u32 emb2appack) +{ + ASSERT_ERR((((u32)emb2appack << 0) & ~((u32)0xFFFFFFFF)) == 0); + REG_IPC_APP_WR(env, IPC_EMB2APP_ACK_INDEX, (u32)emb2appack << 0); +} + +/** + * @brief EMB2APP_UNMASK_SET register definition + *
+ *   Bits           Field Name   Reset Value
+ *  -----   ------------------   -----------
+ *  31:00       EMB2APP_UNMASK   0x0
+ * 
+ */ +#define IPC_EMB2APP_UNMASK_SET_ADDR 0x1200000C +#define IPC_EMB2APP_UNMASK_SET_OFFSET 0x0000000C +#define IPC_EMB2APP_UNMASK_SET_INDEX 0x00000003 +#define IPC_EMB2APP_UNMASK_SET_RESET 0x00000000 + +__INLINE u32 ipc_emb2app_unmask_get(void *env) +{ + return REG_IPC_APP_RD(env, IPC_EMB2APP_UNMASK_SET_INDEX); +} + +__INLINE void ipc_emb2app_unmask_set(void *env, u32 value) +{ + REG_IPC_APP_WR(env, IPC_EMB2APP_UNMASK_SET_INDEX, value); +} + +// field definitions +#define IPC_EMB2APP_UNMASK_MASK ((u32)0xFFFFFFFF) +#define IPC_EMB2APP_UNMASK_LSB 0 +#define IPC_EMB2APP_UNMASK_WIDTH ((u32)0x00000020) + +#define IPC_EMB2APP_UNMASK_RST 0x0 + +__INLINE u32 ipc_emb2app_unmask_getf(void *env) +{ + u32 localVal = REG_IPC_APP_RD(env, IPC_EMB2APP_UNMASK_SET_INDEX); + ASSERT_ERR((localVal & ~((u32)0xFFFFFFFF)) == 0); + return (localVal >> 0); +} + +__INLINE void ipc_emb2app_unmask_setf(void *env, u32 emb2appunmask) +{ + ASSERT_ERR((((u32)emb2appunmask << 0) & ~((u32)0xFFFFFFFF)) == 0); + REG_IPC_APP_WR(env, IPC_EMB2APP_UNMASK_SET_INDEX, (u32)emb2appunmask << 0); +} + +/** + * @brief EMB2APP_UNMASK_CLEAR register definition + *
+ *   Bits           Field Name   Reset Value
+ *  -----   ------------------   -----------
+ *  31:00       EMB2APP_UNMASK   0x0
+ * 
+ */ +#define IPC_EMB2APP_UNMASK_CLEAR_ADDR 0x12000010 +#define IPC_EMB2APP_UNMASK_CLEAR_OFFSET 0x00000010 +#define IPC_EMB2APP_UNMASK_CLEAR_INDEX 0x00000004 +#define IPC_EMB2APP_UNMASK_CLEAR_RESET 0x00000000 + +__INLINE void ipc_emb2app_unmask_clear(void *env, u32 value) +{ + REG_IPC_APP_WR(env, IPC_EMB2APP_UNMASK_CLEAR_INDEX, value); +} + +// fields defined in symmetrical set/clear register +__INLINE void ipc_emb2app_unmask_clearf(void *env, u32 emb2appunmask) +{ + ASSERT_ERR((((u32)emb2appunmask << 0) & ~((u32)0xFFFFFFFF)) == 0); + REG_IPC_APP_WR(env, IPC_EMB2APP_UNMASK_CLEAR_INDEX, (u32)emb2appunmask << 0); +} + +/** + * @brief EMB2APP_STATUS register definition + *
+ *   Bits           Field Name   Reset Value
+ *  -----   ------------------   -----------
+ *  31:00       EMB2APP_STATUS   0x0
+ * 
+ */ +#ifdef CONFIG_RWNX_OLD_IPC +#define IPC_EMB2APP_STATUS_ADDR 0x12000014 +#define IPC_EMB2APP_STATUS_OFFSET 0x00000014 +#define IPC_EMB2APP_STATUS_INDEX 0x00000005 +#else +#define IPC_EMB2APP_STATUS_ADDR 0x1200001C +#define IPC_EMB2APP_STATUS_OFFSET 0x0000001C +#define IPC_EMB2APP_STATUS_INDEX 0x00000007 +#endif +#define IPC_EMB2APP_STATUS_RESET 0x00000000 + +__INLINE u32 ipc_emb2app_status_get(void *env) +{ + return REG_IPC_APP_RD(env, IPC_EMB2APP_STATUS_INDEX); +} + +__INLINE void ipc_emb2app_status_set(void *env, u32 value) +{ + REG_IPC_APP_WR(env, IPC_EMB2APP_STATUS_INDEX, value); +} + +// field definitions +#define IPC_EMB2APP_STATUS_MASK ((u32)0xFFFFFFFF) +#define IPC_EMB2APP_STATUS_LSB 0 +#define IPC_EMB2APP_STATUS_WIDTH ((u32)0x00000020) + +#define IPC_EMB2APP_STATUS_RST 0x0 + +__INLINE u32 ipc_emb2app_status_getf(void *env) +{ + u32 localVal = REG_IPC_APP_RD(env, IPC_EMB2APP_STATUS_INDEX); + ASSERT_ERR((localVal & ~((u32)0xFFFFFFFF)) == 0); + return (localVal >> 0); +} + +/** + * @brief APP_SIGNATURE register definition + *
+ *   Bits           Field Name   Reset Value
+ *  -----   ------------------   -----------
+ *  31:00        APP_SIGNATURE   0x0
+ * 
+ */ +#define IPC_APP_SIGNATURE_ADDR 0x12000040 +#define IPC_APP_SIGNATURE_OFFSET 0x00000040 +#define IPC_APP_SIGNATURE_INDEX 0x00000010 +#define IPC_APP_SIGNATURE_RESET 0x00000000 + +__INLINE u32 ipc_app_signature_get(void *env) +{ + return REG_IPC_APP_RD(env, IPC_APP_SIGNATURE_INDEX); +} + +__INLINE void ipc_app_signature_set(void *env, u32 value) +{ + REG_IPC_APP_WR(env, IPC_APP_SIGNATURE_INDEX, value); +} + +// field definitions +#define IPC_APP_SIGNATURE_MASK ((u32)0xFFFFFFFF) +#define IPC_APP_SIGNATURE_LSB 0 +#define IPC_APP_SIGNATURE_WIDTH ((u32)0x00000020) + +#define IPC_APP_SIGNATURE_RST 0x0 + +__INLINE u32 ipc_app_signature_getf(void *env) +{ + u32 localVal = REG_IPC_APP_RD(env, IPC_APP_SIGNATURE_INDEX); + ASSERT_ERR((localVal & ~((u32)0xFFFFFFFF)) == 0); + return (localVal >> 0); +} + + +#endif // _REG_IPC_APP_H_ + diff --git a/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/regdb.c b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/regdb.c new file mode 100644 index 0000000..d3ac7ac --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/regdb.c @@ -0,0 +1,2878 @@ +/** + ****************************************************************************** + * + * Copyright (C) 2020 AIC semiconductor. + * + * @file regdb.c + * + * @brief country code Dependent document + * https://mirrors.edge.kernel.org/pub/software/network/wireless-regdb/ + * wireless-regdb-2024.05.08 + * + ****************************************************************************** + */ + +#include +#include +#include + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 15, 0) +#define REG_RULE_EXT(start, end, bw, gain, eirp, dfs_cac, reg_flags) \ + { \ + .freq_range.start_freq_khz = MHZ_TO_KHZ(start), \ + .freq_range.end_freq_khz = MHZ_TO_KHZ(end), \ + .freq_range.max_bandwidth_khz = MHZ_TO_KHZ(bw), \ + .power_rule.max_antenna_gain = DBI_TO_MBI(gain), \ + .power_rule.max_eirp = DBM_TO_MBM(eirp), \ + .flags = reg_flags, \ + } +#define NL80211_RRF_AUTO_BW 0 +#endif + + +static const struct ieee80211_regdomain regdom_00 = { + .alpha2 = "00", + .n_reg_rules = 2, + .reg_rules = { + // 1 - 14 + REG_RULE(2390 - 10, 2510 + 10, 40, 0, 20, 0), + // 36 - 165 + REG_RULE(5150 - 10, 5970 + 10, 80, 0, 20, 0), + } +}; + +#if 0 +static const struct ieee80211_regdomain regdom_00 = { + .n_reg_rules = 8, + .alpha2 = "00", + .reg_rules = { + REG_RULE_EXT(2402, 2472, 40, 0, 20, 0, 0), + // 12 - 13 + REG_RULE_EXT(2457, 2482, 20, 0, 20, 0, NL80211_RRF_AUTO_BW | NL80211_RRF_NO_IR | 0), + // 14 + REG_RULE_EXT(2474, 2494, 20, 0, 20, 0, NL80211_RRF_NO_IR | NL80211_RRF_NO_OFDM | 0), + // 36 - 48 + REG_RULE_EXT(5170, 5250, 80, 0, 20, 0, NL80211_RRF_AUTO_BW | NL80211_RRF_NO_IR | 0), + // 52 - 64 + REG_RULE_EXT(5250, 5330, 80, 0, 20, 0, NL80211_RRF_AUTO_BW | NL80211_RRF_NO_IR | NL80211_RRF_DFS | 0), + // 100 - 144 + REG_RULE_EXT(5490, 5730, 160, 0, 20, 0, NL80211_RRF_NO_IR | NL80211_RRF_DFS | 0), + //149 - 165 + REG_RULE_EXT(5735, 5835, 80, 0, 20, 0, NL80211_RRF_NO_IR | 0), + REG_RULE_EXT(57240, 63720, 2160, 0, 0, 0, 0), + }}; +#endif + +static const struct ieee80211_regdomain regdom_AD = { + .alpha2 = "AD", + .dfs_region = NL80211_DFS_ETSI, + .reg_rules = + { + REG_RULE_EXT(2400, 2483, 40, 0, 20, 0, 0), + REG_RULE_EXT(5150, 5250, 80, 0, 23, 0, NL80211_RRF_AUTO_BW | NL80211_RRF_NO_OUTDOOR | 0), + REG_RULE_EXT(5250, 5350, 80, 0, 20, 0, + NL80211_RRF_DFS | NL80211_RRF_AUTO_BW | NL80211_RRF_NO_OUTDOOR | 0), + REG_RULE_EXT(5470, 5725, 160, 0, 27, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5725, 5875, 80, 0, 14, 0, 0), + REG_RULE_EXT(57000, 66000, 2160, 0, 
40, 0, 0), + }, + .n_reg_rules = 6}; + +static const struct ieee80211_regdomain regdom_AE = { + .alpha2 = "AE", + .dfs_region = NL80211_DFS_FCC, + .reg_rules = + { + REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0), + REG_RULE_EXT(5170, 5250, 80, 0, 17, 0, NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5330, 80, 0, 24, 0, + NL80211_RRF_DFS | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5490, 5730, 160, 0, 24, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5735, 5835, 80, 0, 30, 0, 0), + }, + .n_reg_rules = 5}; + +static const struct ieee80211_regdomain regdom_AF = { + .alpha2 = "AF", + .dfs_region = NL80211_DFS_ETSI, + .reg_rules = + { + REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0), + REG_RULE_EXT(5170, 5250, 80, 0, 20, 0, NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5330, 80, 0, 20, 0, + NL80211_RRF_DFS | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5490, 5710, 160, 0, 27, 0, NL80211_RRF_DFS | 0), + }, + .n_reg_rules = 4}; + +static const struct ieee80211_regdomain regdom_AI = { + .alpha2 = "AI", + .dfs_region = NL80211_DFS_ETSI, + .reg_rules = + { + REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0), + REG_RULE_EXT(5170, 5250, 80, 0, 20, 0, NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5330, 80, 0, 20, 0, + NL80211_RRF_DFS | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5490, 5710, 160, 0, 27, 0, NL80211_RRF_DFS | 0), + }, + .n_reg_rules = 4}; + +static const struct ieee80211_regdomain regdom_AL = { + .alpha2 = "AL", + .dfs_region = NL80211_DFS_ETSI, + .reg_rules = + { + REG_RULE_EXT(2400, 2483, 40, 0, 20, 0, 0), + REG_RULE_EXT(5150, 5250, 80, 0, 23, 0, + NL80211_RRF_AUTO_BW | NL80211_RRF_NO_OUTDOOR | 0), + REG_RULE_EXT(5250, 5350, 80, 0, 20, 0, + NL80211_RRF_DFS | NL80211_RRF_AUTO_BW | NL80211_RRF_NO_OUTDOOR | 0), + REG_RULE_EXT(5470, 5725, 160, 0, 27, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5725, 5875, 80, 0, 14, 0, 0), + }, + .n_reg_rules = 5}; + +static const struct ieee80211_regdomain regdom_AM = { + .alpha2 = "AM", + .dfs_region = NL80211_DFS_ETSI, + .reg_rules = + { + REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0), + REG_RULE_EXT(5170, 5250, 20, 0, 18, 0, 0), + REG_RULE_EXT(5250, 5330, 20, 0, 18, 0, NL80211_RRF_DFS | 0), + }, + .n_reg_rules = 3}; + +static const struct ieee80211_regdomain regdom_AN = { + .alpha2 = "AN", + .dfs_region = NL80211_DFS_ETSI, + .reg_rules = + { + REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0), + REG_RULE_EXT(5170, 5250, 80, 0, 20, 0, NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5330, 80, 0, 20, 0, + NL80211_RRF_DFS | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5490, 5710, 160, 0, 27, 0, NL80211_RRF_DFS | 0), + }, + .n_reg_rules = 4}; + +static const struct ieee80211_regdomain regdom_AR = { + .alpha2 = "AR", + .dfs_region = NL80211_DFS_FCC, + .reg_rules = + { + REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0), + REG_RULE_EXT(5170, 5250, 80, 0, 17, 0, NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5330, 80, 0, 24, 0, + NL80211_RRF_DFS | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5490, 5730, 160, 0, 24, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5735, 5835, 80, 0, 30, 0, 0), + }, + .n_reg_rules = 5}; + +static const struct ieee80211_regdomain regdom_AS = { + .alpha2 = "AS", + .dfs_region = NL80211_DFS_FCC, + .reg_rules = + { + REG_RULE_EXT(2402, 2472, 40, 0, 30, 0, 0), + REG_RULE_EXT(5170, 5250, 80, 0, 24, 0, NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5330, 80, 0, 24, 0, + NL80211_RRF_DFS | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5490, 5730, 160, 0, 24, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5735, 5835, 80, 0, 30, 0, 0), + }, + .n_reg_rules = 5}; + +static const struct ieee80211_regdomain regdom_AT = { + 
.alpha2 = "AT", + .dfs_region = NL80211_DFS_ETSI, + .reg_rules = + { + REG_RULE_EXT(2400, 2483, 40, 0, 20, 0, 0), + REG_RULE_EXT(5150, 5250, 80, 0, 23, 0, NL80211_RRF_AUTO_BW | NL80211_RRF_NO_OUTDOOR | 0), + REG_RULE_EXT(5250, 5350, 80, 0, 20, 0, + NL80211_RRF_DFS | NL80211_RRF_AUTO_BW | NL80211_RRF_NO_OUTDOOR | 0), + REG_RULE_EXT(5470, 5725, 160, 0, 27, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5725, 5875, 80, 0, 14, 0, 0), + REG_RULE_EXT(5945, 6425, 160, 0, 23, 0, NL80211_RRF_NO_OUTDOOR | 0), + REG_RULE_EXT(57000, 66000, 2160, 0, 40, 0, 0), + }, + .n_reg_rules = 7}; + +static const struct ieee80211_regdomain regdom_AU = { + .alpha2 = "AU", + .dfs_region = NL80211_DFS_ETSI, + .reg_rules = + { + REG_RULE_EXT(2400, 2483, 40, 0, 36, 0, 0), + REG_RULE_EXT(5150, 5250, 80, 0, 23, 0, NL80211_RRF_AUTO_BW | NL80211_RRF_NO_OUTDOOR | 0), + REG_RULE_EXT(5250, 5350, 80, 0, 20, 0, + NL80211_RRF_DFS | NL80211_RRF_AUTO_BW | NL80211_RRF_NO_OUTDOOR | 0), + REG_RULE_EXT(5470, 5600, 80, 0, 27, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5650, 5730, 80, 0, 27, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5730, 5850, 80, 0, 36, 0, NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5850, 5875, 20, 0, 14, 0, NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5925, 6425, 160, 0, 24, 0, NL80211_RRF_NO_OUTDOOR | 0), + REG_RULE_EXT(57000, 71000, 2160, 0, 43, 0, NL80211_RRF_NO_OUTDOOR | 0), + }, + .n_reg_rules = 9}; + +static const struct ieee80211_regdomain regdom_AW = { + .alpha2 = "AW", + .dfs_region = NL80211_DFS_ETSI, + .reg_rules = + { + REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0), + REG_RULE_EXT(5170, 5250, 80, 0, 20, 0, NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5330, 80, 0, 20, 0, + NL80211_RRF_DFS | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5490, 5710, 160, 0, 27, 0, NL80211_RRF_DFS | 0), + }, + .n_reg_rules = 4}; + +static const struct ieee80211_regdomain regdom_AZ = { + .alpha2 = "AZ", + .dfs_region = NL80211_DFS_ETSI, + .reg_rules = + { + REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0), + REG_RULE_EXT(5170, 5250, 80, 0, 18, 0, NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5330, 80, 0, 18, 0, + NL80211_RRF_DFS | NL80211_RRF_AUTO_BW | 0), + }, + .n_reg_rules = 3}; + +static const struct ieee80211_regdomain regdom_BA = { + .alpha2 = "BA", + .dfs_region = NL80211_DFS_ETSI, + .reg_rules = + { + REG_RULE_EXT(2400, 2483, 40, 0, 20, 0, 0), + REG_RULE_EXT(5150, 5250, 80, 0, 23, 0, NL80211_RRF_AUTO_BW | NL80211_RRF_NO_OUTDOOR | 0), + REG_RULE_EXT(5250, 5350, 80, 0, 20, 0, + NL80211_RRF_NO_OUTDOOR | NL80211_RRF_DFS | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5470, 5725, 160, 0, 27, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5725, 5875, 80, 0, 14, 0, 0), + REG_RULE_EXT(57000, 66000, 2160, 0, 40, 0, 0), + }, + .n_reg_rules = 6}; + +static const struct ieee80211_regdomain regdom_BB = { + .alpha2 = "BB", + .dfs_region = NL80211_DFS_FCC, + .reg_rules = + { + REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0), + REG_RULE_EXT(5170, 5250, 80, 0, 23, 0, NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5330, 80, 0, 23, 0, + NL80211_RRF_DFS | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5735, 5835, 80, 0, 30, 0, 0), + }, + .n_reg_rules = 4}; + +//Certified laboratory requirements +static const struct ieee80211_regdomain regdom_BD = { + .alpha2 = "BD", + .dfs_region = NL80211_DFS_ETSI, + .reg_rules = + { + REG_RULE_EXT(2400, 2483, 40, 0, 20, 0, 0), + REG_RULE_EXT(5150, 5250, 80, 0, 23, 0, + NL80211_RRF_NO_OUTDOOR | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5350, 80, 0, 20, 0, + NL80211_RRF_NO_OUTDOOR | NL80211_RRF_DFS | + NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5470, 5725, 
160, 0, 27, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5725, 5875, 80, 0, 14, 0, 0), + REG_RULE_EXT(5945, 6425, 160, 0, 23, 0, NL80211_RRF_NO_OUTDOOR | 0), + REG_RULE_EXT(57000, 66000, 2160, 0, 40, 0, 0), + }, + .n_reg_rules = 7}; + +static const struct ieee80211_regdomain regdom_BE = { + .alpha2 = "BE", + .dfs_region = NL80211_DFS_ETSI, + .reg_rules = + { + REG_RULE_EXT(2400, 2483, 40, 0, 20, 0, 0), + REG_RULE_EXT(5150, 5250, 80, 0, 23, 0, + NL80211_RRF_AUTO_BW | NL80211_RRF_NO_OUTDOOR | 0), + REG_RULE_EXT(5250, 5350, 80, 0, 20, 0, + NL80211_RRF_NO_OUTDOOR | NL80211_RRF_DFS | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5470, 5725, 160, 0, 27, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5725, 5875, 80, 0, 14, 0, 0), + REG_RULE_EXT(5945, 6425, 160, 0, 23, 0, NL80211_RRF_NO_OUTDOOR | 0), + REG_RULE_EXT(57000, 66000, 2160, 0, 40, 0, 0), + }, + .n_reg_rules = 7}; + +static const struct ieee80211_regdomain regdom_BF = { + .alpha2 = "BF", + .dfs_region = NL80211_DFS_FCC, + .reg_rules = + { + REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0), + REG_RULE_EXT(5170, 5250, 80, 0, 17, 0, NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5330, 80, 0, 24, 0, + NL80211_RRF_DFS | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5490, 5730, 160, 0, 24, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5735, 5835, 80, 0, 30, 0, 0), + }, + .n_reg_rules = 5}; + +static const struct ieee80211_regdomain regdom_BG = { + .alpha2 = "BG", + .dfs_region = NL80211_DFS_ETSI, + .reg_rules = + { + REG_RULE_EXT(2400, 2483, 40, 0, 20, 0, 0), + REG_RULE_EXT(5150, 5250, 80, 0, 23, 0, NL80211_RRF_AUTO_BW | NL80211_RRF_NO_OUTDOOR | 0), + REG_RULE_EXT(5250, 5350, 80, 0, 20, 0, + NL80211_RRF_NO_OUTDOOR | NL80211_RRF_DFS | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5470, 5725, 160, 0, 27, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5725, 5875, 80, 0, 14, 0, 0), + REG_RULE_EXT(5945, 6425, 160, 0, 23, 0, NL80211_RRF_NO_OUTDOOR | 0), + REG_RULE_EXT(57000, 66000, 2160, 0, 40, 0, 0), + }, + .n_reg_rules = 7}; + +static const struct ieee80211_regdomain regdom_BH = { + .alpha2 = "BH", + .dfs_region = NL80211_DFS_JP, + .reg_rules = + { + REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0), + REG_RULE_EXT(5170, 5250, 20, 0, 20, 0, 0), + REG_RULE_EXT(5250, 5330, 20, 0, 20, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5735, 5835, 20, 0, 20, 0, 0), + }, + .n_reg_rules = 4}; + +static const struct ieee80211_regdomain regdom_BL = { + .alpha2 = "BL", + .dfs_region = NL80211_DFS_ETSI, + .reg_rules = + { + REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0), + REG_RULE_EXT(5170, 5250, 80, 0, 20, 0, NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5330, 80, 0, 20, 0, + NL80211_RRF_DFS | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5490, 5710, 160, 0, 27, 0, NL80211_RRF_DFS | 0), + }, + .n_reg_rules = 4}; + +static const struct ieee80211_regdomain regdom_BM = { + .alpha2 = "BM", + .dfs_region = NL80211_DFS_FCC, + .reg_rules = + { + REG_RULE_EXT(2402, 2472, 40, 0, 30, 0, 0), + REG_RULE_EXT(5170, 5250, 80, 0, 24, 0, NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5330, 80, 0, 24, 0, + NL80211_RRF_DFS | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5490, 5730, 160, 0, 24, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5735, 5835, 80, 0, 30, 0, 0), + }, + .n_reg_rules = 5}; + +static const struct ieee80211_regdomain regdom_BN = { + .alpha2 = "BN", + .dfs_region = NL80211_DFS_JP, + .reg_rules = + { + REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0), + REG_RULE_EXT(5170, 5250, 80, 0, 20, 0, NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5330, 80, 0, 20, 0, + NL80211_RRF_DFS | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5735, 5835, 80, 0, 20, 0, 0), + 
}, + .n_reg_rules = 4}; + +static const struct ieee80211_regdomain regdom_BO = { + .alpha2 = "BO", + .dfs_region = NL80211_DFS_JP, + .reg_rules = + { + REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0), + REG_RULE_EXT(5250, 5330, 80, 0, 30, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5735, 5835, 80, 0, 30, 0, 0), + }, + .n_reg_rules = 3}; + +static const struct ieee80211_regdomain regdom_BR = { + .alpha2 = "BR", + .dfs_region = NL80211_DFS_FCC, + .reg_rules = + { + REG_RULE_EXT(2400, 2483, 40, 0, 30, 0, 0), + REG_RULE_EXT(5150, 5250, 80, 0, 27, 0, NL80211_RRF_AUTO_BW | NL80211_RRF_NO_OUTDOOR | 0), + REG_RULE_EXT(5250, 5350, 80, 0, 27, 0, + NL80211_RRF_DFS | NL80211_RRF_AUTO_BW | NL80211_RRF_NO_OUTDOOR | 0), + REG_RULE_EXT(5470, 5725, 160, 0, 27, 0, NL80211_RRF_DFS | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5725, 5850, 80, 0, 30, 0, NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5925, 7125, 320, 0, 12, 0, NL80211_RRF_NO_OUTDOOR | NL80211_RRF_NO_IR | 0), + REG_RULE_EXT(57000, 71000, 2160, 0, 40, 0, 0), + }, + .n_reg_rules = 7}; + +static const struct ieee80211_regdomain regdom_BS = { + .alpha2 = "BS", + .dfs_region = NL80211_DFS_FCC, + .reg_rules = + { + REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0), + REG_RULE_EXT(5170, 5250, 80, 0, 24, 0, NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5330, 80, 0, 24, 0, + NL80211_RRF_DFS | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5490, 5730, 160, 0, 24, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5735, 5835, 80, 0, 30, 0, 0), + }, + .n_reg_rules = 5}; + +static const struct ieee80211_regdomain regdom_BT = { + .alpha2 = "BT", + .dfs_region = NL80211_DFS_ETSI, + .reg_rules = + { + REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0), + REG_RULE_EXT(5170, 5250, 80, 0, 20, 0, NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5330, 80, 0, 20, 0, + NL80211_RRF_DFS | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5490, 5710, 160, 0, 27, 0, NL80211_RRF_DFS | 0), + }, + .n_reg_rules = 4}; + +//kernel undefined +static const struct ieee80211_regdomain regdom_BW = { + .alpha2 = "BW", + .dfs_region = NL80211_DFS_ETSI, + .reg_rules = + { + REG_RULE_EXT(2400, 2483, 40, 0, 20, 0, 0), + REG_RULE_EXT(5150, 5250, 80, 0, 23, 0, + NL80211_RRF_NO_OUTDOOR | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5350, 80, 0, 20, 0, + NL80211_RRF_NO_OUTDOOR | NL80211_RRF_DFS | + NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5470, 5725, 160, 0, 27, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5725, 5875, 80, 0, 14, 0, 0), + REG_RULE_EXT(5945, 6425, 160, 0, 23, 0, NL80211_RRF_NO_OUTDOOR | 0), + REG_RULE_EXT(57000, 66000, 2160, 0, 40, 0, 0), + }, + .n_reg_rules = 7}; + +static const struct ieee80211_regdomain regdom_BY = { + .alpha2 = "BY", + .dfs_region = NL80211_DFS_ETSI, + .reg_rules = + { + REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0), + REG_RULE_EXT(5170, 5250, 80, 0, 20, 0, NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5330, 80, 0, 20, 0, + NL80211_RRF_DFS | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5490, 5710, 160, 0, 27, 0, NL80211_RRF_DFS | 0), + }, + .n_reg_rules = 4}; + +static const struct ieee80211_regdomain regdom_BZ = { + .alpha2 = "BZ", + .dfs_region = NL80211_DFS_JP, + .reg_rules = + { + REG_RULE_EXT(2402, 2482, 40, 0, 30, 0, 0), + REG_RULE_EXT(5735, 5835, 80, 0, 30, 0, 0), + }, + .n_reg_rules = 2}; + +static const struct ieee80211_regdomain regdom_CA = { + .alpha2 = "CA", + .dfs_region = NL80211_DFS_FCC, + .reg_rules = + { + REG_RULE_EXT(2402, 2472, 40, 0, 30, 0, 0), + REG_RULE_EXT(5150, 5250, 80, 0, 23, 0, + NL80211_RRF_AUTO_BW | NL80211_RRF_NO_OUTDOOR | 0), + REG_RULE_EXT(5250, 5350, 80, 0, 24, 0, + NL80211_RRF_DFS | 
NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5470, 5600, 80, 0, 24, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5650, 5730, 80, 0, 24, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5735, 5835, 80, 0, 30, 0, 0), + REG_RULE_EXT(5925, 7125, 320, 0, 12, 0, NL80211_RRF_NO_OUTDOOR | 0), + }, + .n_reg_rules = 7}; + +static const struct ieee80211_regdomain regdom_CF = { + .alpha2 = "CF", + .dfs_region = NL80211_DFS_FCC, + .reg_rules = + { + REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0), + REG_RULE_EXT(5170, 5250, 40, 0, 17, 0, 0), + REG_RULE_EXT(5250, 5330, 40, 0, 24, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5490, 5730, 40, 0, 24, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5735, 5835, 40, 0, 30, 0, 0), + }, + .n_reg_rules = 5}; + +static const struct ieee80211_regdomain regdom_CH = { + .alpha2 = "CH", + .dfs_region = NL80211_DFS_ETSI, + .reg_rules = + { + REG_RULE_EXT(2400, 2483, 40, 0, 20, 0, 0), + REG_RULE_EXT(5150, 5250, 80, 0, 23, 0, + NL80211_RRF_AUTO_BW | NL80211_RRF_NO_OUTDOOR | 0), + REG_RULE_EXT(5250, 5350, 80, 0, 20, 0, + NL80211_RRF_NO_OUTDOOR | NL80211_RRF_DFS | + NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5470, 5725, 160, 0, 27, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5725, 5875, 80, 0, 14, 0, 0), + REG_RULE_EXT(5945, 6425, 160, 0, 23, 0, NL80211_RRF_NO_OUTDOOR | 0), + REG_RULE_EXT(57000, 71000, 2160, 0, 40, 0, 0), + }, + .n_reg_rules = 7}; + +static const struct ieee80211_regdomain regdom_CI = { + .alpha2 = "CI", + .dfs_region = NL80211_DFS_FCC, + .reg_rules = + { + REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0), + REG_RULE_EXT(5170, 5250, 80, 0, 17, 0, NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5330, 80, 0, 24, 0, + NL80211_RRF_DFS | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5490, 5730, 160, 0, 24, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5735, 5835, 80, 0, 30, 0, 0), + }, + .n_reg_rules = 5}; + +static const struct ieee80211_regdomain regdom_CL = { + .alpha2 = "CL", + .dfs_region = NL80211_DFS_JP, + .reg_rules = + { + REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0), + REG_RULE_EXT(5170, 5250, 80, 0, 20, 0, NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5330, 80, 0, 20, 0, + NL80211_RRF_DFS | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5735, 5835, 80, 0, 20, 0, 0), + }, + .n_reg_rules = 4}; + +static const struct ieee80211_regdomain regdom_CN = { + .alpha2 = "CN", + .dfs_region = NL80211_DFS_FCC, + .reg_rules = + { + REG_RULE_EXT(2400, 2483, 40, 0, 20, 0, 0), + REG_RULE_EXT(5150, 5350, 80, 0, 20, 0, + NL80211_RRF_DFS | NL80211_RRF_AUTO_BW | NL80211_RRF_NO_OUTDOOR | 0), + REG_RULE_EXT(5725, 5850, 80, 0, 33, 0, 0), + REG_RULE_EXT(57240, 59400, 2160, 0, 28, 0, 0), + REG_RULE_EXT(59400, 63720, 2160, 0, 44, 0, 0), + REG_RULE_EXT(63720, 65880, 2160, 0, 28, 0, 0), + }, + .n_reg_rules = 6}; + +static const struct ieee80211_regdomain regdom_CO = { + .alpha2 = "CO", + .dfs_region = NL80211_DFS_FCC, + .reg_rules = + { + REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0), + REG_RULE_EXT(5170, 5250, 80, 0, 17, 0, NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5330, 80, 0, 24, 0, + NL80211_RRF_DFS | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5490, 5730, 160, 0, 24, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5735, 5835, 80, 0, 30, 0, 0), + }, + .n_reg_rules = 5}; + +static const struct ieee80211_regdomain regdom_CR = { + .alpha2 = "CR", + .dfs_region = NL80211_DFS_FCC, + .reg_rules = + { + REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0), + REG_RULE_EXT(5170, 5250, 20, 0, 17, 0, 0), + REG_RULE_EXT(5250, 5330, 20, 0, 24, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5490, 5730, 20, 0, 24, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5735, 5835, 20, 0, 30, 0, 0), + 
}, + .n_reg_rules = 5}; + +static const struct ieee80211_regdomain regdom_CU = { + .alpha2 = "CU", + .dfs_region = NL80211_DFS_FCC, + .reg_rules = + { + REG_RULE_EXT(2400, 2483, 40, 0, 23, 0, 0), + REG_RULE_EXT(5150, 5350, 80, 0, 23, 0, NL80211_RRF_NO_IR | NL80211_RRF_NO_OUTDOOR | 0), + REG_RULE_EXT(5470, 5725, 80, 0, 24, 0, NL80211_RRF_NO_IR | 0), + REG_RULE_EXT(5725, 5850, 80, 0, 23, 0, 0), + }, + .n_reg_rules = 4}; + +static const struct ieee80211_regdomain regdom_CX = { + .alpha2 = "CX", + .dfs_region = NL80211_DFS_FCC, + .reg_rules = + { + REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0), + REG_RULE_EXT(5170, 5250, 80, 0, 24, 0, NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5330, 80, 0, 24, 0, + NL80211_RRF_DFS | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5490, 5730, 160, 0, 24, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5735, 5835, 80, 0, 30, 0, 0), + }, + .n_reg_rules = 5}; + +static const struct ieee80211_regdomain regdom_CY = { + .alpha2 = "CY", + .dfs_region = NL80211_DFS_ETSI, + .reg_rules = + { + REG_RULE_EXT(2400, 2483, 40, 0, 20, 0, 0), + REG_RULE_EXT(5150, 5250, 80, 0, 23, 0, + NL80211_RRF_AUTO_BW | NL80211_RRF_NO_OUTDOOR | 0), + REG_RULE_EXT(5250, 5350, 80, 0, 20, 0, + NL80211_RRF_NO_OUTDOOR | NL80211_RRF_DFS | + NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5470, 5725, 160, 0, 27, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5725, 5875, 80, 0, 14, 0, 0), + REG_RULE_EXT(5945, 6425, 160, 0, 23, 0, NL80211_RRF_NO_OUTDOOR | 0), + REG_RULE_EXT(57000, 66000, 2160, 0, 40, 0, 0), + }, + .n_reg_rules = 7}; + +static const struct ieee80211_regdomain regdom_CZ = { + .alpha2 = "CZ", + .dfs_region = NL80211_DFS_ETSI, + .reg_rules = + { + REG_RULE_EXT(2400, 2483, 40, 0, 20, 0, 0), + REG_RULE_EXT(5150, 5250, 80, 0, 23, 0, + NL80211_RRF_NO_OUTDOOR | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5350, 80, 0, 20, 0, + NL80211_RRF_NO_OUTDOOR | NL80211_RRF_DFS | + NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5470, 5725, 160, 0, 27, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5725, 5875, 80, 0, 14, 0, 0), + REG_RULE_EXT(5945, 6425, 160, 0, 23, 0, NL80211_RRF_NO_OUTDOOR | 0), + REG_RULE_EXT(57000, 66000, 2160, 0, 40, 0, 0), + }, + .n_reg_rules = 7}; + +static const struct ieee80211_regdomain regdom_DE = { + .alpha2 = "DE", + .dfs_region = NL80211_DFS_ETSI, + .reg_rules = + { + REG_RULE_EXT(2400, 2483, 40, 0, 20, 0, 0), + REG_RULE_EXT(5150, 5250, 80, 0, 23, 0, + NL80211_RRF_NO_OUTDOOR | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5350, 80, 0, 20, 0, + NL80211_RRF_NO_OUTDOOR | NL80211_RRF_DFS | + NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5470, 5725, 160, 0, 27, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5725, 5875, 80, 0, 14, 0, 0), + REG_RULE_EXT(5945, 6425, 160, 0, 23, 0, NL80211_RRF_NO_OUTDOOR | 0), + REG_RULE_EXT(57000, 66000, 2160, 0, 40, 0, 0), + }, + .n_reg_rules = 7}; + +static const struct ieee80211_regdomain regdom_DK = { + .alpha2 = "DK", + .dfs_region = NL80211_DFS_ETSI, + .reg_rules = + { + REG_RULE_EXT(2400, 2483, 40, 0, 20, 0, 0), + REG_RULE_EXT(5150, 5250, 80, 0, 23, 0, + NL80211_RRF_AUTO_BW | NL80211_RRF_NO_OUTDOOR | 0), + REG_RULE_EXT(5250, 5350, 80, 0, 20, 0, + NL80211_RRF_NO_OUTDOOR | NL80211_RRF_DFS | + NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5470, 5725, 160, 0, 27, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5725, 5875, 80, 0, 14, 0, 0), + REG_RULE_EXT(5945, 6425, 160, 0, 23, 0, NL80211_RRF_NO_OUTDOOR | 0), + REG_RULE_EXT(57000, 66000, 2160, 0, 40, 0, 0), + }, + .n_reg_rules = 7}; + +static const struct ieee80211_regdomain regdom_DM = { + .alpha2 = "DM", + .dfs_region = NL80211_DFS_FCC, + .reg_rules = + { 
+ REG_RULE_EXT(2402, 2472, 40, 0, 30, 0, 0), + REG_RULE_EXT(5170, 5250, 80, 0, 17, 0, NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5330, 80, 0, 23, 0, + NL80211_RRF_DFS | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5735, 5835, 80, 0, 30, 0, 0), + }, + .n_reg_rules = 4}; + +static const struct ieee80211_regdomain regdom_DO = { + .alpha2 = "DO", + .dfs_region = NL80211_DFS_FCC, + .reg_rules = + { + REG_RULE_EXT(2402, 2472, 40, 0, 30, 0, 0), + REG_RULE_EXT(5170, 5250, 80, 0, 17, 0, NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5330, 80, 0, 23, 0, + NL80211_RRF_DFS | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5735, 5835, 80, 0, 30, 0, 0), + }, + .n_reg_rules = 4}; + +//Certified laboratory requirements +static const struct ieee80211_regdomain regdom_DZ = { + .alpha2 = "DZ", + .dfs_region = NL80211_DFS_ETSI, + .reg_rules = + { + REG_RULE_EXT(2400, 2483, 40, 0, 20, 0, 0), + REG_RULE_EXT(5150, 5250, 80, 0, 23, 0, + NL80211_RRF_NO_OUTDOOR | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5350, 80, 0, 20, 0, + NL80211_RRF_NO_OUTDOOR | NL80211_RRF_DFS | + NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5470, 5725, 160, 0, 27, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5725, 5875, 80, 0, 14, 0, 0), + REG_RULE_EXT(5945, 6425, 160, 0, 23, 0, NL80211_RRF_NO_OUTDOOR | 0), + REG_RULE_EXT(57000, 66000, 2160, 0, 40, 0, 0), + }, + .n_reg_rules = 7}; + +static const struct ieee80211_regdomain regdom_EC = { + .alpha2 = "EC", + .dfs_region = NL80211_DFS_FCC, + .reg_rules = + { + REG_RULE_EXT(2400, 2483, 40, 0, 30, 0, 0), + REG_RULE_EXT(5150, 5250, 80, 0, 17, 0, NL80211_RRF_AUTO_BW | NL80211_RRF_DFS | 0), + REG_RULE_EXT(5250, 5350, 80, 0, 21, 0, NL80211_RRF_DFS | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5470, 5725, 160, 0, 21, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5725, 5850, 80, 0, 30, 0, 0), + }, + .n_reg_rules = 5}; + +static const struct ieee80211_regdomain regdom_EE = { + .alpha2 = "EE", + .dfs_region = NL80211_DFS_ETSI, + .reg_rules = + { + REG_RULE_EXT(2400, 2483, 40, 0, 20, 0, 0), + REG_RULE_EXT(5150, 5250, 80, 0, 23, 0, + NL80211_RRF_NO_OUTDOOR | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5350, 80, 0, 20, 0, + NL80211_RRF_NO_OUTDOOR | NL80211_RRF_DFS | + NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5470, 5725, 160, 0, 27, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5725, 5875, 80, 0, 14, 0, 0), + REG_RULE_EXT(5945, 6425, 160, 0, 23, 0, NL80211_RRF_NO_OUTDOOR | 0), + REG_RULE_EXT(57000, 66000, 2160, 0, 40, 0, 0), + }, + .n_reg_rules = 7}; + +static const struct ieee80211_regdomain regdom_EG = { + .alpha2 = "EG", + .dfs_region = NL80211_DFS_ETSI, + .reg_rules = + { + REG_RULE_EXT(2402, 2483, 40, 0, 20, 0, 0), + REG_RULE_EXT(5150, 5250, 80, 0, 23, 0, NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5350, 80, 0, 20, 0, NL80211_RRF_DFS | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(57000, 66000, 2160, 0, 40, 0, 0), + }, + .n_reg_rules = 4}; + +static const struct ieee80211_regdomain regdom_ES = { + .alpha2 = "ES", + .dfs_region = NL80211_DFS_ETSI, + .reg_rules = + { + REG_RULE_EXT(2400, 2483, 40, 0, 20, 0, 0), + REG_RULE_EXT(5150, 5250, 80, 0, 23, 0, + NL80211_RRF_NO_OUTDOOR | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5350, 80, 0, 20, 0, + NL80211_RRF_NO_OUTDOOR | NL80211_RRF_DFS | + NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5470, 5725, 160, 0, 27, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5725, 5875, 80, 0, 14, 0, 0), + REG_RULE_EXT(5945, 6425, 160, 0, 23, 0, NL80211_RRF_NO_OUTDOOR | 0), + REG_RULE_EXT(57000, 66000, 2160, 0, 40, 0, 0), + }, + .n_reg_rules = 7}; + +static const struct ieee80211_regdomain regdom_ET = { + 
.alpha2 = "ET", + .dfs_region = NL80211_DFS_ETSI, + .reg_rules = + { + REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0), + REG_RULE_EXT(5170, 5250, 80, 0, 20, 0, NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5330, 80, 0, 20, 0, + NL80211_RRF_DFS | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5490, 5710, 160, 0, 27, 0, NL80211_RRF_DFS | 0), + }, + .n_reg_rules = 4}; + +static const struct ieee80211_regdomain regdom_FI = { + .alpha2 = "FI", + .dfs_region = NL80211_DFS_ETSI, + .reg_rules = + { + REG_RULE_EXT(2400, 2483, 40, 0, 20, 0, 0), + REG_RULE_EXT(5150, 5250, 80, 0, 23, 0, + NL80211_RRF_AUTO_BW | NL80211_RRF_NO_OUTDOOR | 0), + REG_RULE_EXT(5250, 5350, 80, 0, 20, 0, + NL80211_RRF_NO_OUTDOOR | NL80211_RRF_DFS | + NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5470, 5725, 160, 0, 27, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5725, 5875, 80, 0, 14, 0, 0), + REG_RULE_EXT(5945, 6425, 160, 0, 23, 0, NL80211_RRF_NO_OUTDOOR | 0), + REG_RULE_EXT(57000, 66000, 2160, 0, 40, 0, 0), + }, + .n_reg_rules = 7}; + +static const struct ieee80211_regdomain regdom_FM = { + .alpha2 = "FM", + .dfs_region = NL80211_DFS_FCC, + .reg_rules = + { + REG_RULE_EXT(2402, 2472, 40, 0, 30, 0, 0), + REG_RULE_EXT(5170, 5250, 80, 0, 24, 0, NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5330, 80, 0, 24, 0, + NL80211_RRF_DFS | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5490, 5730, 160, 0, 24, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5735, 5835, 80, 0, 30, 0, 0), + }, + .n_reg_rules = 5}; + +static const struct ieee80211_regdomain regdom_FR = { + .alpha2 = "FR", + .dfs_region = NL80211_DFS_ETSI, + .reg_rules = + { + REG_RULE_EXT(2400, 2483, 40, 0, 20, 0, 0), + REG_RULE_EXT(5150, 5250, 80, 0, 23, 0, + NL80211_RRF_NO_OUTDOOR | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5350, 80, 0, 20, 0, + NL80211_RRF_NO_OUTDOOR | NL80211_RRF_DFS | + NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5470, 5725, 160, 0, 27, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5725, 5875, 80, 0, 14, 0, 0), + REG_RULE_EXT(5945, 6425, 160, 0, 23, 0, NL80211_RRF_NO_OUTDOOR | 0), + REG_RULE_EXT(57000, 66000, 2160, 0, 40, 0, 0), + }, + .n_reg_rules = 7}; + +static const struct ieee80211_regdomain regdom_GB = { + .alpha2 = "GB", + .dfs_region = NL80211_DFS_ETSI, + .reg_rules = + { + REG_RULE_EXT(2400, 2483, 40, 0, 20, 0, 0), + REG_RULE_EXT(5150, 5250, 80, 0, 23, 0, NL80211_RRF_NO_OUTDOOR | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5350, 80, 0, 20, 0, + NL80211_RRF_NO_OUTDOOR | NL80211_RRF_DFS | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5470, 5730, 160, 0, 27, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5725, 5850, 80, 0, 23, 0, NL80211_RRF_NO_OUTDOOR | 0), + REG_RULE_EXT(5925, 6425, 160, 0, 24, 0, NL80211_RRF_NO_OUTDOOR | 0), + REG_RULE_EXT(57000, 71000, 2160, 0, 40, 0, 0), + }, + .n_reg_rules = 7}; + +static const struct ieee80211_regdomain regdom_GD = { + .alpha2 = "GD", + .dfs_region = NL80211_DFS_FCC, + .reg_rules = + { + REG_RULE_EXT(2402, 2472, 40, 0, 30, 0, 0), + REG_RULE_EXT(5170, 5250, 80, 0, 17, 0, NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5330, 80, 0, 24, 0, + NL80211_RRF_DFS | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5490, 5730, 160, 0, 24, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5735, 5835, 80, 0, 30, 0, 0), + }, + .n_reg_rules = 5}; + +static const struct ieee80211_regdomain regdom_GE = { + .alpha2 = "GE", + .dfs_region = NL80211_DFS_ETSI, + .reg_rules = + { + REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0), + REG_RULE_EXT(5170, 5250, 80, 0, 18, 0, NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5330, 80, 0, 18, 0, + NL80211_RRF_DFS | NL80211_RRF_AUTO_BW | 0), + 
REG_RULE_EXT(57000, 66000, 2160, 0, 40, 0, 0), + }, + .n_reg_rules = 4}; + +static const struct ieee80211_regdomain regdom_GF = { + .alpha2 = "GF", + .dfs_region = NL80211_DFS_ETSI, + .reg_rules = + { + REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0), + REG_RULE_EXT(5170, 5250, 80, 0, 20, 0, NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5330, 80, 0, 20, 0, + NL80211_RRF_DFS | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5490, 5710, 160, 0, 27, 0, NL80211_RRF_DFS | 0), + }, + .n_reg_rules = 4}; + +//Certified laboratory requirements +static const struct ieee80211_regdomain regdom_GH = { + .alpha2 = "GH", + .dfs_region = NL80211_DFS_ETSI, + .reg_rules = + { + REG_RULE_EXT(2400, 2483, 40, 0, 20, 0, 0), + REG_RULE_EXT(5150, 5250, 80, 0, 23, 0, + NL80211_RRF_NO_OUTDOOR | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5350, 80, 0, 20, 0, + NL80211_RRF_NO_OUTDOOR | NL80211_RRF_DFS | + NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5470, 5725, 160, 0, 27, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5725, 5875, 80, 0, 14, 0, 0), + REG_RULE_EXT(5945, 6425, 160, 0, 23, 0, NL80211_RRF_NO_OUTDOOR | 0), + REG_RULE_EXT(57000, 66000, 2160, 0, 40, 0, 0), + }, + .n_reg_rules = 7}; + +//kernel undefined +static const struct ieee80211_regdomain regdom_GI = { + .alpha2 = "GI", + .dfs_region = NL80211_DFS_ETSI, + .reg_rules = + { + REG_RULE_EXT(2400, 2483, 40, 0, 20, 0, 0), + REG_RULE_EXT(5150, 5250, 80, 0, 23, 0, + NL80211_RRF_NO_OUTDOOR | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5350, 80, 0, 20, 0, + NL80211_RRF_NO_OUTDOOR | NL80211_RRF_DFS | + NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5470, 5725, 160, 0, 27, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5725, 5875, 80, 0, 14, 0, 0), + REG_RULE_EXT(5945, 6425, 160, 0, 23, 0, NL80211_RRF_NO_OUTDOOR | 0), + REG_RULE_EXT(57000, 66000, 2160, 0, 40, 0, 0), + }, + .n_reg_rules = 7}; + +static const struct ieee80211_regdomain regdom_GL = { + .alpha2 = "GL", + .dfs_region = NL80211_DFS_ETSI, + .reg_rules = + { + REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0), + REG_RULE_EXT(5170, 5250, 80, 0, 20, 0, NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5330, 80, 0, 20, 0, NL80211_RRF_DFS | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5490, 5710, 160, 0, 27, 0, NL80211_RRF_DFS | 0), + }, + .n_reg_rules = 4}; + +static const struct ieee80211_regdomain regdom_GP = { + .alpha2 = "GP", + .dfs_region = NL80211_DFS_ETSI, + .reg_rules = + { + REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0), + REG_RULE_EXT(5170, 5250, 80, 0, 20, 0, NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5330, 80, 0, 20, 0, + NL80211_RRF_DFS | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5490, 5710, 160, 0, 27, 0, NL80211_RRF_DFS | 0), + }, + .n_reg_rules = 4}; + +static const struct ieee80211_regdomain regdom_GR = { + .alpha2 = "GR", + .dfs_region = NL80211_DFS_ETSI, + .reg_rules = + { + REG_RULE_EXT(2400, 2483, 40, 0, 20, 0, 0), + REG_RULE_EXT(5150, 5250, 80, 0, 23, 0, + NL80211_RRF_NO_OUTDOOR | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5350, 80, 0, 20, 0, + NL80211_RRF_NO_OUTDOOR | NL80211_RRF_DFS | + NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5470, 5725, 160, 0, 27, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5725, 5875, 80, 0, 14, 0, 0), + REG_RULE_EXT(5945, 6425, 160, 0, 23, 0, NL80211_RRF_NO_OUTDOOR | 0), + REG_RULE_EXT(57000, 66000, 2160, 0, 40, 0, 0), + }, + .n_reg_rules = 7}; + +static const struct ieee80211_regdomain regdom_GT = { + .alpha2 = "GT", + .dfs_region = NL80211_DFS_FCC, + .reg_rules = + { + REG_RULE_EXT(2402, 2472, 40, 0, 30, 0, 0), + REG_RULE_EXT(5170, 5250, 80, 0, 17, 0, NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5330, 
80, 0, 23, 0, + NL80211_RRF_DFS | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5735, 5835, 80, 0, 30, 0, 0), + }, + .n_reg_rules = 4}; + +static const struct ieee80211_regdomain regdom_GU = { + .alpha2 = "GU", + .dfs_region = NL80211_DFS_FCC, + .reg_rules = + { + REG_RULE_EXT(2402, 2472, 40, 0, 30, 0, 0), + REG_RULE_EXT(5170, 5250, 20, 0, 17, 0, 0), + REG_RULE_EXT(5250, 5330, 20, 0, 24, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5490, 5730, 20, 0, 24, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5735, 5835, 20, 0, 30, 0, 0), + }, + .n_reg_rules = 5}; + +static const struct ieee80211_regdomain regdom_GY = { + .alpha2 = "GY", + .reg_rules = + { + REG_RULE_EXT(2402, 2482, 40, 0, 30, 0, 0), + REG_RULE_EXT(5170, 5250, 80, 0, 23, 0, NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5330, 80, 0, 23, 0, NL80211_RRF_DFS | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5490, 5730, 160, 0, 23, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5735, 5835, 80, 0, 30, 0, 0), + }, + .n_reg_rules = 5}; + +static const struct ieee80211_regdomain regdom_HK = { + .alpha2 = "HK", + .dfs_region = NL80211_DFS_ETSI, + .reg_rules = + { + REG_RULE_EXT(2400, 2483, 40, 0, 36, 0, 0), + REG_RULE_EXT(5150, 5250, 80, 0, 23, 0, NL80211_RRF_AUTO_BW | NL80211_RRF_NO_OUTDOOR | 0), + REG_RULE_EXT(5250, 5350, 80, 0, 23, 0, + NL80211_RRF_DFS | NL80211_RRF_AUTO_BW | NL80211_RRF_NO_OUTDOOR | 0), + REG_RULE_EXT(5470, 5730, 160, 0, 27, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5730, 5850, 80, 0, 36, 0, 0), + REG_RULE_EXT(5925, 6425, 160, 0, 14, 0, 0), + }, + .n_reg_rules = 6}; + +static const struct ieee80211_regdomain regdom_HN = { + .alpha2 = "HN", + .dfs_region = NL80211_DFS_FCC, + .reg_rules = + { + REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0), + REG_RULE_EXT(5170, 5250, 80, 0, 17, 0, NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5330, 80, 0, 24, 0, + NL80211_RRF_DFS | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5490, 5730, 160, 0, 24, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5735, 5835, 80, 0, 30, 0, 0), + }, + .n_reg_rules = 5}; + +static const struct ieee80211_regdomain regdom_HR = { + .alpha2 = "HR", + .dfs_region = NL80211_DFS_ETSI, + .reg_rules = + { + REG_RULE_EXT(2400, 2483, 40, 0, 20, 0, 0), + REG_RULE_EXT(5150, 5250, 80, 0, 23, 0, NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5350, 80, 0, 23, 0, NL80211_RRF_DFS | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5470, 5725, 160, 0, 27, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5725, 5875, 80, 0, 14, 0, 0), + REG_RULE_EXT(5945, 6425, 160, 0, 23, 0, NL80211_RRF_NO_OUTDOOR | 0), + REG_RULE_EXT(57000, 66000, 2160, 0, 40, 0, 0), + }, + .n_reg_rules = 7}; + +static const struct ieee80211_regdomain regdom_HT = { + .alpha2 = "HT", + .dfs_region = NL80211_DFS_FCC, + .reg_rules = + { + REG_RULE_EXT(2402, 2472, 40, 0, 30, 0, 0), + REG_RULE_EXT(5170, 5250, 80, 0, 24, 0, NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5330, 80, 0, 24, 0, + NL80211_RRF_DFS | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5490, 5730, 160, 0, 24, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5735, 5835, 80, 0, 30, 0, 0), + }, + .n_reg_rules = 5}; + +static const struct ieee80211_regdomain regdom_HU = { + .alpha2 = "HU", + .dfs_region = NL80211_DFS_ETSI, + .reg_rules = + { + REG_RULE_EXT(2400, 2483, 40, 0, 20, 0, 0), + REG_RULE_EXT(5150, 5250, 80, 0, 23, 0, + NL80211_RRF_NO_OUTDOOR | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5350, 80, 0, 20, 0, + NL80211_RRF_NO_OUTDOOR | NL80211_RRF_DFS | + NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5470, 5725, 160, 0, 27, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5725, 5875, 80, 0, 14, 0, 0), + REG_RULE_EXT(5945, 6425, 160, 
0, 23, 0, NL80211_RRF_NO_OUTDOOR | 0), + REG_RULE_EXT(57000, 66000, 2160, 0, 40, 0, 0), + }, + .n_reg_rules = 7}; + +static const struct ieee80211_regdomain regdom_ID = { + .alpha2 = "ID", + .dfs_region = NL80211_DFS_JP, + .reg_rules = + { + REG_RULE_EXT(2400, 2483, 40, 0, 27, 0, NL80211_RRF_NO_OUTDOOR | 0), + REG_RULE_EXT(5150, 5350, 80, 0, 23, 0, NL80211_RRF_NO_OUTDOOR | 0), + REG_RULE_EXT(5725, 5825, 80, 0, 23, 0, NL80211_RRF_NO_OUTDOOR | 0), + }, + .n_reg_rules = 3}; + +static const struct ieee80211_regdomain regdom_IE = { + .alpha2 = "IE", + .dfs_region = NL80211_DFS_ETSI, + .reg_rules = + { + REG_RULE_EXT(2400, 2483, 40, 0, 20, 0, 0), + REG_RULE_EXT(5150, 5250, 80, 0, 23, 0, + NL80211_RRF_NO_OUTDOOR | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5350, 80, 0, 20, 0, + NL80211_RRF_NO_OUTDOOR | NL80211_RRF_DFS | + NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5470, 5725, 160, 0, 27, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5725, 5875, 80, 0, 14, 0, 0), + REG_RULE_EXT(5945, 6425, 160, 0, 23, 0, NL80211_RRF_NO_OUTDOOR | 0), + REG_RULE_EXT(57000, 66000, 2160, 0, 40, 0, 0), + }, + .n_reg_rules = 7}; + +static const struct ieee80211_regdomain regdom_IL = { + .alpha2 = "IL", + .dfs_region = NL80211_DFS_ETSI, + .reg_rules = + { + REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0), + REG_RULE_EXT(5150, 5250, 80, 0, 23, 0, + NL80211_RRF_NO_OUTDOOR | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5350, 80, 0, 23, 0, + NL80211_RRF_NO_OUTDOOR | NL80211_RRF_DFS | + NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5470, 5725, 160, 0, 27, 0, + NL80211_RRF_NO_OUTDOOR | NL80211_RRF_AUTO_BW | NL80211_RRF_DFS | 0), + REG_RULE_EXT(5725, 5875, 80, 0, 23, 0, NL80211_RRF_AUTO_BW | 0), + }, + .n_reg_rules = 5}; + +static const struct ieee80211_regdomain regdom_IN = { + .alpha2 = "IN", + .reg_rules = + { + REG_RULE_EXT(2402, 2482, 40, 0, 30, 0, 0), + REG_RULE_EXT(5150, 5250, 80, 0, 30, 0, 0), + REG_RULE_EXT(5250, 5350, 80, 0, 24, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5470, 5725, 160, 0, 24, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5725, 5875, 80, 0, 30, 0, 0), + }, + .n_reg_rules = 5}; + +//kernel undefined +static const struct ieee80211_regdomain regdom_IQ = { + .alpha2 = "IQ", + .dfs_region = NL80211_DFS_ETSI, + .reg_rules = + { + REG_RULE_EXT(2400, 2483, 40, 0, 20, 0, 0), + REG_RULE_EXT(5150, 5250, 80, 0, 23, 0, + NL80211_RRF_NO_OUTDOOR | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5350, 80, 0, 20, 0, + NL80211_RRF_NO_OUTDOOR | NL80211_RRF_DFS | + NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5470, 5725, 160, 0, 27, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5725, 5875, 80, 0, 14, 0, 0), + REG_RULE_EXT(5945, 6425, 160, 0, 23, 0, NL80211_RRF_NO_OUTDOOR | 0), + REG_RULE_EXT(57000, 66000, 2160, 0, 40, 0, 0), + }, + .n_reg_rules = 7}; + +static const struct ieee80211_regdomain regdom_IR = { + .alpha2 = "IR", + .dfs_region = NL80211_DFS_JP, + .reg_rules = + { + REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0), + REG_RULE_EXT(5735, 5835, 80, 0, 30, 0, 0), + }, + .n_reg_rules = 2}; + +static const struct ieee80211_regdomain regdom_IS = { + .alpha2 = "IS", + .dfs_region = NL80211_DFS_ETSI, + .reg_rules = + { + REG_RULE_EXT(2400, 2483, 40, 0, 20, 0, 0), + REG_RULE_EXT(5150, 5250, 80, 0, 23, 0, NL80211_RRF_AUTO_BW | NL80211_RRF_NO_OUTDOOR | 0), + REG_RULE_EXT(5250, 5350, 80, 0, 20, 0, + NL80211_RRF_DFS | NL80211_RRF_AUTO_BW | NL80211_RRF_NO_OUTDOOR | 0), + REG_RULE_EXT(5470, 5725, 160, 0, 27, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5725, 5875, 80, 0, 14, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(57000, 66000, 2160, 0, 40, 0, 0), + }, + .n_reg_rules = 
6}; + +static const struct ieee80211_regdomain regdom_IT = { + .alpha2 = "IT", + .dfs_region = NL80211_DFS_ETSI, + .reg_rules = + { + REG_RULE_EXT(2400, 2483, 40, 0, 20, 0, 0), + REG_RULE_EXT(5150, 5250, 80, 0, 23, 0, + NL80211_RRF_NO_OUTDOOR | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5350, 80, 0, 20, 0, + NL80211_RRF_NO_OUTDOOR | NL80211_RRF_DFS | + NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5470, 5725, 160, 0, 27, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5725, 5875, 80, 0, 14, 0, 0), + REG_RULE_EXT(5945, 6425, 160, 0, 23, 0, NL80211_RRF_NO_OUTDOOR | 0), + REG_RULE_EXT(57000, 66000, 2160, 0, 40, 0, 0), + }, + .n_reg_rules = 5}; + +static const struct ieee80211_regdomain regdom_JM = { + .alpha2 = "JM", + .dfs_region = NL80211_DFS_FCC, + .reg_rules = + { + REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0), + REG_RULE_EXT(5170, 5250, 80, 0, 17, 0, NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5330, 80, 0, 24, 0, + NL80211_RRF_DFS | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5490, 5730, 160, 0, 24, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5735, 5835, 80, 0, 30, 0, 0), + }, + .n_reg_rules = 5}; + +static const struct ieee80211_regdomain regdom_JO = { + .alpha2 = "JO", + .dfs_region = NL80211_DFS_JP, + .reg_rules = + { + REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0), + REG_RULE_EXT(5170, 5250, 80, 0, 23, 0, 0), + REG_RULE_EXT(5735, 5835, 80, 0, 23, 0, 0), + }, + .n_reg_rules = 3}; + +static const struct ieee80211_regdomain regdom_JP = { + .alpha2 = "JP", + .dfs_region = NL80211_DFS_JP, + .reg_rules = + { + REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0), + REG_RULE_EXT(2474, 2494, 20, 0, 20, 0, NL80211_RRF_NO_OFDM | 0), + REG_RULE_EXT(4910, 4990, 40, 0, 23, 0, 0), + REG_RULE_EXT(5170, 5250, 80, 0, 20, 0, NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5330, 80, 0, 20, 0, + NL80211_RRF_DFS | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5490, 5730, 160, 0, 23, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5925, 6425, 320, 0, 23, 0, NL80211_RRF_NO_OUTDOOR | 0), + REG_RULE_EXT(57000, 66000, 2160, 0, 10, 0, 0), + }, + .n_reg_rules = 8}; + +static const struct ieee80211_regdomain regdom_KE = { + .alpha2 = "KE", + .dfs_region = NL80211_DFS_JP, + .reg_rules = + { + REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0), + REG_RULE_EXT(5170, 5250, 80, 0, 23, 0, 0), + REG_RULE_EXT(5490, 5570, 80, 0, 30, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5735, 5775, 40, 0, 23, 0, 0), + }, + .n_reg_rules = 4}; + +//kernel undefined +static const struct ieee80211_regdomain regdom_KG = { + .alpha2 = "KG", + .dfs_region = NL80211_DFS_ETSI, + .reg_rules = + { + REG_RULE_EXT(2400, 2483, 40, 0, 20, 0, 0), + REG_RULE_EXT(5150, 5250, 80, 0, 23, 0, + NL80211_RRF_NO_OUTDOOR | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5350, 80, 0, 20, 0, + NL80211_RRF_NO_OUTDOOR | NL80211_RRF_DFS | + NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5470, 5725, 160, 0, 27, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5725, 5875, 80, 0, 14, 0, 0), + REG_RULE_EXT(5945, 6425, 160, 0, 23, 0, NL80211_RRF_NO_OUTDOOR | 0), + REG_RULE_EXT(57000, 66000, 2160, 0, 40, 0, 0), + }, + .n_reg_rules = 7}; + +static const struct ieee80211_regdomain regdom_KH = { + .alpha2 = "KH", + .dfs_region = NL80211_DFS_ETSI, + .reg_rules = + { + REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0), + REG_RULE_EXT(5170, 5250, 80, 0, 20, 0, NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5330, 80, 0, 20, 0, + NL80211_RRF_DFS | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5490, 5710, 160, 0, 27, 0, NL80211_RRF_DFS | 0), + }, + .n_reg_rules = 4}; + +static const struct ieee80211_regdomain regdom_KN = { + .alpha2 = "KN", + .dfs_region = 
NL80211_DFS_ETSI, + .reg_rules = + { + REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0), + REG_RULE_EXT(5170, 5250, 80, 0, 20, 0, NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5330, 80, 0, 20, 0, + NL80211_RRF_DFS | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5490, 5710, 160, 0, 30, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5735, 5815, 80, 0, 30, 0, 0), + }, + .n_reg_rules = 5}; + +static const struct ieee80211_regdomain regdom_KP = { + .alpha2 = "KP", + .dfs_region = NL80211_DFS_JP, + .reg_rules = + { + REG_RULE_EXT(2402, 2482, 20, 0, 20, 0, 0), + REG_RULE_EXT(5170, 5250, 20, 0, 20, 0, 0), + REG_RULE_EXT(5250, 5330, 20, 0, 20, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5490, 5630, 20, 0, 30, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5735, 5815, 20, 0, 30, 0, 0), + }, + .n_reg_rules = 5}; + +static const struct ieee80211_regdomain regdom_KR = { + .alpha2 = "KR", + .dfs_region = NL80211_DFS_JP, + .reg_rules = + { + REG_RULE_EXT(2400, 2483, 40, 0, 23, 0, 0), + REG_RULE_EXT(5150, 5230, 40, 0, 23, 0, NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5230, 5250, 20, 0, 17, 0, NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5350, 80, 0, 20, 0, NL80211_RRF_DFS | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5470, 5725, 160, 0, 20, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5725, 5850, 80, 0, 23, 0, 0), + REG_RULE_EXT(5925, 7125, 160, 0, 15, 0, NL80211_RRF_NO_OUTDOOR | 0), + REG_RULE_EXT(57000, 66000, 2160, 0, 43, 0, 0), + }, + .n_reg_rules = 8}; + +static const struct ieee80211_regdomain regdom_KW = { + .alpha2 = "KW", + .dfs_region = NL80211_DFS_ETSI, + .reg_rules = + { + REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0), + REG_RULE_EXT(5170, 5250, 80, 0, 20, 0, NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5330, 80, 0, 20, 0, + NL80211_RRF_DFS | NL80211_RRF_AUTO_BW | 0), + }, + .n_reg_rules = 3}; + +static const struct ieee80211_regdomain regdom_KY = { + .alpha2 = "KY", + .dfs_region = NL80211_DFS_FCC, + .reg_rules = + { + REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0), + REG_RULE_EXT(5170, 5250, 80, 0, 24, 0, NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5330, 80, 0, 24, 0, + NL80211_RRF_DFS | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5490, 5730, 160, 0, 24, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5735, 5835, 80, 0, 30, 0, 0), + }, + .n_reg_rules = 5}; + +static const struct ieee80211_regdomain regdom_KZ = { + .alpha2 = "KZ", + .dfs_region = NL80211_DFS_ETSI, + .reg_rules = + { + REG_RULE_EXT(2400, 2483, 40, 0, 20, 0, 0), + REG_RULE_EXT(5150, 5250, 80, 0, 23, 0, + NL80211_RRF_NO_OUTDOOR | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5350, 80, 0, 20, 0, + NL80211_RRF_NO_OUTDOOR | NL80211_RRF_DFS | + NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5470, 5725, 160, 0, 20, 0, NL80211_RRF_DFS | NL80211_RRF_NO_OUTDOOR | 0), + REG_RULE_EXT(5725, 5850, 80, 0, 20, 0, NL80211_RRF_NO_OUTDOOR | 0), + REG_RULE_EXT(57000, 66000, 2160, 0, 40, 0, 0), + }, + .n_reg_rules = 6}; + +static const struct ieee80211_regdomain regdom_LB = { + .alpha2 = "LB", + .dfs_region = NL80211_DFS_FCC, + .reg_rules = + { + REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0), + REG_RULE_EXT(5170, 5250, 80, 0, 17, 0, NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5330, 80, 0, 24, 0, + NL80211_RRF_DFS | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5490, 5730, 160, 0, 24, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5735, 5835, 80, 0, 30, 0, 0), + }, + .n_reg_rules = 5}; + +static const struct ieee80211_regdomain regdom_LC = { + .alpha2 = "LC", + .dfs_region = NL80211_DFS_ETSI, + .reg_rules = + { + REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0), + REG_RULE_EXT(5170, 5250, 80, 0, 20, 0, NL80211_RRF_AUTO_BW | 
0), + REG_RULE_EXT(5250, 5330, 80, 0, 20, 0, + NL80211_RRF_DFS | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5490, 5710, 160, 0, 30, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5735, 5815, 80, 0, 30, 0, 0), + }, + .n_reg_rules = 5}; + +static const struct ieee80211_regdomain regdom_LI = { + .alpha2 = "LI", + .dfs_region = NL80211_DFS_ETSI, + .reg_rules = + { + REG_RULE_EXT(2400, 2483, 40, 0, 20, 0, 0), + REG_RULE_EXT(5150, 5250, 80, 0, 23, 0, NL80211_RRF_AUTO_BW | NL80211_RRF_NO_OUTDOOR | 0), + REG_RULE_EXT(5250, 5350, 80, 0, 20, 0, + NL80211_RRF_NO_OUTDOOR | NL80211_RRF_DFS | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5470, 5725, 160, 0, 27, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5725, 5875, 80, 0, 14, 0, 0), + REG_RULE_EXT(57000, 66000, 2160, 0, 40, 0, 0), + }, + .n_reg_rules = 6}; + +static const struct ieee80211_regdomain regdom_LK = { + .alpha2 = "LK", + .dfs_region = NL80211_DFS_FCC, + .reg_rules = + { + REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0), + REG_RULE_EXT(5170, 5250, 20, 0, 17, 0, 0), + REG_RULE_EXT(5250, 5330, 20, 0, 24, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5490, 5730, 20, 0, 24, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5735, 5835, 20, 0, 30, 0, 0), + }, + .n_reg_rules = 5}; + +static const struct ieee80211_regdomain regdom_LS = { + .alpha2 = "LS", + .dfs_region = NL80211_DFS_ETSI, + .reg_rules = + { + REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0), + REG_RULE_EXT(5170, 5250, 80, 0, 20, 0, NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5330, 80, 0, 20, 0, + NL80211_RRF_DFS | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5490, 5710, 160, 0, 27, 0, NL80211_RRF_DFS | 0), + }, + .n_reg_rules = 4}; + +static const struct ieee80211_regdomain regdom_LT = { + .alpha2 = "LT", + .dfs_region = NL80211_DFS_ETSI, + .reg_rules = + { + REG_RULE_EXT(2400, 2483, 40, 0, 20, 0, 0), + REG_RULE_EXT(5150, 5250, 80, 0, 23, 0, + NL80211_RRF_NO_OUTDOOR | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5350, 80, 0, 20, 0, + NL80211_RRF_NO_OUTDOOR | NL80211_RRF_DFS | + NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5470, 5725, 160, 0, 27, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5725, 5875, 80, 0, 14, 0, 0), + REG_RULE_EXT(5945, 6425, 160, 0, 23, 0, NL80211_RRF_NO_OUTDOOR | 0), + REG_RULE_EXT(57000, 66000, 2160, 0, 40, 0, 0), + }, + .n_reg_rules = 7}; + +static const struct ieee80211_regdomain regdom_LU = { + .alpha2 = "LU", + .dfs_region = NL80211_DFS_ETSI, + .reg_rules = + { + REG_RULE_EXT(2400, 2483, 40, 0, 20, 0, 0), + REG_RULE_EXT(5150, 5250, 80, 0, 23, 0, + NL80211_RRF_NO_OUTDOOR | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5350, 80, 0, 20, 0, + NL80211_RRF_NO_OUTDOOR | NL80211_RRF_DFS | + NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5470, 5725, 160, 0, 27, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5725, 5875, 80, 0, 14, 0, 0), + REG_RULE_EXT(5945, 6425, 160, 0, 23, 0, NL80211_RRF_NO_OUTDOOR | 0), + REG_RULE_EXT(57000, 66000, 2160, 0, 40, 0, 0), + }, + .n_reg_rules = 7}; + +static const struct ieee80211_regdomain regdom_LV = { + .alpha2 = "LV", + .dfs_region = NL80211_DFS_ETSI, + .reg_rules = + { + REG_RULE_EXT(2400, 2483, 40, 0, 20, 0, 0), + REG_RULE_EXT(5150, 5250, 80, 0, 23, 0, + NL80211_RRF_NO_OUTDOOR | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5350, 80, 0, 20, 0, + NL80211_RRF_NO_OUTDOOR | NL80211_RRF_DFS | + NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5470, 5725, 160, 0, 27, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5725, 5875, 80, 0, 14, 0, 0), + REG_RULE_EXT(5945, 6425, 160, 0, 23, 0, NL80211_RRF_NO_OUTDOOR | 0), + REG_RULE_EXT(57000, 66000, 2160, 0, 40, 0, 0), + }, + .n_reg_rules = 7}; + +//kernel undefined 
+static const struct ieee80211_regdomain regdom_LY = { + .alpha2 = "LY", + .dfs_region = NL80211_DFS_ETSI, + .reg_rules = + { + REG_RULE_EXT(2400, 2483, 40, 0, 20, 0, 0), + REG_RULE_EXT(5150, 5250, 80, 0, 23, 0, + NL80211_RRF_NO_OUTDOOR | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5350, 80, 0, 20, 0, + NL80211_RRF_NO_OUTDOOR | NL80211_RRF_DFS | + NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5470, 5725, 160, 0, 27, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5725, 5875, 80, 0, 14, 0, 0), + REG_RULE_EXT(5945, 6425, 160, 0, 23, 0, NL80211_RRF_NO_OUTDOOR | 0), + REG_RULE_EXT(57000, 66000, 2160, 0, 40, 0, 0), + }, + .n_reg_rules = 7}; + +static const struct ieee80211_regdomain regdom_MA = { + .alpha2 = "MA", + .dfs_region = NL80211_DFS_ETSI, + .reg_rules = + { + REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0), + REG_RULE_EXT(5170, 5250, 80, 0, 20, 0, NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5330, 80, 0, 20, 0, + NL80211_RRF_DFS | NL80211_RRF_AUTO_BW | 0), + }, + .n_reg_rules = 3}; + +static const struct ieee80211_regdomain regdom_MC = { + .alpha2 = "MC", + .dfs_region = NL80211_DFS_ETSI, + .reg_rules = + { + REG_RULE_EXT(2400, 2483, 40, 0, 20, 0, 0), + REG_RULE_EXT(5150, 5250, 80, 0, 23, 0, + NL80211_RRF_NO_OUTDOOR | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5350, 80, 0, 20, 0, + NL80211_RRF_NO_OUTDOOR | NL80211_RRF_DFS | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5470, 5725, 160, 0, 27, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5725, 5875, 80, 0, 14, 0, 0), + }, + .n_reg_rules = 5}; + +static const struct ieee80211_regdomain regdom_MD = { + .alpha2 = "MD", + .dfs_region = NL80211_DFS_ETSI, + .reg_rules = + { + REG_RULE_EXT(2400, 2483, 40, 0, 20, 0, 0), + REG_RULE_EXT(5150, 5250, 80, 0, 23, 0, + NL80211_RRF_NO_OUTDOOR | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5350, 80, 0, 20, 0, + NL80211_RRF_NO_OUTDOOR | NL80211_RRF_DFS | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5470, 5725, 160, 0, 27, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5725, 5875, 80, 0, 14, 0, 0), + }, + .n_reg_rules = 5}; + +static const struct ieee80211_regdomain regdom_ME = { + .alpha2 = "ME", + .dfs_region = NL80211_DFS_ETSI, + .reg_rules = + { + REG_RULE_EXT(2400, 2483, 40, 0, 20, 0, 0), + REG_RULE_EXT(5150, 5250, 80, 0, 23, 0, + NL80211_RRF_NO_OUTDOOR | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5350, 80, 0, 20, 0, + NL80211_RRF_NO_OUTDOOR | NL80211_RRF_DFS | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5470, 5725, 160, 0, 27, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5725, 5875, 80, 0, 14, 0, 0), + }, + .n_reg_rules = 5}; + +static const struct ieee80211_regdomain regdom_MF = { + .alpha2 = "MF", + .dfs_region = NL80211_DFS_ETSI, + .reg_rules = + { + REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0), + REG_RULE_EXT(5170, 5250, 80, 0, 20, 0, NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5330, 80, 0, 20, 0, + NL80211_RRF_DFS | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5490, 5710, 160, 0, 27, 0, NL80211_RRF_DFS | 0), + }, + .n_reg_rules = 4}; + +static const struct ieee80211_regdomain regdom_MH = { + .alpha2 = "MH", + .dfs_region = NL80211_DFS_FCC, + .reg_rules = + { + REG_RULE_EXT(2402, 2472, 40, 0, 30, 0, 0), + REG_RULE_EXT(5170, 5250, 80, 0, 24, 0, NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5330, 80, 0, 24, 0, + NL80211_RRF_DFS | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5490, 5730, 160, 0, 24, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5735, 5835, 80, 0, 30, 0, 0), + }, + .n_reg_rules = 5}; + +static const struct ieee80211_regdomain regdom_MK = { + .alpha2 = "MK", + .dfs_region = NL80211_DFS_ETSI, + .reg_rules = + { + 
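+            // 2.4 GHz band, channels 1-13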
REG_RULE_EXT(2400, 2483, 40, 0, 20, 0, 0), + REG_RULE_EXT(5150, 5250, 80, 0, 23, 0, + NL80211_RRF_NO_OUTDOOR | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5350, 80, 0, 20, 0, + NL80211_RRF_NO_OUTDOOR | NL80211_RRF_DFS | + NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5470, 5725, 160, 0, 27, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5725, 5875, 80, 0, 14, 0, 0), + REG_RULE_EXT(57000, 66000, 2160, 0, 40, 0, 0), + }, + .n_reg_rules = 6}; + +static const struct ieee80211_regdomain regdom_MN = { + .alpha2 = "MN", + .dfs_region = NL80211_DFS_FCC, + .reg_rules = + { + REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0), + REG_RULE_EXT(5170, 5250, 80, 0, 24, 0, NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5330, 80, 0, 24, 0, + NL80211_RRF_DFS | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5490, 5730, 160, 0, 24, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5735, 5835, 80, 0, 30, 0, 0), + }, + .n_reg_rules = 5}; + +static const struct ieee80211_regdomain regdom_MO = { + .alpha2 = "MO", + .dfs_region = NL80211_DFS_FCC, + .reg_rules = + { + REG_RULE_EXT(2402, 2482, 40, 0, 23, 0, 0), + REG_RULE_EXT(5170, 5250, 80, 0, 23, 0, NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5330, 80, 0, 23, 0, NL80211_RRF_DFS | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5490, 5730, 160, 0, 30, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5735, 5835, 80, 0, 30, 0, 0), + }, + .n_reg_rules = 5}; + +static const struct ieee80211_regdomain regdom_MP = { + .alpha2 = "MP", + .dfs_region = NL80211_DFS_FCC, + .reg_rules = + { + REG_RULE_EXT(2402, 2472, 40, 0, 30, 0, 0), + REG_RULE_EXT(5170, 5250, 80, 0, 24, 0, NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5330, 80, 0, 24, 0, + NL80211_RRF_DFS | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5490, 5730, 160, 0, 24, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5735, 5835, 80, 0, 30, 0, 0), + }, + .n_reg_rules = 5}; + +static const struct ieee80211_regdomain regdom_MQ = { + .alpha2 = "MQ", + .dfs_region = NL80211_DFS_ETSI, + .reg_rules = + { + REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0), + REG_RULE_EXT(5170, 5250, 80, 0, 20, 0, NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5330, 80, 0, 20, 0, + NL80211_RRF_DFS | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5490, 5710, 160, 0, 27, 0, NL80211_RRF_DFS | 0), + }, + .n_reg_rules = 4}; + +static const struct ieee80211_regdomain regdom_MR = { + .alpha2 = "MR", + .dfs_region = NL80211_DFS_ETSI, + .reg_rules = + { + REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0), + REG_RULE_EXT(5170, 5250, 80, 0, 20, 0, NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5330, 80, 0, 20, 0, + NL80211_RRF_DFS | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5490, 5710, 160, 0, 27, 0, NL80211_RRF_DFS | 0), + }, + .n_reg_rules = 4}; + +static const struct ieee80211_regdomain regdom_MT = { + .alpha2 = "MT", + .dfs_region = NL80211_DFS_ETSI, + .reg_rules = + { + REG_RULE_EXT(2400, 2483, 40, 0, 20, 0, 0), + REG_RULE_EXT(5150, 5250, 80, 0, 23, 0, + NL80211_RRF_NO_OUTDOOR | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5350, 80, 0, 20, 0, + NL80211_RRF_NO_OUTDOOR | NL80211_RRF_DFS | + NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5470, 5725, 160, 0, 27, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5725, 5875, 80, 0, 14, 0, 0), + REG_RULE_EXT(5945, 6425, 160, 0, 23, 0, NL80211_RRF_NO_OUTDOOR | 0), + REG_RULE_EXT(57000, 66000, 2160, 0, 40, 0, 0), + }, + .n_reg_rules = 7}; + +static const struct ieee80211_regdomain regdom_MU = { + .alpha2 = "MU", + .dfs_region = NL80211_DFS_FCC, + .reg_rules = + { + REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0), + REG_RULE_EXT(5170, 5250, 80, 0, 24, 0, NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5330, 
80, 0, 24, 0, + NL80211_RRF_DFS | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5490, 5730, 160, 0, 24, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5735, 5835, 80, 0, 30, 0, 0), + }, + .n_reg_rules = 5}; + +static const struct ieee80211_regdomain regdom_MV = { + .alpha2 = "MV", + .dfs_region = NL80211_DFS_ETSI, + .reg_rules = + { + REG_RULE_EXT(2400, 2483, 40, 0, 20, 0, 0), + REG_RULE_EXT(5150, 5250, 80, 0, 23, 0, NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5350, 80, 0, 20, 0, + NL80211_RRF_DFS | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5725, 5850, 80, 0, 20, 0, 0), + }, + .n_reg_rules = 4}; + +static const struct ieee80211_regdomain regdom_MW = { + .alpha2 = "MW", + .dfs_region = NL80211_DFS_ETSI, + .reg_rules = + { + REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0), + REG_RULE_EXT(5170, 5250, 80, 0, 20, 0, NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5330, 80, 0, 20, 0, + NL80211_RRF_DFS | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5490, 5710, 160, 0, 27, 0, NL80211_RRF_DFS | 0), + }, + .n_reg_rules = 4}; + +static const struct ieee80211_regdomain regdom_MX = { + .alpha2 = "MX", + .dfs_region = NL80211_DFS_FCC, + .reg_rules = + { + REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0), + REG_RULE_EXT(5170, 5250, 80, 0, 17, 0, NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5330, 80, 0, 24, 0, + NL80211_RRF_DFS | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5490, 5730, 160, 0, 24, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5735, 5835, 80, 0, 30, 0, 0), + }, + .n_reg_rules = 5}; + +//Certified laboratory requirements +static const struct ieee80211_regdomain regdom_MY = { + .alpha2 = "MY", + .dfs_region = NL80211_DFS_ETSI, + .reg_rules = + { + REG_RULE_EXT(2400, 2483, 40, 0, 20, 0, 0), + REG_RULE_EXT(5150, 5250, 80, 0, 23, 0, + NL80211_RRF_NO_OUTDOOR | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5350, 80, 0, 20, 0, + NL80211_RRF_NO_OUTDOOR | NL80211_RRF_DFS | + NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5470, 5725, 160, 0, 27, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5725, 5875, 80, 0, 14, 0, 0), + REG_RULE_EXT(5945, 6425, 160, 0, 23, 0, NL80211_RRF_NO_OUTDOOR | 0), + REG_RULE_EXT(57000, 66000, 2160, 0, 40, 0, 0), + }, + .n_reg_rules = 7}; + +//kernel undefined +static const struct ieee80211_regdomain regdom_NA = { + .alpha2 = "NA", + .dfs_region = NL80211_DFS_ETSI, + .reg_rules = + { + REG_RULE_EXT(2400, 2483, 40, 0, 20, 0, 0), + REG_RULE_EXT(5150, 5250, 80, 0, 23, 0, + NL80211_RRF_NO_OUTDOOR | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5350, 80, 0, 20, 0, + NL80211_RRF_NO_OUTDOOR | NL80211_RRF_DFS | + NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5470, 5725, 160, 0, 27, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5725, 5875, 80, 0, 14, 0, 0), + REG_RULE_EXT(5945, 6425, 160, 0, 23, 0, NL80211_RRF_NO_OUTDOOR | 0), + REG_RULE_EXT(57000, 66000, 2160, 0, 40, 0, 0), + }, + .n_reg_rules = 7}; + +static const struct ieee80211_regdomain regdom_NG = { + .alpha2 = "NG", + .dfs_region = NL80211_DFS_ETSI, + .reg_rules = + { + REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0), + REG_RULE_EXT(5250, 5330, 80, 0, 20, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5735, 5835, 80, 0, 30, 0, 0), + }, + .n_reg_rules = 3}; + +static const struct ieee80211_regdomain regdom_NI = { + .alpha2 = "NI", + .dfs_region = NL80211_DFS_FCC, + .reg_rules = + { + REG_RULE_EXT(2402, 2472, 40, 0, 30, 0, 0), + REG_RULE_EXT(5170, 5250, 80, 0, 24, 0, NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5330, 80, 0, 24, 0, + NL80211_RRF_DFS | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5490, 5730, 160, 0, 24, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5735, 5835, 80, 0, 30, 0, 0), + }, + 
.n_reg_rules = 5}; + +static const struct ieee80211_regdomain regdom_NL = { + .alpha2 = "NL", + .dfs_region = NL80211_DFS_ETSI, + .reg_rules = + { + REG_RULE_EXT(2400, 2483, 40, 0, 20, 0, 0), + REG_RULE_EXT(5150, 5250, 80, 0, 23, 0, + NL80211_RRF_NO_OUTDOOR | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5350, 80, 0, 20, 0, + NL80211_RRF_NO_OUTDOOR | NL80211_RRF_DFS | + NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5470, 5725, 160, 0, 27, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5725, 5875, 80, 0, 14, 0, 0), + REG_RULE_EXT(5945, 6425, 160, 0, 23, 0, NL80211_RRF_NO_OUTDOOR | 0), + REG_RULE_EXT(57000, 66000, 2160, 0, 40, 0, 0), + }, + .n_reg_rules = 7}; + +static const struct ieee80211_regdomain regdom_NO = { + .alpha2 = "NO", + .dfs_region = NL80211_DFS_ETSI, + .reg_rules = + { + REG_RULE_EXT(2400, 2483, 40, 0, 20, 0, 0), + REG_RULE_EXT(5150, 5250, 80, 0, 23, 0, + NL80211_RRF_NO_OUTDOOR | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5350, 80, 0, 20, 0, + NL80211_RRF_NO_OUTDOOR | NL80211_RRF_DFS | + NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5470, 5725, 160, 0, 27, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5725, 5875, 80, 0, 14, 0, 0), + REG_RULE_EXT(5945, 6425, 160, 0, 23, 0, NL80211_RRF_NO_OUTDOOR | 0), + REG_RULE_EXT(57000, 71000, 2160, 0, 40, 0, 0), + }, + .n_reg_rules = 7}; + +static const struct ieee80211_regdomain regdom_NP = { + .alpha2 = "NP", + .dfs_region = NL80211_DFS_JP, + .reg_rules = + { + REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0), + REG_RULE_EXT(5170, 5250, 80, 0, 20, 0, NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5330, 80, 0, 20, 0, + NL80211_RRF_DFS | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5735, 5835, 80, 0, 20, 0, 0), + }, + .n_reg_rules = 4}; + +static const struct ieee80211_regdomain regdom_NZ = { + .alpha2 = "NZ", + .dfs_region = NL80211_DFS_ETSI, + .reg_rules = + { + REG_RULE_EXT(2402, 2482, 40, 0, 30, 0, 0), + REG_RULE_EXT(5170, 5250, 80, 0, 17, 0, NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5330, 80, 0, 24, 0, + NL80211_RRF_DFS | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5490, 5730, 160, 0, 24, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5735, 5835, 80, 0, 30, 0, 0), + }, + .n_reg_rules = 5}; + +static const struct ieee80211_regdomain regdom_OM = { + .alpha2 = "OM", + .dfs_region = NL80211_DFS_ETSI, + .reg_rules = + { + REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0), + REG_RULE_EXT(5170, 5250, 80, 0, 20, 0, NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5330, 80, 0, 20, 0, + NL80211_RRF_DFS | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5490, 5710, 160, 0, 27, 0, NL80211_RRF_DFS | 0), + }, + .n_reg_rules = 4}; + +static const struct ieee80211_regdomain regdom_PA = { + .alpha2 = "PA", + .dfs_region = NL80211_DFS_FCC, + .reg_rules = + { + REG_RULE_EXT(2400, 2483, 40, 0, 36, 0, 0), + REG_RULE_EXT(5150, 5250, 80, 0, 36, 0, NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5350, 80, 0, 30, 0, NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5470, 5725, 160, 0, 30, 0, 0), + REG_RULE_EXT(5725, 5850, 80, 0, 36, 0, 0), + REG_RULE_EXT(57000, 64000, 2160, 0, 43, 0, 0), + }, + .n_reg_rules = 6}; + +//Certified laboratory requirements +static const struct ieee80211_regdomain regdom_PE = { + .alpha2 = "PE", + .dfs_region = NL80211_DFS_ETSI, + .reg_rules = + { + REG_RULE_EXT(2400, 2483, 40, 0, 20, 0, 0), + REG_RULE_EXT(5150, 5250, 80, 0, 23, 0, + NL80211_RRF_NO_OUTDOOR | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5350, 80, 0, 20, 0, + NL80211_RRF_NO_OUTDOOR | NL80211_RRF_DFS | + NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5470, 5725, 160, 0, 27, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5725, 5875, 80, 0, 
14, 0, 0), + REG_RULE_EXT(5945, 6425, 160, 0, 23, 0, NL80211_RRF_NO_OUTDOOR | 0), + REG_RULE_EXT(57000, 66000, 2160, 0, 40, 0, 0), + }, + .n_reg_rules = 7}; + +static const struct ieee80211_regdomain regdom_PF = { + .alpha2 = "PF", + .dfs_region = NL80211_DFS_ETSI, + .reg_rules = + { + REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0), + REG_RULE_EXT(5170, 5250, 80, 0, 20, 0, NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5330, 80, 0, 20, 0, + NL80211_RRF_DFS | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5490, 5710, 160, 0, 27, 0, NL80211_RRF_DFS | 0), + }, + .n_reg_rules = 4}; + +static const struct ieee80211_regdomain regdom_PG = { + .alpha2 = "PG", + .dfs_region = NL80211_DFS_FCC, + .reg_rules = + { + REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0), + REG_RULE_EXT(5170, 5250, 80, 0, 17, 0, NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5330, 80, 0, 24, 0, + NL80211_RRF_DFS | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5490, 5730, 160, 0, 24, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5735, 5835, 80, 0, 30, 0, 0), + }, + .n_reg_rules = 5}; + +//Certified laboratory requirements +static const struct ieee80211_regdomain regdom_PH = { + .alpha2 = "PH", + .dfs_region = NL80211_DFS_ETSI, + .reg_rules = + { + REG_RULE_EXT(2400, 2483, 40, 0, 20, 0, 0), + REG_RULE_EXT(5150, 5250, 80, 0, 23, 0, + NL80211_RRF_NO_OUTDOOR | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5350, 80, 0, 20, 0, + NL80211_RRF_NO_OUTDOOR | NL80211_RRF_DFS | + NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5470, 5725, 160, 0, 27, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5725, 5875, 80, 0, 14, 0, 0), + REG_RULE_EXT(5945, 6425, 160, 0, 23, 0, NL80211_RRF_NO_OUTDOOR | 0), + REG_RULE_EXT(57000, 66000, 2160, 0, 40, 0, 0), + }, + .n_reg_rules = 7}; + +static const struct ieee80211_regdomain regdom_PK = { + .alpha2 = "PK", + .dfs_region = NL80211_DFS_JP, + .reg_rules = + { + REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0), + REG_RULE_EXT(5150, 5270, 80, 0, 23, 0, + NL80211_RRF_AUTO_BW | NL80211_RRF_NO_OUTDOOR | 0), + REG_RULE_EXT(5270, 5350, 80, 0, 20, 0, + NL80211_RRF_NO_OUTDOOR | NL80211_RRF_AUTO_BW | + NL80211_RRF_DFS | 0), + REG_RULE_EXT(5470, 5610, 80, 0, 20, 0, + NL80211_RRF_NO_OUTDOOR | NL80211_RRF_AUTO_BW | + NL80211_RRF_DFS | 0), + REG_RULE_EXT(5610, 5725, 80, 0, 23, 0, NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5725, 5875, 80, 0, 30, 0, 0), + REG_RULE_EXT(57000, 66000, 2160, 0, 40, 0, 0), + }, + .n_reg_rules = 7}; + +static const struct ieee80211_regdomain regdom_PL = { + .alpha2 = "PL", + .dfs_region = NL80211_DFS_ETSI, + .reg_rules = + { + REG_RULE_EXT(2400, 2483, 40, 0, 20, 0, 0), + REG_RULE_EXT(5150, 5250, 80, 0, 23, 0, + NL80211_RRF_NO_OUTDOOR | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5350, 80, 0, 20, 0, + NL80211_RRF_NO_OUTDOOR | NL80211_RRF_DFS | + NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5470, 5725, 160, 0, 27, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5725, 5875, 80, 0, 14, 0, 0), + REG_RULE_EXT(5945, 6425, 160, 0, 23, 0, NL80211_RRF_NO_OUTDOOR | 0), + REG_RULE_EXT(57000, 66000, 2160, 0, 40, 0, 0), + }, + .n_reg_rules = 7}; + +static const struct ieee80211_regdomain regdom_PM = { + .alpha2 = "PM", + .dfs_region = NL80211_DFS_ETSI, + .reg_rules = + { + REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0), + REG_RULE_EXT(5170, 5250, 80, 0, 20, 0, NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5330, 80, 0, 20, 0, + NL80211_RRF_DFS | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5490, 5710, 160, 0, 27, 0, NL80211_RRF_DFS | 0), + }, + .n_reg_rules = 4}; + +static const struct ieee80211_regdomain regdom_PR = { + .alpha2 = "PR", + .dfs_region = NL80211_DFS_FCC, + 
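+	/*
+	 * dfs_region selects which radar-detection (DFS) regulatory regime the
+	 * DFS-flagged rules below are certified against: NL80211_DFS_FCC,
+	 * NL80211_DFS_ETSI or NL80211_DFS_JP in these tables.
+	 */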
.reg_rules = + { + REG_RULE_EXT(2402, 2472, 40, 0, 30, 0, 0), + REG_RULE_EXT(5170, 5250, 80, 0, 17, 0, NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5330, 80, 0, 24, 0, + NL80211_RRF_DFS | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5490, 5730, 160, 0, 24, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5735, 5835, 80, 0, 30, 0, 0), + }, + .n_reg_rules = 5}; + +static const struct ieee80211_regdomain regdom_PT = { + .alpha2 = "PT", + .dfs_region = NL80211_DFS_ETSI, + .reg_rules = + { + REG_RULE_EXT(2400, 2483, 40, 0, 20, 0, 0), + REG_RULE_EXT(5150, 5250, 80, 0, 23, 0, + NL80211_RRF_NO_OUTDOOR | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5350, 80, 0, 20, 0, + NL80211_RRF_NO_OUTDOOR | NL80211_RRF_DFS | + NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5470, 5725, 160, 0, 27, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5725, 5875, 80, 0, 14, 0, 0), + REG_RULE_EXT(5945, 6425, 160, 0, 23, 0, NL80211_RRF_NO_OUTDOOR | 0), + REG_RULE_EXT(57000, 66000, 2160, 0, 40, 0, 0), + }, + .n_reg_rules = 7}; + +static const struct ieee80211_regdomain regdom_PW = { + .alpha2 = "PW", + .dfs_region = NL80211_DFS_FCC, + .reg_rules = + { + REG_RULE_EXT(2402, 2472, 40, 0, 30, 0, 0), + REG_RULE_EXT(5170, 5250, 80, 0, 24, 0, NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5330, 80, 0, 24, 0, + NL80211_RRF_DFS | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5490, 5730, 160, 0, 24, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5735, 5835, 80, 0, 30, 0, 0), + }, + .n_reg_rules = 5}; + +static const struct ieee80211_regdomain regdom_PY = { + .alpha2 = "PY", + .dfs_region = NL80211_DFS_FCC, + .reg_rules = + { + REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0), + REG_RULE_EXT(5170, 5250, 80, 0, 24, 0, NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5330, 80, 0, 24, 0, + NL80211_RRF_DFS | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5490, 5730, 160, 0, 24, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5735, 5835, 80, 0, 30, 0, 0), + }, + .n_reg_rules = 5}; + +static const struct ieee80211_regdomain regdom_QA = { + .alpha2 = "QA", + .dfs_region = NL80211_DFS_ETSI, + .reg_rules = + { + REG_RULE_EXT(2400, 2483, 40, 0, 20, 0, NL80211_RRF_NO_OUTDOOR | 0), + REG_RULE_EXT(5150, 5250, 80, 0, 23, 0, + NL80211_RRF_AUTO_BW | NL80211_RRF_NO_OUTDOOR | 0), + REG_RULE_EXT(5250, 5350, 80, 0, 23, 0, + NL80211_RRF_NO_OUTDOOR | NL80211_RRF_AUTO_BW | + NL80211_RRF_DFS | 0), + REG_RULE_EXT(5470, 5725, 160, 0, 20, 0, + NL80211_RRF_NO_OUTDOOR | NL80211_RRF_DFS | 0), + REG_RULE_EXT(5725, 5875, 80, 0, 20, 0, NL80211_RRF_NO_OUTDOOR | NL80211_RRF_DFS | 0), + REG_RULE_EXT(57000, 66000, 2160, 0, 40, 0, NL80211_RRF_NO_OUTDOOR | 0), + }, + .n_reg_rules = 6}; + +static const struct ieee80211_regdomain regdom_RE = { + .alpha2 = "RE", + .dfs_region = NL80211_DFS_ETSI, + .reg_rules = + { + REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0), + REG_RULE_EXT(5170, 5250, 80, 0, 20, 0, NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5330, 80, 0, 20, 0, + NL80211_RRF_DFS | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5490, 5710, 160, 0, 27, 0, NL80211_RRF_DFS | 0), + }, + .n_reg_rules = 4}; + +static const struct ieee80211_regdomain regdom_RO = { + .alpha2 = "RO", + .dfs_region = NL80211_DFS_ETSI, + .reg_rules = + { + REG_RULE_EXT(2400, 2483, 40, 0, 20, 0, 0), + REG_RULE_EXT(5150, 5250, 80, 0, 23, 0, + NL80211_RRF_NO_OUTDOOR | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5350, 80, 0, 20, 0, + NL80211_RRF_NO_OUTDOOR | NL80211_RRF_DFS | + NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5470, 5725, 160, 0, 27, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5725, 5875, 80, 0, 14, 0, 0), + REG_RULE_EXT(5945, 6425, 160, 0, 23, 0, 
NL80211_RRF_NO_OUTDOOR | 0), + REG_RULE_EXT(57000, 66000, 2160, 0, 40, 0, 0), + }, + .n_reg_rules = 7}; + +static const struct ieee80211_regdomain regdom_RS = { + .alpha2 = "RS", + .dfs_region = NL80211_DFS_ETSI, + .reg_rules = + { + REG_RULE_EXT(2400, 2483, 40, 0, 20, 0, 0), + REG_RULE_EXT(5150, 5250, 80, 0, 23, 0, + NL80211_RRF_NO_OUTDOOR | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5350, 80, 0, 20, 0, + NL80211_RRF_NO_OUTDOOR | NL80211_RRF_AUTO_BW | + NL80211_RRF_DFS | 0), + REG_RULE_EXT(5470, 5725, 160, 0, 27, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5725, 5875, 80, 0, 14, 0, 0), + REG_RULE_EXT(57000, 66000, 2160, 0, 40, 0, 0), + }, + .n_reg_rules = 6}; + +static const struct ieee80211_regdomain regdom_RU = { + .alpha2 = "RU", + .reg_rules = + { + REG_RULE_EXT(2400, 2483, 40, 0, 20, 0, 0), + REG_RULE_EXT(5150, 5350, 160, 0, 20, 0, NL80211_RRF_NO_OUTDOOR | 0), + REG_RULE_EXT(5650, 5850, 160, 0, 20, 0, NL80211_RRF_NO_OUTDOOR | 0), + REG_RULE_EXT(5925, 6425, 160, 0, 20, 0, NL80211_RRF_NO_OUTDOOR | 0), + REG_RULE_EXT(57000, 66000, 2160, 0, 40, 0, + NL80211_RRF_NO_OUTDOOR | 0), + }, + .n_reg_rules = 5}; + +static const struct ieee80211_regdomain regdom_RW = { + .alpha2 = "RW", + .dfs_region = NL80211_DFS_FCC, + .reg_rules = + { + REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0), + REG_RULE_EXT(5170, 5250, 80, 0, 17, 0, NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5330, 80, 0, 24, 0, + NL80211_RRF_DFS | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5490, 5730, 160, 0, 24, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5735, 5835, 80, 0, 30, 0, 0), + }, + .n_reg_rules = 5}; + +static const struct ieee80211_regdomain regdom_SA = { + .alpha2 = "SA", + .dfs_region = NL80211_DFS_ETSI, + .reg_rules = + { + REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0), + REG_RULE_EXT(5170, 5250, 80, 0, 20, 0, NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5330, 80, 0, 20, 0, + NL80211_RRF_DFS | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5490, 5710, 160, 0, 27, 0, NL80211_RRF_DFS | 0), + }, + .n_reg_rules = 4}; + +static const struct ieee80211_regdomain regdom_SE = { + .alpha2 = "SE", + .dfs_region = NL80211_DFS_ETSI, + .reg_rules = + { + REG_RULE_EXT(2400, 2483, 40, 0, 20, 0, 0), + REG_RULE_EXT(5150, 5250, 80, 0, 23, 0, + NL80211_RRF_NO_OUTDOOR | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5350, 80, 0, 20, 0, + NL80211_RRF_NO_OUTDOOR | NL80211_RRF_DFS | + NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5470, 5725, 160, 0, 27, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5725, 5875, 80, 0, 14, 0, 0), + REG_RULE_EXT(5945, 6425, 160, 0, 23, 0, NL80211_RRF_NO_OUTDOOR | 0), + REG_RULE_EXT(57000, 66000, 2160, 0, 40, 0, 0), + }, + .n_reg_rules = 7}; + +static const struct ieee80211_regdomain regdom_SG = { + .alpha2 = "SG", + .dfs_region = NL80211_DFS_FCC, + .reg_rules = + { + REG_RULE_EXT(2400, 2483, 40, 0, 23, 0, 0), + REG_RULE_EXT(5150, 5250, 80, 0, 23, 0, NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5350, 80, 0, 20, 0, + NL80211_RRF_DFS | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5470, 5730, 160, 0, 27, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5725, 5850, 80, 0, 30, 0, 0), + REG_RULE_EXT(5945, 6425, 320, 0, 24, 0, NL80211_RRF_NO_OUTDOOR | 0), + REG_RULE_EXT(57000, 66000, 2160, 0, 40, 0, 0), + }, + .n_reg_rules = 7}; + +static const struct ieee80211_regdomain regdom_SI = { + .alpha2 = "SI", + .dfs_region = NL80211_DFS_ETSI, + .reg_rules = + { + REG_RULE_EXT(2400, 2483, 40, 0, 20, 0, 0), + REG_RULE_EXT(5150, 5250, 80, 0, 23, 0, + NL80211_RRF_NO_OUTDOOR | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5350, 80, 0, 20, 0, + NL80211_RRF_NO_OUTDOOR | 
NL80211_RRF_DFS | + NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5470, 5725, 160, 0, 27, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5725, 5875, 80, 0, 14, 0, 0), + REG_RULE_EXT(5945, 6425, 160, 0, 23, 0, NL80211_RRF_NO_OUTDOOR | 0), + REG_RULE_EXT(57000, 66000, 2160, 0, 40, 0, 0), + }, + .n_reg_rules = 7}; + +static const struct ieee80211_regdomain regdom_SK = { + .alpha2 = "SK", + .dfs_region = NL80211_DFS_ETSI, + .reg_rules = + { + REG_RULE_EXT(2400, 2483, 40, 0, 20, 0, 0), + REG_RULE_EXT(5150, 5250, 80, 0, 23, 0, + NL80211_RRF_NO_OUTDOOR | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5350, 80, 0, 20, 0, + NL80211_RRF_NO_OUTDOOR | NL80211_RRF_DFS | + NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5470, 5725, 160, 0, 27, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5725, 5875, 80, 0, 14, 0, 0), + REG_RULE_EXT(5945, 6425, 160, 0, 23, 0, NL80211_RRF_NO_OUTDOOR | 0), + REG_RULE_EXT(57000, 66000, 2160, 0, 40, 0, 0), + }, + .n_reg_rules = 7}; + +//kernel undefined +static const struct ieee80211_regdomain regdom_SM = { + .alpha2 = "SM", + .dfs_region = NL80211_DFS_ETSI, + .reg_rules = + { + REG_RULE_EXT(2400, 2483, 40, 0, 20, 0, 0), + REG_RULE_EXT(5150, 5250, 80, 0, 23, 0, + NL80211_RRF_NO_OUTDOOR | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5350, 80, 0, 20, 0, + NL80211_RRF_NO_OUTDOOR | NL80211_RRF_DFS | + NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5470, 5725, 160, 0, 27, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5725, 5875, 80, 0, 14, 0, 0), + REG_RULE_EXT(5945, 6425, 160, 0, 23, 0, NL80211_RRF_NO_OUTDOOR | 0), + REG_RULE_EXT(57000, 66000, 2160, 0, 40, 0, 0), + }, + .n_reg_rules = 7}; + +static const struct ieee80211_regdomain regdom_SN = { + .alpha2 = "SN", + .dfs_region = NL80211_DFS_FCC, + .reg_rules = + { + REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0), + REG_RULE_EXT(5170, 5250, 80, 0, 17, 0, NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5330, 80, 0, 24, 0, + NL80211_RRF_DFS | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5490, 5730, 160, 0, 24, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5735, 5835, 80, 0, 30, 0, 0), + }, + .n_reg_rules = 5}; + +static const struct ieee80211_regdomain regdom_SR = { + .alpha2 = "SR", + .dfs_region = NL80211_DFS_ETSI, + .reg_rules = + { + REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0), + REG_RULE_EXT(5170, 5250, 80, 0, 20, 0, NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5330, 80, 0, 20, 0, + NL80211_RRF_DFS | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5490, 5710, 160, 0, 27, 0, NL80211_RRF_DFS | 0), + }, + .n_reg_rules = 4}; + +static const struct ieee80211_regdomain regdom_SV = { + .alpha2 = "SV", + .dfs_region = NL80211_DFS_FCC, + .reg_rules = + { + REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0), + REG_RULE_EXT(5170, 5250, 20, 0, 17, 0, 0), + REG_RULE_EXT(5250, 5330, 20, 0, 23, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5735, 5835, 20, 0, 30, 0, 0), + }, + .n_reg_rules = 4}; + +static const struct ieee80211_regdomain regdom_SY = { + .alpha2 = "SY", + .reg_rules = + { + REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0), + }, + .n_reg_rules = 1}; + +static const struct ieee80211_regdomain regdom_TC = { + .alpha2 = "TC", + .dfs_region = NL80211_DFS_FCC, + .reg_rules = + { + REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0), + REG_RULE_EXT(5170, 5250, 80, 0, 24, 0, NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5330, 80, 0, 24, 0, + NL80211_RRF_DFS | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5490, 5730, 160, 0, 24, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5735, 5835, 80, 0, 30, 0, 0), + }, + .n_reg_rules = 5}; + +static const struct ieee80211_regdomain regdom_TD = { + .alpha2 = "TD", + .dfs_region = 
NL80211_DFS_ETSI, + .reg_rules = + { + REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0), + REG_RULE_EXT(5170, 5250, 80, 0, 20, 0, NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5330, 80, 0, 20, 0, + NL80211_RRF_DFS | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5490, 5710, 160, 0, 27, 0, NL80211_RRF_DFS | 0), + }, + .n_reg_rules = 4}; + +static const struct ieee80211_regdomain regdom_TG = { + .alpha2 = "TG", + .dfs_region = NL80211_DFS_ETSI, + .reg_rules = + { + REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0), + REG_RULE_EXT(5170, 5250, 40, 0, 20, 0, 0), + REG_RULE_EXT(5250, 5330, 40, 0, 20, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5490, 5710, 40, 0, 27, 0, NL80211_RRF_DFS | 0), + }, + .n_reg_rules = 4}; + +//Certified laboratory requirements +static const struct ieee80211_regdomain regdom_TH = { + .alpha2 = "TH", + .dfs_region = NL80211_DFS_ETSI, + .reg_rules = + { + REG_RULE_EXT(2400, 2483, 40, 0, 20, 0, 0), + REG_RULE_EXT(5150, 5250, 80, 0, 23, 0, + NL80211_RRF_NO_OUTDOOR | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5350, 80, 0, 20, 0, + NL80211_RRF_NO_OUTDOOR | NL80211_RRF_DFS | + NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5470, 5725, 160, 0, 27, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5725, 5875, 80, 0, 14, 0, 0), + REG_RULE_EXT(5945, 6425, 160, 0, 23, 0, NL80211_RRF_NO_OUTDOOR | 0), + REG_RULE_EXT(57000, 66000, 2160, 0, 40, 0, 0), + }, + .n_reg_rules = 7}; + +//kernel undefined +static const struct ieee80211_regdomain regdom_TJ = { + .alpha2 = "TJ", + .dfs_region = NL80211_DFS_ETSI, + .reg_rules = + { + REG_RULE_EXT(2400, 2483, 40, 0, 20, 0, 0), + REG_RULE_EXT(5150, 5250, 80, 0, 23, 0, + NL80211_RRF_NO_OUTDOOR | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5350, 80, 0, 20, 0, + NL80211_RRF_NO_OUTDOOR | NL80211_RRF_DFS | + NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5470, 5725, 160, 0, 27, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5725, 5875, 80, 0, 14, 0, 0), + REG_RULE_EXT(5945, 6425, 160, 0, 23, 0, NL80211_RRF_NO_OUTDOOR | 0), + REG_RULE_EXT(57000, 66000, 2160, 0, 40, 0, 0), + }, + .n_reg_rules = 7}; + +//kernel undefined +static const struct ieee80211_regdomain regdom_TM = { + .alpha2 = "TM", + .dfs_region = NL80211_DFS_ETSI, + .reg_rules = + { + REG_RULE_EXT(2400, 2483, 40, 0, 20, 0, 0), + REG_RULE_EXT(5150, 5250, 80, 0, 23, 0, + NL80211_RRF_NO_OUTDOOR | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5350, 80, 0, 20, 0, + NL80211_RRF_NO_OUTDOOR | NL80211_RRF_DFS | + NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5470, 5725, 160, 0, 27, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5725, 5875, 80, 0, 14, 0, 0), + REG_RULE_EXT(5945, 6425, 160, 0, 23, 0, NL80211_RRF_NO_OUTDOOR | 0), + REG_RULE_EXT(57000, 66000, 2160, 0, 40, 0, 0), + }, + .n_reg_rules = 7}; + +static const struct ieee80211_regdomain regdom_TN = { + .alpha2 = "TN", + .dfs_region = NL80211_DFS_ETSI, + .reg_rules = + { + REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0), + REG_RULE_EXT(5170, 5250, 80, 0, 20, 0, NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5330, 80, 0, 20, 0, + NL80211_RRF_DFS | NL80211_RRF_AUTO_BW | 0), + }, + .n_reg_rules = 3}; + +static const struct ieee80211_regdomain regdom_TR = { + .alpha2 = "TR", + .dfs_region = NL80211_DFS_ETSI, + .reg_rules = + { + REG_RULE_EXT(2400, 2483, 40, 0, 20, 0, 0), + REG_RULE_EXT(5150, 5250, 80, 0, 23, 0, + NL80211_RRF_AUTO_BW | NL80211_RRF_NO_OUTDOOR | 0), + REG_RULE_EXT(5250, 5330, 80, 0, 20, 0, + NL80211_RRF_NO_OUTDOOR | NL80211_RRF_DFS | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5470, 5725, 160, 0, 27, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5495, 6425, 160, 0, 23, 0, NL80211_RRF_NO_OUTDOOR | 0), + 
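+		/*
+		 * NOTE: comparable ETSI entries in this table use 5945-6425 MHz for
+		 * the 6 GHz rule; the 5495 MHz start above overlaps the 5470-5725
+		 * rule and may be a typo in the vendor data.
+		 */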
REG_RULE_EXT(57000, 66000, 2160, 0, 40, 0, 0), + }, + .n_reg_rules = 6}; + +static const struct ieee80211_regdomain regdom_TT = { + .alpha2 = "TT", + .dfs_region = NL80211_DFS_FCC, + .reg_rules = + { + REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0), + REG_RULE_EXT(5170, 5250, 80, 0, 17, 0, NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5330, 80, 0, 24, 0, + NL80211_RRF_DFS | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5490, 5730, 160, 0, 24, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5735, 5835, 80, 0, 30, 0, 0), + }, + .n_reg_rules = 5}; + +static const struct ieee80211_regdomain regdom_TW = { + .alpha2 = "TW", + .dfs_region = NL80211_DFS_FCC, + .reg_rules = + { + REG_RULE_EXT(2400, 2483, 40, 0, 30, 0, 0), + REG_RULE_EXT(5150, 5250, 80, 0, 23, 0, NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5350, 80, 0, 23, 0, + NL80211_RRF_DFS | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5470, 5730, 160, 0, 23, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5725, 5850, 80, 0, 30, 0, 0), + REG_RULE_EXT(5945, 6425, 320, 0, 23, 0, NL80211_RRF_NO_OUTDOOR | 0), + REG_RULE_EXT(57000, 66000, 2160, 0, 40, 0, 0), + }, + .n_reg_rules = 7}; + +static const struct ieee80211_regdomain regdom_TZ = { + .alpha2 = "TZ", + .reg_rules = + { + REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0), + REG_RULE_EXT(5735, 5835, 80, 0, 30, 0, 0), + }, + .n_reg_rules = 2}; + + +static const struct ieee80211_regdomain regdom_UA = { + .alpha2 = "UA", + .dfs_region = NL80211_DFS_ETSI, + .reg_rules = + { + REG_RULE_EXT(2400, 2483, 40, 0, 20, 0, NL80211_RRF_NO_OUTDOOR | 0), + REG_RULE_EXT(5150, 5250, 80, 0, 20, 0, + NL80211_RRF_NO_OUTDOOR | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5350, 80, 0, 20, 0, + NL80211_RRF_DFS | NL80211_RRF_NO_OUTDOOR | + NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5470, 5725, 160, 0, 20, 0, + NL80211_RRF_DFS | NL80211_RRF_NO_OUTDOOR | 0), + REG_RULE_EXT(5725, 5850, 80, 0, 20, 0, NL80211_RRF_NO_OUTDOOR | 0), + REG_RULE_EXT(57000, 66000, 2160, 0, 16, 0, + NL80211_RRF_NO_OUTDOOR | 0), + }, + .n_reg_rules = 6}; + +static const struct ieee80211_regdomain regdom_UG = { + .alpha2 = "UG", + .dfs_region = NL80211_DFS_FCC, + .reg_rules = + { + REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0), + REG_RULE_EXT(5170, 5250, 80, 0, 24, 0, NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5330, 80, 0, 24, 0, + NL80211_RRF_DFS | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5490, 5730, 160, 0, 24, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5735, 5835, 80, 0, 30, 0, 0), + }, + .n_reg_rules = 5}; + +static const struct ieee80211_regdomain regdom_US = { + .alpha2 = "US", + .dfs_region = NL80211_DFS_FCC, + .reg_rules = + { + // 1...11 + REG_RULE_EXT(2400, 2472, 40, 0, 30, 0, 0), + // 36 40 44 48 + REG_RULE_EXT(5150, 5250, 80, 0, 23, 0, NL80211_RRF_AUTO_BW | 0), + // 52 56 60 64 + REG_RULE_EXT(5250, 5350, 80, 0, 24, 0, + NL80211_RRF_DFS | NL80211_RRF_AUTO_BW | 0), + // 100 104 108 112 116 120 124 + REG_RULE_EXT(5470, 5730, 160, 0, 24, 0, NL80211_RRF_DFS | 0), + // 128 132 136 140 + REG_RULE_EXT(5730, 5850, 80, 0, 30, 0, NL80211_RRF_AUTO_BW | 0), + // 149 153 157 161 165 + REG_RULE_EXT(5850, 5895, 40, 0, 27, 0, + NL80211_RRF_AUTO_BW | NL80211_RRF_NO_OUTDOOR | 0), + REG_RULE_EXT(5925, 7125, 320, 0, 12, 0, + NL80211_RRF_NO_IR | NL80211_RRF_NO_OUTDOOR | 0), + REG_RULE_EXT(57240, 71000, 2160, 0, 40, 0, 0), + }, + .n_reg_rules = 8}; + +//Certified laboratory requirements +static const struct ieee80211_regdomain regdom_UY = { + .alpha2 = "UY", + .dfs_region = NL80211_DFS_ETSI, + .reg_rules = + { + REG_RULE_EXT(2400, 2483, 40, 0, 20, 0, 0), + REG_RULE_EXT(5150, 5250, 80, 0, 23, 0, + 
NL80211_RRF_NO_OUTDOOR | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5350, 80, 0, 20, 0, + NL80211_RRF_NO_OUTDOOR | NL80211_RRF_DFS | + NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5470, 5725, 160, 0, 27, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5725, 5875, 80, 0, 14, 0, 0), + REG_RULE_EXT(5945, 6425, 160, 0, 23, 0, NL80211_RRF_NO_OUTDOOR | 0), + REG_RULE_EXT(57000, 66000, 2160, 0, 40, 0, 0), + }, + .n_reg_rules = 7}; + +static const struct ieee80211_regdomain regdom_UZ = { + .alpha2 = "UZ", + .dfs_region = NL80211_DFS_ETSI, + .reg_rules = + { + REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0), + REG_RULE_EXT(5170, 5250, 80, 0, 20, 0, NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5330, 80, 0, 20, 0, + NL80211_RRF_DFS | NL80211_RRF_AUTO_BW | 0), + }, + .n_reg_rules = 3}; + +static const struct ieee80211_regdomain regdom_VC = { + .alpha2 = "VC", + .dfs_region = NL80211_DFS_ETSI, + .reg_rules = + { + REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0), + REG_RULE_EXT(5170, 5250, 80, 0, 20, 0, NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5330, 80, 0, 20, 0, + NL80211_RRF_DFS | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5490, 5710, 160, 0, 27, 0, NL80211_RRF_DFS | 0), + }, + .n_reg_rules = 4}; + +static const struct ieee80211_regdomain regdom_VE = { + .alpha2 = "VE", + .dfs_region = NL80211_DFS_FCC, + .reg_rules = + { + REG_RULE_EXT(2402, 2482, 40, 0, 30, 0, 0), + REG_RULE_EXT(5170, 5250, 80, 0, 23, 0, NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5330, 80, 0, 23, 0, + NL80211_RRF_DFS | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5735, 5835, 80, 0, 30, 0, 0), + }, + .n_reg_rules = 4}; + +static const struct ieee80211_regdomain regdom_VI = { + .alpha2 = "VI", + .dfs_region = NL80211_DFS_FCC, + .reg_rules = + { + REG_RULE_EXT(2402, 2472, 40, 0, 30, 0, 0), + REG_RULE_EXT(5170, 5250, 80, 0, 24, 0, NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5330, 80, 0, 24, 0, + NL80211_RRF_DFS | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5490, 5730, 160, 0, 24, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5735, 5835, 80, 0, 30, 0, 0), + }, + .n_reg_rules = 5}; + +static const struct ieee80211_regdomain regdom_VN = { + .alpha2 = "VN", + .dfs_region = NL80211_DFS_FCC, + .reg_rules = + { + REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0), + REG_RULE_EXT(5170, 5250, 80, 0, 17, 0, 0), + REG_RULE_EXT(5250, 5330, 80, 0, 24, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5490, 5730, 80, 0, 24, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5735, 5835, 80, 0, 30, 0, 0), + }, + .n_reg_rules = 5}; + +static const struct ieee80211_regdomain regdom_VU = { + .alpha2 = "VU", + .dfs_region = NL80211_DFS_FCC, + .reg_rules = + { + REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0), + REG_RULE_EXT(5170, 5250, 80, 0, 17, 0, NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5330, 80, 0, 24, 0, + NL80211_RRF_DFS | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5490, 5730, 160, 0, 24, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5735, 5835, 80, 0, 30, 0, 0), + }, + .n_reg_rules = 5}; + +static const struct ieee80211_regdomain regdom_WF = { + .alpha2 = "WF", + .dfs_region = NL80211_DFS_ETSI, + .reg_rules = + { + REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0), + REG_RULE_EXT(5170, 5250, 80, 0, 20, 0, NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5330, 80, 0, 20, 0, + NL80211_RRF_DFS | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5490, 5710, 160, 0, 27, 0, NL80211_RRF_DFS | 0), + }, + .n_reg_rules = 4}; + +static const struct ieee80211_regdomain regdom_WS = { + .alpha2 = "WS", + .dfs_region = NL80211_DFS_ETSI, + .reg_rules = + { + REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0), + REG_RULE_EXT(5170, 5250, 40, 0, 20, 0, 
0), + REG_RULE_EXT(5250, 5330, 40, 0, 20, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5490, 5710, 40, 0, 27, 0, NL80211_RRF_DFS | 0), + }, + .n_reg_rules = 4}; + +//kernel undefined +static const struct ieee80211_regdomain regdom_FK = { + .alpha2 = "FK", + .dfs_region = NL80211_DFS_ETSI, + .reg_rules = + { + REG_RULE_EXT(2400, 2483, 40, 0, 20, 0, 0), + REG_RULE_EXT(5150, 5250, 80, 0, 23, 0, + NL80211_RRF_NO_OUTDOOR | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5350, 80, 0, 20, 0, + NL80211_RRF_NO_OUTDOOR | NL80211_RRF_DFS | + NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5470, 5725, 160, 0, 27, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5725, 5875, 80, 0, 14, 0, 0), + REG_RULE_EXT(5945, 6425, 160, 0, 23, 0, NL80211_RRF_NO_OUTDOOR | 0), + REG_RULE_EXT(57000, 66000, 2160, 0, 40, 0, 0), + }, + .n_reg_rules = 7}; + +//kernel undefined +static const struct ieee80211_regdomain regdom_FO = { + .alpha2 = "FO", + .dfs_region = NL80211_DFS_ETSI, + .reg_rules = + { + REG_RULE_EXT(2400, 2483, 40, 0, 20, 0, 0), + REG_RULE_EXT(5150, 5250, 80, 0, 23, 0, + NL80211_RRF_NO_OUTDOOR | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5350, 80, 0, 20, 0, + NL80211_RRF_NO_OUTDOOR | NL80211_RRF_DFS | + NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5470, 5725, 160, 0, 27, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5725, 5875, 80, 0, 14, 0, 0), + REG_RULE_EXT(5945, 6425, 160, 0, 23, 0, NL80211_RRF_NO_OUTDOOR | 0), + REG_RULE_EXT(57000, 66000, 2160, 0, 40, 0, 0), + }, + .n_reg_rules = 7}; + +static const struct ieee80211_regdomain regdom_YE = { + .alpha2 = "YE", + .reg_rules = + { + REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0), + }, + .n_reg_rules = 1}; + +static const struct ieee80211_regdomain regdom_YT = { + .alpha2 = "YT", + .dfs_region = NL80211_DFS_ETSI, + .reg_rules = + { + REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0), + REG_RULE_EXT(5170, 5250, 80, 0, 20, 0, NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5330, 80, 0, 20, 0, + NL80211_RRF_DFS | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5490, 5710, 160, 0, 27, 0, NL80211_RRF_DFS | 0), + }, + .n_reg_rules = 4}; + +static const struct ieee80211_regdomain regdom_ZA = { + .alpha2 = "ZA", + .dfs_region = NL80211_DFS_ETSI, + .reg_rules = + { + REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0), + REG_RULE_EXT(5170, 5250, 80, 0, 20, 0, NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5330, 80, 0, 20, 0, + NL80211_RRF_DFS | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5490, 5710, 160, 0, 30, 0, 0), + }, + .n_reg_rules = 4}; + +//kernel undefined +static const struct ieee80211_regdomain regdom_ZM = { + .alpha2 = "ZM", + .dfs_region = NL80211_DFS_ETSI, + .reg_rules = + { + REG_RULE_EXT(2400, 2483, 40, 0, 20, 0, 0), + REG_RULE_EXT(5150, 5250, 80, 0, 23, 0, + NL80211_RRF_NO_OUTDOOR | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5350, 80, 0, 20, 0, + NL80211_RRF_NO_OUTDOOR | NL80211_RRF_DFS | + NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5470, 5725, 160, 0, 27, 0, NL80211_RRF_DFS | 0), + REG_RULE_EXT(5725, 5875, 80, 0, 14, 0, 0), + REG_RULE_EXT(5945, 6425, 160, 0, 23, 0, NL80211_RRF_NO_OUTDOOR | 0), + REG_RULE_EXT(57000, 66000, 2160, 0, 40, 0, 0), + }, + .n_reg_rules = 7}; + +static const struct ieee80211_regdomain regdom_ZW = { + .alpha2 = "ZW", + .dfs_region = NL80211_DFS_ETSI, + .reg_rules = + { + REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0), + REG_RULE_EXT(5170, 5250, 80, 0, 20, 0, NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5250, 5330, 80, 0, 20, 0, + NL80211_RRF_DFS | NL80211_RRF_AUTO_BW | 0), + REG_RULE_EXT(5490, 5710, 160, 0, 27, 0, NL80211_RRF_DFS | 0), + }, + .n_reg_rules = 4}; + +//kernel undefined +static 
const struct ieee80211_regdomain regdom_WW = { + .n_reg_rules = 2, + .alpha2 = "WW", + .reg_rules = + { + // 1...14 + REG_RULE(2390 - 10, 2510 + 10, 40, 0, 20, 0), + // 36...165 + REG_RULE(5150 - 10, 5970 + 10, 80, 0, 20, 0), + }, +}; + +//kernel undefined +static const struct ieee80211_regdomain regdom_XX = { + .n_reg_rules = 1, + .alpha2 = "XX", + .reg_rules = + { + /* IEEE 802.11b/g, channels 1..11 */ + REG_RULE(2412 - 10, 2462 + 10, 40, 0, 20, 0), + }, +}; + +//Keep "XX" at the end when inserting +const struct ieee80211_regdomain *reg_regdb[] = { + ®dom_00, ®dom_AD, ®dom_AE, ®dom_AF, ®dom_AI, ®dom_AL, + ®dom_AM, ®dom_AN, ®dom_AR, ®dom_AS, ®dom_AT, ®dom_AU, + ®dom_AW, ®dom_AZ, ®dom_BA, ®dom_BB, ®dom_BD, ®dom_BE, + ®dom_BF, ®dom_BG, ®dom_BH, ®dom_BL, ®dom_BM, ®dom_BN, + ®dom_BO, ®dom_BR, ®dom_BS, ®dom_BT, ®dom_BY, ®dom_BZ, + ®dom_CA, ®dom_CF, ®dom_CH, ®dom_CI, ®dom_CL, ®dom_CN, + ®dom_CO, ®dom_CR, ®dom_CX, ®dom_CY, ®dom_CZ, ®dom_DE, + ®dom_DK, ®dom_DM, ®dom_DO, ®dom_DZ, ®dom_EC, ®dom_EE, + ®dom_EG, ®dom_ES, ®dom_ET, ®dom_FI, ®dom_FM, ®dom_FR, + ®dom_GB, ®dom_GD, ®dom_GE, ®dom_GF, ®dom_GH, ®dom_GL, + ®dom_GP, ®dom_GR, ®dom_GT, ®dom_GU, ®dom_GY, ®dom_HK, + ®dom_HN, ®dom_HR, ®dom_HT, ®dom_HU, ®dom_ID, ®dom_IE, + ®dom_IL, ®dom_IN, ®dom_IR, ®dom_IS, ®dom_IT, ®dom_JM, + ®dom_JO, ®dom_JP, ®dom_KE, ®dom_KH, ®dom_KN, ®dom_KP, + ®dom_KR, ®dom_KW, ®dom_KY, ®dom_KZ, ®dom_LB, ®dom_LC, + ®dom_LI, ®dom_LK, ®dom_LS, ®dom_LT, ®dom_LU, ®dom_LV, + ®dom_MA, ®dom_MC, ®dom_MD, ®dom_ME, ®dom_MF, ®dom_MH, + ®dom_MK, ®dom_MN, ®dom_MO, ®dom_MP, ®dom_MQ, ®dom_MR, + ®dom_MT, ®dom_MU, ®dom_MW, ®dom_MX, ®dom_MY, ®dom_NI, + ®dom_NL, ®dom_NO, ®dom_NP, ®dom_NZ, ®dom_OM, ®dom_PA, + ®dom_PE, ®dom_PF, ®dom_PG, ®dom_PH, ®dom_PK, ®dom_PL, + ®dom_PM, ®dom_PR, ®dom_PT, ®dom_PW, ®dom_PY, ®dom_QA, + ®dom_RE, ®dom_RO, ®dom_RS, ®dom_RU, ®dom_RW, ®dom_SA, + ®dom_SE, ®dom_SG, ®dom_SI, ®dom_SK, ®dom_SN, ®dom_SR, + ®dom_SV, ®dom_SY, ®dom_TC, ®dom_TD, ®dom_TG, ®dom_TH, + ®dom_TN, ®dom_TR, ®dom_TT, ®dom_TW, ®dom_UA, ®dom_UG, + ®dom_US, ®dom_UY, ®dom_UZ, ®dom_VC, ®dom_VE, ®dom_VI, + ®dom_VN, ®dom_VU, ®dom_WF, ®dom_YE, ®dom_YT, ®dom_ZA, + ®dom_ZW, ®dom_BW, ®dom_GI, ®dom_IQ, ®dom_KG, ®dom_LY, + ®dom_MV, ®dom_NA, ®dom_NG, ®dom_SM, ®dom_TJ, ®dom_TM, + ®dom_FK, ®dom_FO, ®dom_ZM, ®dom_CU, ®dom_TZ, ®dom_WS, + ®dom_WW, ®dom_XX, +}; + +int reg_regdb_size = ARRAY_SIZE(reg_regdb); diff --git a/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_bfmer.c b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_bfmer.c new file mode 100644 index 0000000..4be5ec0 --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_bfmer.c @@ -0,0 +1,105 @@ +/** + ****************************************************************************** + * + * @file rwnx_bfmer.c + * + * @brief VHT Beamformer function definitions + * + * Copyright (C) RivieraWaves 2016-2019 + * + ****************************************************************************** + */ + +/** + * INCLUDE FILES + ****************************************************************************** + */ + +#include +#include "rwnx_bfmer.h" + +/** + * FUNCTION DEFINITIONS + ****************************************************************************** + */ + +int rwnx_bfmer_report_add(struct rwnx_hw *rwnx_hw, struct rwnx_sta *rwnx_sta, + unsigned int length) +{ + gfp_t flags; + struct rwnx_bfmer_report *bfm_report ; + + if (in_softirq()) + flags = GFP_ATOMIC; + else + flags = GFP_KERNEL; + + /* Allocate a structure that will contain the beamforming report */ + bfm_report = kmalloc(sizeof(*bfm_report) + 
length, flags); + + + /* Check report allocation */ + if (!bfm_report) { + /* Do not use beamforming */ + return -1; + } + + /* Store report length */ + bfm_report->length = length; + + /* + * Need to provide a Virtual Address to the MAC so that it can + * upload the received Beamforming Report in driver memory + */ + bfm_report->dma_addr = dma_map_single(rwnx_hw->dev, &bfm_report->report[0], + length, DMA_FROM_DEVICE); + + /* Check DMA mapping result */ + if (dma_mapping_error(rwnx_hw->dev, bfm_report->dma_addr)) { + /* Free allocated report */ + kfree(bfm_report); + /* And leave */ + return -1; + } + + /* Store report structure */ + rwnx_sta->bfm_report = bfm_report; + + return 0; +} + +void rwnx_bfmer_report_del(struct rwnx_hw *rwnx_hw, struct rwnx_sta *rwnx_sta) +{ + /* Verify if a report has been allocated */ + if (rwnx_sta->bfm_report) { + struct rwnx_bfmer_report *bfm_report = rwnx_sta->bfm_report; + + /* Unmap DMA region */ + dma_unmap_single(rwnx_hw->dev, bfm_report->dma_addr, + bfm_report->length, DMA_BIDIRECTIONAL); + + /* Free allocated report structure and clean the pointer */ + kfree(bfm_report); + rwnx_sta->bfm_report = NULL; + } +} + +#ifdef CONFIG_RWNX_FULLMAC +u8 rwnx_bfmer_get_rx_nss(const struct ieee80211_vht_cap *vht_capa) +{ + int i; + u8 rx_nss = 0; + u16 rx_mcs_map = le16_to_cpu(vht_capa->supp_mcs.rx_mcs_map); + + for (i = 7; i >= 0; i--) { + u8 mcs = (rx_mcs_map >> (2 * i)) & 3; + + if (mcs != IEEE80211_VHT_MCS_NOT_SUPPORTED) { + rx_nss = i + 1; + break; + } + } + + return rx_nss; +} +#endif /* CONFIG_RWNX_FULLMAC */ diff --git a/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_bfmer.h b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_bfmer.h new file mode 100644 index 0000000..9a313bf --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_bfmer.h @@ -0,0 +1,100 @@ +/** + ****************************************************************************** + * + * @file rwnx_bfmer.h + * + * @brief VHT Beamformer function declarations + * + * Copyright (C) RivieraWaves 2016-2019 + * + ****************************************************************************** + */ + +#ifndef _RWNX_BFMER_H_ +#define _RWNX_BFMER_H_ + +/** + * INCLUDE FILES + ****************************************************************************** + */ + +#include "rwnx_defs.h" + +/** + * DEFINES + ****************************************************************************** + */ + +/// Maximal supported report length (in bytes) +#define RWNX_BFMER_REPORT_MAX_LEN 2048 + +/// Size of the allocated report space (twice the maximum report length) +#define RWNX_BFMER_REPORT_SPACE_SIZE (RWNX_BFMER_REPORT_MAX_LEN * 2) + +/** + * TYPE DEFINITIONS + ****************************************************************************** + */ + +/* + * Structure used to store a beamforming report. + */ +struct rwnx_bfmer_report { + dma_addr_t dma_addr; /* Virtual address provided to MAC for + DMA transfer of the Beamforming Report */ + unsigned int length; /* Report Length */ + u8 report[1]; /* Report to be used for VHT TX Beamforming */ +}; + +/** + * FUNCTION DECLARATIONS + ****************************************************************************** + */ + +/** + ****************************************************************************** + * @brief Allocate memory aiming to contains the Beamforming Report received + * from a Beamformee capable capable. + * The providing length shall be large enough to contain the VHT Compressed + * Beaforming Report and the MU Exclusive part. 
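+ * (The storage is allocated as a single block: a struct rwnx_bfmer_report
+ * header immediately followed by 'length' bytes for the report[] payload;
+ * see rwnx_bfmer_report_add() in rwnx_bfmer.c.)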
+ * It also perform a DMA Mapping providing an address to be provided to the HW + * responsible for the DMA transfer of the report. + * If successful a struct rwnx_bfmer_report object is allocated, it's address + * is stored in rwnx_sta->bfm_report. + * + * @param[in] rwnx_hw PHY Information + * @param[in] rwnx_sta Peer STA Information + * @param[in] length Memory size to be allocated + * + * @return 0 if operation is successful, else -1. + ****************************************************************************** + */ +int rwnx_bfmer_report_add(struct rwnx_hw *rwnx_hw, struct rwnx_sta *rwnx_sta, + unsigned int length); + +/** + ****************************************************************************** + * @brief Free a previously allocated memory intended to be used for + * Beamforming Reports. + * + * @param[in] rwnx_hw PHY Information + * @param[in] rwnx_sta Peer STA Information + * + ****************************************************************************** + */ +void rwnx_bfmer_report_del(struct rwnx_hw *rwnx_hw, struct rwnx_sta *rwnx_sta); + +#ifdef CONFIG_RWNX_FULLMAC +/** + ****************************************************************************** + * @brief Parse a Rx VHT-MCS map in order to deduce the maximum number of + * Spatial Streams supported by a beamformee. + * + * @param[in] vht_capa Received VHT Capability field. + * + ****************************************************************************** + */ +u8 rwnx_bfmer_get_rx_nss(const struct ieee80211_vht_cap *vht_capa); +#endif /* CONFIG_RWNX_FULLMAC */ + +#endif /* _RWNX_BFMER_H_ */ diff --git a/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_cfgfile.c b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_cfgfile.c new file mode 100644 index 0000000..2af5c30 --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_cfgfile.c @@ -0,0 +1,237 @@ +/** + **************************************************************************************** + * + * @file rwnx_configparse.c + * + * Copyright (C) RivieraWaves 2012-2019 + * + **************************************************************************************** + */ +#include +#include + +#include "rwnx_defs.h" +#include "rwnx_cfgfile.h" + +/** + * + */ +static const char *rwnx_find_tag(const u8 *file_data, unsigned int file_size, + const char *tag_name, unsigned int tag_len) +{ + unsigned int curr, line_start = 0, line_size; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + /* Walk through all the lines of the configuration file */ + while (line_start < file_size) { + /* Search the end of the current line (or the end of the file) */ + for (curr = line_start; curr < file_size; curr++) + if (file_data[curr] == '\n') + break; + + /* Compute the line size */ + line_size = curr - line_start; + + /* Check if this line contains the expected tag */ + if ((line_size == (strlen(tag_name) + tag_len)) && + (!strncmp(&file_data[line_start], tag_name, strlen(tag_name)))) + return (&file_data[line_start + strlen(tag_name)]); + + /* Move to next line */ + line_start = curr + 1; + } + + /* Tag not found */ + return NULL; +} + +/** + * Parse the Config file used at init time + */ +int rwnx_parse_configfile(struct rwnx_hw *rwnx_hw, const char *filename, + struct rwnx_conf_file *config) +{ + const struct firmware *config_fw; + u8 dflt_mac[ETH_ALEN] = { 0, 111, 111, 111, 111, 0 }; + int ret; + const u8 *tag_ptr; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + if ((ret = request_firmware(&config_fw, filename, rwnx_hw->dev))) { + printk(KERN_CRIT "%s: Failed to get %s (%d)\n", __func__, 
filename, ret); + return ret; + } + + /* Get MAC Address */ + tag_ptr = rwnx_find_tag(config_fw->data, config_fw->size, + "MAC_ADDR=", strlen("00:00:00:00:00:00")); + if (tag_ptr != NULL) { + u8 *addr = config->mac_addr; + if (sscanf(tag_ptr, + "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx", + addr + 0, addr + 1, addr + 2, + addr + 3, addr + 4, addr + 5) != ETH_ALEN) + memcpy(config->mac_addr, dflt_mac, ETH_ALEN); + } else + memcpy(config->mac_addr, dflt_mac, ETH_ALEN); + + RWNX_DBG("MAC Address is:\n%pM\n", config->mac_addr); + + /* Release the configuration file */ + release_firmware(config_fw); + + return 0; +} + +/** + * Parse the Config file used at init time + */ +int rwnx_parse_phy_configfile(struct rwnx_hw *rwnx_hw, const char *filename, + struct rwnx_phy_conf_file *config, int path) +{ + const struct firmware *config_fw; + int ret; + const u8 *tag_ptr; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + if ((ret = request_firmware(&config_fw, filename, rwnx_hw->dev))) { + printk(KERN_CRIT "%s: Failed to get %s (%d)\n", __func__, filename, ret); + return ret; + } + + /* Get Trident path mapping */ + tag_ptr = rwnx_find_tag(config_fw->data, config_fw->size, + "TRD_PATH_MAPPING=", strlen("00")); + if (tag_ptr != NULL) { + u8 val; + if (sscanf(tag_ptr, "%hhx", &val) == 1) + config->trd.path_mapping = val; + else + config->trd.path_mapping = path; + } else + config->trd.path_mapping = path; + + RWNX_DBG("Trident path mapping is: %d\n", config->trd.path_mapping); + + /* Get DC offset compensation */ + tag_ptr = rwnx_find_tag(config_fw->data, config_fw->size, + "TX_DC_OFF_COMP=", strlen("00000000")); + if (tag_ptr != NULL) { + if (sscanf(tag_ptr, "%08x", &config->trd.tx_dc_off_comp) != 1) + config->trd.tx_dc_off_comp = 0; + } else + config->trd.tx_dc_off_comp = 0; + + RWNX_DBG("TX DC offset compensation is: %08X\n", config->trd.tx_dc_off_comp); + + /* Get Karst TX IQ compensation value for path0 on 2.4GHz */ + tag_ptr = rwnx_find_tag(config_fw->data, config_fw->size, + "KARST_TX_IQ_COMP_2_4G_PATH_0=", strlen("00000000")); + if (tag_ptr != NULL) { + if (sscanf(tag_ptr, "%08x", &config->karst.tx_iq_comp_2_4G[0]) != 1) + config->karst.tx_iq_comp_2_4G[0] = 0x01000000; + } else + config->karst.tx_iq_comp_2_4G[0] = 0x01000000; + + RWNX_DBG("Karst TX IQ compensation for path 0 on 2.4GHz is: %08X\n", config->karst.tx_iq_comp_2_4G[0]); + + /* Get Karst TX IQ compensation value for path1 on 2.4GHz */ + tag_ptr = rwnx_find_tag(config_fw->data, config_fw->size, + "KARST_TX_IQ_COMP_2_4G_PATH_1=", strlen("00000000")); + if (tag_ptr != NULL) { + if (sscanf(tag_ptr, "%08x", &config->karst.tx_iq_comp_2_4G[1]) != 1) + config->karst.tx_iq_comp_2_4G[1] = 0x01000000; + } else + config->karst.tx_iq_comp_2_4G[1] = 0x01000000; + + RWNX_DBG("Karst TX IQ compensation for path 1 on 2.4GHz is: %08X\n", config->karst.tx_iq_comp_2_4G[1]); + + /* Get Karst RX IQ compensation value for path0 on 2.4GHz */ + tag_ptr = rwnx_find_tag(config_fw->data, config_fw->size, + "KARST_RX_IQ_COMP_2_4G_PATH_0=", strlen("00000000")); + if (tag_ptr != NULL) { + if (sscanf(tag_ptr, "%08x", &config->karst.rx_iq_comp_2_4G[0]) != 1) + config->karst.rx_iq_comp_2_4G[0] = 0x01000000; + } else + config->karst.rx_iq_comp_2_4G[0] = 0x01000000; + + RWNX_DBG("Karst RX IQ compensation for path 0 on 2.4GHz is: %08X\n", config->karst.rx_iq_comp_2_4G[0]); + + /* Get Karst RX IQ compensation value for path1 on 2.4GHz */ + tag_ptr = rwnx_find_tag(config_fw->data, config_fw->size, + "KARST_RX_IQ_COMP_2_4G_PATH_1=", strlen("00000000")); + if (tag_ptr != NULL) { + if (sscanf(tag_ptr, 
"%08x", &config->karst.rx_iq_comp_2_4G[1]) != 1) + config->karst.rx_iq_comp_2_4G[1] = 0x01000000; + } else + config->karst.rx_iq_comp_2_4G[1] = 0x01000000; + + RWNX_DBG("Karst RX IQ compensation for path 1 on 2.4GHz is: %08X\n", config->karst.rx_iq_comp_2_4G[1]); + + /* Get Karst TX IQ compensation value for path0 on 5GHz */ + tag_ptr = rwnx_find_tag(config_fw->data, config_fw->size, + "KARST_TX_IQ_COMP_5G_PATH_0=", strlen("00000000")); + if (tag_ptr != NULL) { + if (sscanf(tag_ptr, "%08x", &config->karst.tx_iq_comp_5G[0]) != 1) + config->karst.tx_iq_comp_5G[0] = 0x01000000; + } else + config->karst.tx_iq_comp_5G[0] = 0x01000000; + + RWNX_DBG("Karst TX IQ compensation for path 0 on 5GHz is: %08X\n", config->karst.tx_iq_comp_5G[0]); + + /* Get Karst TX IQ compensation value for path1 on 5GHz */ + tag_ptr = rwnx_find_tag(config_fw->data, config_fw->size, + "KARST_TX_IQ_COMP_5G_PATH_1=", strlen("00000000")); + if (tag_ptr != NULL) { + if (sscanf(tag_ptr, "%08x", &config->karst.tx_iq_comp_5G[1]) != 1) + config->karst.tx_iq_comp_5G[1] = 0x01000000; + } else + config->karst.tx_iq_comp_5G[1] = 0x01000000; + + RWNX_DBG("Karst TX IQ compensation for path 1 on 5GHz is: %08X\n", config->karst.tx_iq_comp_5G[1]); + + /* Get Karst RX IQ compensation value for path0 on 5GHz */ + tag_ptr = rwnx_find_tag(config_fw->data, config_fw->size, + "KARST_RX_IQ_COMP_5G_PATH_0=", strlen("00000000")); + if (tag_ptr != NULL) { + if (sscanf(tag_ptr, "%08x", &config->karst.rx_iq_comp_5G[0]) != 1) + config->karst.rx_iq_comp_5G[0] = 0x01000000; + } else + config->karst.rx_iq_comp_5G[0] = 0x01000000; + + RWNX_DBG("Karst RX IQ compensation for path 0 on 5GHz is: %08X\n", config->karst.rx_iq_comp_5G[0]); + + /* Get Karst RX IQ compensation value for path1 on 5GHz */ + tag_ptr = rwnx_find_tag(config_fw->data, config_fw->size, + "KARST_RX_IQ_COMP_5G_PATH_1=", strlen("00000000")); + if (tag_ptr != NULL) { + if (sscanf(tag_ptr, "%08x", &config->karst.rx_iq_comp_5G[1]) != 1) + config->karst.rx_iq_comp_5G[1] = 0x01000000; + } else + config->karst.rx_iq_comp_5G[1] = 0x01000000; + + RWNX_DBG("Karst RX IQ compensation for path 1 on 5GHz is: %08X\n", config->karst.rx_iq_comp_5G[1]); + + /* Get Karst default path */ + tag_ptr = rwnx_find_tag(config_fw->data, config_fw->size, + "KARST_DEFAULT_PATH=", strlen("00")); + if (tag_ptr != NULL) { + u8 val; + if (sscanf(tag_ptr, "%hhx", &val) == 1) + config->karst.path_used = val; + else + config->karst.path_used = path; + } else + config->karst.path_used = path; + + RWNX_DBG("Karst default path is: %d\n", config->karst.path_used); + + /* Release the configuration file */ + release_firmware(config_fw); + + return 0; +} + diff --git a/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_cfgfile.h b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_cfgfile.h new file mode 100644 index 0000000..2e91f7f --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_cfgfile.h @@ -0,0 +1,35 @@ +/** + **************************************************************************************** + * + * @file rwnx_cfgfile.h + * + * Copyright (C) RivieraWaves 2012-2019 + * + **************************************************************************************** + */ + +#ifndef _RWNX_CFGFILE_H_ +#define _RWNX_CFGFILE_H_ + +/* + * Structure used to retrieve information from the Config file used at Initialization time + */ +struct rwnx_conf_file { + u8 mac_addr[ETH_ALEN]; +}; + +/* + * Structure used to retrieve information from the PHY Config file used at Initialization time + */ +struct rwnx_phy_conf_file 
{ + struct phy_trd_cfg_tag trd; + struct phy_karst_cfg_tag karst; +}; + +int rwnx_parse_configfile(struct rwnx_hw *rwnx_hw, const char *filename, + struct rwnx_conf_file *config); + +int rwnx_parse_phy_configfile(struct rwnx_hw *rwnx_hw, const char *filename, + struct rwnx_phy_conf_file *config, int path); + +#endif /* _RWNX_CFGFILE_H_ */ diff --git a/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_cmds.c b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_cmds.c new file mode 100755 index 0000000..8dbb88e --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_cmds.c @@ -0,0 +1,604 @@ +/** + ****************************************************************************** + * + * rwnx_cmds.c + * + * Handles queueing (push to IPC, ack/cfm from IPC) of commands issued to + * LMAC FW + * + * Copyright (C) RivieraWaves 2014-2019 + * + ****************************************************************************** + */ + +//#define CREATE_TRACE_POINTS +#include + +#include "rwnx_cmds.h" +#include "rwnx_defs.h" +#include "rwnx_strs.h" +#include "rwnx_events.h" +#include "aicwf_txrxif.h" +#include "rwnx_wakelock.h" +#ifdef AICWF_SDIO_SUPPORT +#include "aicwf_sdio.h" +#else +#include "aicwf_usb.h" +#endif +/** + * + */ +extern int aicwf_sdio_writeb(struct aic_sdio_dev *sdiodev, uint regaddr, u8 val); + +void rwnx_cmd_free(struct rwnx_cmd *cmd); + +static void cmd_dump(const struct rwnx_cmd *cmd) +{ + printk(KERN_CRIT "tkn[%d] flags:%04x result:%3d cmd:%4d-%-24s - reqcfm(%4d-%-s)\n", + cmd->tkn, cmd->flags, cmd->result, cmd->id, RWNX_ID2STR(cmd->id), + cmd->reqid, cmd->reqid != (lmac_msg_id_t)-1 ? RWNX_ID2STR(cmd->reqid) : "none"); +} + +/** + * + */ +static void cmd_complete(struct rwnx_cmd_mgr *cmd_mgr, struct rwnx_cmd *cmd) +{ + //RWNX_DBG(RWNX_FN_ENTRY_STR); + lockdep_assert_held(&cmd_mgr->lock); + + //list_del(&cmd->list); + //cmd_mgr->queue_sz--; + + cmd->flags |= RWNX_CMD_FLAG_DONE; + if (cmd->flags & RWNX_CMD_FLAG_NONBLOCK) { + rwnx_cmd_free(cmd);//kfree(cmd);AIDEN + } else { + if (RWNX_CMD_WAIT_COMPLETE(cmd->flags)) { + cmd->result = 0; + complete(&cmd->complete); + } + } + + if(cmd_mgr->queue_sz == 0){ + rwnx_wakeup_unlock(g_rwnx_plat->usbdev->rwnx_hw->ws_tx); + } +} + +int cmd_mgr_queue_force_defer(struct rwnx_cmd_mgr *cmd_mgr, struct rwnx_cmd *cmd) +{ + bool defer_push = false; + + RWNX_DBG(RWNX_FN_ENTRY_STR); +#ifdef CREATE_TRACE_POINTS + trace_msg_send(cmd->id); +#endif + spin_lock_bh(&cmd_mgr->lock); + + if (cmd_mgr->state == RWNX_CMD_MGR_STATE_CRASHED) { + printk(KERN_CRIT"cmd queue crashed\n"); + cmd->result = -EPIPE; + spin_unlock_bh(&cmd_mgr->lock); + return -EPIPE; + } + + #ifndef CONFIG_RWNX_FHOST + if (!list_empty(&cmd_mgr->cmds)) { + if (cmd_mgr->queue_sz == cmd_mgr->max_queue_sz) { + printk(KERN_CRIT"Too many cmds (%d) already queued\n", + cmd_mgr->max_queue_sz); + cmd->result = -ENOMEM; + spin_unlock_bh(&cmd_mgr->lock); + return -ENOMEM; + } + } + #endif + + cmd->flags |= RWNX_CMD_FLAG_WAIT_PUSH; + defer_push = true; + + if (cmd->flags & RWNX_CMD_FLAG_REQ_CFM) + cmd->flags |= RWNX_CMD_FLAG_WAIT_CFM; + + cmd->tkn = cmd_mgr->next_tkn++; + cmd->result = -EINTR; + + if (!(cmd->flags & RWNX_CMD_FLAG_NONBLOCK)) + init_completion(&cmd->complete); + + list_add_tail(&cmd->list, &cmd_mgr->cmds); + if(cmd_mgr->queue_sz == 0){ + rwnx_wakeup_lock(g_rwnx_plat->usbdev->rwnx_hw->ws_tx); + } + cmd_mgr->queue_sz++; + spin_unlock_bh(&cmd_mgr->lock); + + WAKE_CMD_WORK(cmd_mgr); + return 0; +} + +void rwnx_msg_free_(struct lmac_msg *msg); + + +static int cmd_mgr_queue(struct 
rwnx_cmd_mgr *cmd_mgr, struct rwnx_cmd *cmd) +{ + int ret = 0; +#ifdef AICWF_SDIO_SUPPORT + struct aic_sdio_dev *sdiodev = container_of(cmd_mgr, struct aic_sdio_dev, cmd_mgr); +#endif +#ifdef AICWF_USB_SUPPORT + + struct aic_usb_dev *usbdev = container_of(cmd_mgr, struct aic_usb_dev, cmd_mgr); +#endif + bool defer_push = false; + u8_l empty = 0; + + //RWNX_DBG(RWNX_FN_ENTRY_STR); +#ifdef CREATE_TRACE_POINTS + trace_msg_send(cmd->id); +#endif + if(cmd->e2a_msg != NULL) { + do { + spin_lock_bh(&cmd_mgr->lock); + if(cmd_mgr->state == RWNX_CMD_MGR_STATE_CRASHED) { + AICWFDBG(LOGERROR, "cmd queue crashed\n"); + cmd->result = -EPIPE; + spin_unlock_bh(&cmd_mgr->lock); + return -EPIPE; + } + empty = list_empty(&cmd_mgr->cmds); + if(!empty) { + spin_unlock_bh(&cmd_mgr->lock); + if(in_softirq()) { + printk("in_softirq:check cmdqueue empty\n"); + mdelay(10); + } else { + printk("check cmdqueue empty\n"); + msleep(50); + } + } + } while(!empty);//wait for cmd queue empty + } else { + spin_lock_bh(&cmd_mgr->lock); + if (cmd_mgr->state == RWNX_CMD_MGR_STATE_CRASHED) { + printk(KERN_CRIT"cmd queue crashed\n"); + cmd->result = -EPIPE; + spin_unlock_bh(&cmd_mgr->lock); + return -EPIPE; + } + } + + #ifndef CONFIG_RWNX_FHOST + if (!list_empty(&cmd_mgr->cmds)) { + struct rwnx_cmd *last; + + if (cmd_mgr->queue_sz == cmd_mgr->max_queue_sz) { + printk(KERN_CRIT"Too many cmds (%d) already queued\n", + cmd_mgr->max_queue_sz); + cmd->result = -ENOMEM; + spin_unlock_bh(&cmd_mgr->lock); + return -ENOMEM; + } + last = list_entry(cmd_mgr->cmds.prev, struct rwnx_cmd, list); + if (last->flags & (RWNX_CMD_FLAG_WAIT_ACK | RWNX_CMD_FLAG_WAIT_PUSH | RWNX_CMD_FLAG_WAIT_CFM)) { +#if 0 // queue even NONBLOCK command. + if (cmd->flags & RWNX_CMD_FLAG_NONBLOCK) { + printk(KERN_CRIT"cmd queue busy\n"); + cmd->result = -EBUSY; + spin_unlock_bh(&cmd_mgr->lock); + return -EBUSY; + } +#endif + cmd->flags |= RWNX_CMD_FLAG_WAIT_PUSH; + defer_push = true; + } + } + #endif + +#if 0 + cmd->flags |= RWNX_CMD_FLAG_WAIT_ACK; +#endif + if (cmd->flags & RWNX_CMD_FLAG_REQ_CFM) + cmd->flags |= RWNX_CMD_FLAG_WAIT_CFM; + + cmd->tkn = cmd_mgr->next_tkn++; + cmd->result = -EINTR; + + if (!(cmd->flags & RWNX_CMD_FLAG_NONBLOCK)) + init_completion(&cmd->complete); + + list_add_tail(&cmd->list, &cmd_mgr->cmds); + if(cmd_mgr->queue_sz == 0){ + rwnx_wakeup_lock(g_rwnx_plat->usbdev->rwnx_hw->ws_tx); + } + cmd_mgr->queue_sz++; + + if(cmd->a2e_msg->id == ME_TRAFFIC_IND_REQ + #ifdef AICWF_ARP_OFFLOAD + || cmd->a2e_msg->id == MM_SET_ARPOFFLOAD_REQ + #endif + ) { + defer_push = true; + cmd->flags |= RWNX_CMD_FLAG_WAIT_PUSH; + //printk("defer push: tkn=%d\r\n", cmd->tkn); + } + + //spin_unlock_bh(&cmd_mgr->lock); + if (!defer_push) { + spin_unlock_bh(&cmd_mgr->lock); + AICWFDBG(LOGTRACE, "queue:id=%x, param_len=%u\n",cmd->a2e_msg->id, cmd->a2e_msg->param_len); + + #ifdef AICWF_SDIO_SUPPORT + aicwf_set_cmd_tx((void *)(sdiodev), cmd->a2e_msg, sizeof(struct lmac_msg) + cmd->a2e_msg->param_len); + #else + aicwf_set_cmd_tx((void *)(usbdev), cmd->a2e_msg, sizeof(struct lmac_msg) + cmd->a2e_msg->param_len); + #endif + //rwnx_ipc_msg_push(rwnx_hw, cmd, RWNX_CMD_A2EMSG_LEN(cmd->a2e_msg)); + + kfree(cmd->a2e_msg); + } else { + if(cmd_mgr->queue_sz <= 1) { + spin_unlock_bh(&cmd_mgr->lock); + WAKE_CMD_WORK(cmd_mgr); + } else { + spin_unlock_bh(&cmd_mgr->lock); + } + return 0; + } + + if (!(cmd->flags & RWNX_CMD_FLAG_NONBLOCK)) { + #ifdef CONFIG_RWNX_FHOST + if (wait_for_completion_killable(&cmd->complete)) { + cmd->result = -EINTR; + spin_lock_bh(&cmd_mgr->lock); + 
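+ /* Descriptive note (added annotation, not in the vendor source): the wait
+  * above was interrupted by a fatal signal, so the command is completed
+  * locally under the manager lock and the caller returns -EINTR; the copy
+  * already pushed to the firmware is not cancelled here (see the TODO below). */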
cmd_complete(cmd_mgr, cmd); + spin_unlock_bh(&cmd_mgr->lock); + /* TODO: kill the cmd at fw level */ + } + #else + unsigned long tout = msecs_to_jiffies(RWNX_80211_CMD_TIMEOUT_MS/*AIDEN workaround* cmd_mgr->queue_sz*/); + if (!wait_for_completion_killable_timeout(&cmd->complete, tout)) { + printk(KERN_CRIT"%s cmd timed-out cmd_mgr->queue_sz:%d\n", __func__,cmd_mgr->queue_sz); + #ifdef AICWF_SDIO_SUPPORT + ret = aicwf_sdio_writeb(sdiodev, SDIOWIFI_WAKEUP_REG, 2); + if (ret < 0) { + sdio_err("reg:%d write failed!\n", SDIOWIFI_WAKEUP_REG); + } + #endif + + cmd_dump(cmd); + spin_lock_bh(&cmd_mgr->lock); + + cmd_mgr->state = RWNX_CMD_MGR_STATE_CRASHED; + if (!(cmd->flags & RWNX_CMD_FLAG_DONE)) { + cmd->result = -ETIMEDOUT; + cmd_complete(cmd_mgr, cmd); + } + ret = -ETIMEDOUT; + spin_unlock_bh(&cmd_mgr->lock); + } + else{ + spin_lock_bh(&cmd_mgr->lock); + list_del(&cmd->list); + cmd_mgr->queue_sz--; + if(cmd_mgr->queue_sz == 0){ + rwnx_wakeup_unlock(usbdev->rwnx_hw->ws_tx); + } + spin_unlock_bh(&cmd_mgr->lock); + rwnx_cmd_free(cmd);//kfree(cmd);AIDEN + if(!list_empty(&cmd_mgr->cmds) && usbdev->state == USB_UP_ST) + WAKE_CMD_WORK(cmd_mgr); + } + #endif + } else { + cmd->result = 0; + } + return ret; +} + +/** + * + */ +static int cmd_mgr_llind(struct rwnx_cmd_mgr *cmd_mgr, struct rwnx_cmd *cmd) +{ + struct rwnx_cmd *cur, *acked = NULL, *next = NULL; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + spin_lock_bh(&cmd_mgr->lock); + list_for_each_entry(cur, &cmd_mgr->cmds, list) { + if (!acked) { + if (cur->tkn == cmd->tkn) { + if (WARN_ON_ONCE(cur != cmd)) { + cmd_dump(cmd); + } + acked = cur; + continue; + } + } + if (cur->flags & RWNX_CMD_FLAG_WAIT_PUSH) { + next = cur; + break; + } + } + if (!acked) { + printk(KERN_CRIT "Error: acked cmd not found\n"); + } else { + cmd->flags &= ~RWNX_CMD_FLAG_WAIT_ACK; + if (RWNX_CMD_WAIT_COMPLETE(cmd->flags)) + cmd_complete(cmd_mgr, cmd); + } + + if (next) { + #if 0 //there is no ack + struct rwnx_hw *rwnx_hw = container_of(cmd_mgr, struct rwnx_hw, cmd_mgr); + next->flags &= ~RWNX_CMD_FLAG_WAIT_PUSH; + rwnx_ipc_msg_push(rwnx_hw, next, RWNX_CMD_A2EMSG_LEN(next->a2e_msg)); + kfree(next->a2e_msg); + #endif + } + spin_unlock(&cmd_mgr->lock); + + return 0; +} + +void cmd_mgr_task_process(struct work_struct *work) +{ + struct rwnx_cmd_mgr *cmd_mgr = container_of(work, struct rwnx_cmd_mgr, cmdWork); + struct rwnx_cmd *cur, *next = NULL; + unsigned long tout; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + while(1) { + next = NULL; + spin_lock_bh(&cmd_mgr->lock); + + list_for_each_entry(cur, &cmd_mgr->cmds, list) { + if (cur->flags & RWNX_CMD_FLAG_WAIT_PUSH) { //just judge the first + next = cur; + } + break; + } + spin_unlock_bh(&cmd_mgr->lock); + + if(next == NULL) + break; + + if (next) { + #ifdef AICWF_SDIO_SUPPORT + struct aic_sdio_dev *sdiodev = container_of(cmd_mgr, struct aic_sdio_dev, cmd_mgr); + #endif + #ifdef AICWF_USB_SUPPORT + struct aic_usb_dev *usbdev = container_of(cmd_mgr, struct aic_usb_dev, cmd_mgr); + #endif + next->flags &= ~RWNX_CMD_FLAG_WAIT_PUSH; + + //printk("cmd_process, cmd->id=%d, tkn=%d\r\n",next->reqid, next->tkn); + //rwnx_ipc_msg_push(rwnx_hw, next, RWNX_CMD_A2EMSG_LEN(next->a2e_msg)); +#ifdef AICWF_SDIO_SUPPORT + aicwf_set_cmd_tx((void *)(sdiodev), next->a2e_msg, sizeof(struct lmac_msg) + next->a2e_msg->param_len); +#else + aicwf_set_cmd_tx((void *)(usbdev), next->a2e_msg, sizeof(struct lmac_msg) + next->a2e_msg->param_len); +#endif + kfree(next->a2e_msg); + + tout = msecs_to_jiffies(RWNX_80211_CMD_TIMEOUT_MS * cmd_mgr->queue_sz); + if 
(!wait_for_completion_killable_timeout(&next->complete, tout)) { + printk(KERN_CRIT"%s cmd timed-out cmd_mgr->queue_sz:%d\n", __func__, cmd_mgr->queue_sz); + cmd_dump(next); + spin_lock_bh(&cmd_mgr->lock); + //AIDEN workaround + cmd_mgr->state = RWNX_CMD_MGR_STATE_CRASHED; + if (!(next->flags & RWNX_CMD_FLAG_DONE)) { + next->result = -ETIMEDOUT; + cmd_complete(cmd_mgr, next); + } + spin_unlock_bh(&cmd_mgr->lock); + } else { + spin_lock_bh(&cmd_mgr->lock); + list_del(&next->list); + cmd_mgr->queue_sz--; + if(cmd_mgr->queue_sz == 0){ + rwnx_wakeup_unlock(usbdev->rwnx_hw->ws_tx); + } + spin_unlock_bh(&cmd_mgr->lock); + rwnx_cmd_free(next);//kfree(next);AIDEN + } + } + } + +} + + +static int cmd_mgr_run_callback(struct rwnx_hw *rwnx_hw, struct rwnx_cmd *cmd, + struct rwnx_cmd_e2amsg *msg, msg_cb_fct cb) +{ + int res; + + if (! cb){ + return 0; + } + //RWNX_DBG(RWNX_FN_ENTRY_STR); +#ifndef CONFIG_DEBUG_ATOMIC_SLEEP + //spin_lock_bh(&rwnx_hw->cb_lock); +#endif + res = cb(rwnx_hw, cmd, msg); +#ifndef CONFIG_DEBUG_ATOMIC_SLEEP + //spin_unlock_bh(&rwnx_hw->cb_lock); +#endif + + return res; +} + +/** + * + + */ +static int cmd_mgr_msgind(struct rwnx_cmd_mgr *cmd_mgr, struct rwnx_cmd_e2amsg *msg, + msg_cb_fct cb) +{ +#ifdef AICWF_SDIO_SUPPORT + struct aic_sdio_dev *sdiodev = container_of(cmd_mgr, struct aic_sdio_dev, cmd_mgr); + struct rwnx_hw *rwnx_hw = sdiodev->rwnx_hw; +#endif +#ifdef AICWF_USB_SUPPORT + struct aic_usb_dev *usbdev = container_of(cmd_mgr, struct aic_usb_dev, cmd_mgr); + struct rwnx_hw *rwnx_hw = usbdev->rwnx_hw; +#endif + struct rwnx_cmd *cmd, *pos; + bool found = false; + + // RWNX_DBG(RWNX_FN_ENTRY_STR); +#ifdef CREATE_TRACE_POINTS + trace_msg_recv(msg->id); +#endif + AICWFDBG(LOGTRACE, "%s cmd->id=%d\n", __func__, msg->id); + spin_lock_bh(&cmd_mgr->lock); + list_for_each_entry_safe(cmd, pos, &cmd_mgr->cmds, list) { + if (cmd->reqid == msg->id && + (cmd->flags & RWNX_CMD_FLAG_WAIT_CFM)) { + + if (!cmd_mgr_run_callback(rwnx_hw, cmd, msg, cb)) { + found = true; + cmd->flags &= ~RWNX_CMD_FLAG_WAIT_CFM; + + if (WARN((msg->param_len > RWNX_CMD_E2AMSG_LEN_MAX), + "Unexpect E2A msg len %d > %d\n", msg->param_len, + RWNX_CMD_E2AMSG_LEN_MAX)) { + msg->param_len = RWNX_CMD_E2AMSG_LEN_MAX; + } + + if (cmd->e2a_msg && msg->param_len) + memcpy(cmd->e2a_msg, &msg->param, msg->param_len); + + if (RWNX_CMD_WAIT_COMPLETE(cmd->flags)) + cmd_complete(cmd_mgr, cmd); + + break; + } + } + } + spin_unlock_bh(&cmd_mgr->lock); + + if (!found) + cmd_mgr_run_callback(rwnx_hw, NULL, msg, cb); + + return 0; +} + +/** + * + */ +static void cmd_mgr_print(struct rwnx_cmd_mgr *cmd_mgr) +{ + struct rwnx_cmd *cur; + + spin_lock_bh(&cmd_mgr->lock); + RWNX_DBG("q_sz/max: %2d / %2d - next tkn: %d\n", + cmd_mgr->queue_sz, cmd_mgr->max_queue_sz, + cmd_mgr->next_tkn); + list_for_each_entry(cur, &cmd_mgr->cmds, list) { + cmd_dump(cur); + } + spin_unlock_bh(&cmd_mgr->lock); +} + +static void cmd_mgr_drain(struct rwnx_cmd_mgr *cmd_mgr) +{ + struct rwnx_cmd *cur, *nxt; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + spin_lock_bh(&cmd_mgr->lock); + list_for_each_entry_safe(cur, nxt, &cmd_mgr->cmds, list) { + list_del(&cur->list); + //cmd_mgr->queue_sz--; + if (!(cur->flags & RWNX_CMD_FLAG_NONBLOCK)) + complete(&cur->complete); + } + spin_unlock_bh(&cmd_mgr->lock); + #if 0 + if(cmd_mgr->queue_sz == 0){ + rwnx_wakeup_unlock(g_rwnx_plat->usbdev->rwnx_hw->ws_tx); + } + #endif + +} + +void rwnx_cmd_mgr_init(struct rwnx_cmd_mgr *cmd_mgr) +{ + RWNX_DBG(RWNX_FN_ENTRY_STR); + + INIT_LIST_HEAD(&cmd_mgr->cmds); + cmd_mgr->state = 
RWNX_CMD_MGR_STATE_INITED; + spin_lock_init(&cmd_mgr->lock); + cmd_mgr->max_queue_sz = RWNX_CMD_MAX_QUEUED; + cmd_mgr->queue = &cmd_mgr_queue; + cmd_mgr->print = &cmd_mgr_print; + cmd_mgr->drain = &cmd_mgr_drain; + cmd_mgr->llind = &cmd_mgr_llind; + cmd_mgr->msgind = &cmd_mgr_msgind; + + INIT_WORK(&cmd_mgr->cmdWork, cmd_mgr_task_process); + cmd_mgr->cmd_wq = create_singlethread_workqueue("cmd_wq"); + if (!cmd_mgr->cmd_wq) { + txrx_err("insufficient memory to create cmd workqueue.\n"); + return; + } +} + +void rwnx_cmd_mgr_deinit(struct rwnx_cmd_mgr *cmd_mgr) +{ + if(cmd_mgr->print && cmd_mgr->drain){ + cmd_mgr->print(cmd_mgr); + cmd_mgr->drain(cmd_mgr); + cmd_mgr->print(cmd_mgr); + flush_workqueue(cmd_mgr->cmd_wq); + destroy_workqueue(cmd_mgr->cmd_wq); + memset(cmd_mgr, 0, sizeof(*cmd_mgr)); + } +} + + +void aicwf_set_cmd_tx(void *dev, struct lmac_msg *msg, uint len) +{ + u8 *buffer = NULL; + u16 index = 0; +#ifdef AICWF_SDIO_SUPPORT + struct aic_sdio_dev *sdiodev = (struct aic_sdio_dev *)dev; + struct aicwf_bus *bus = sdiodev->bus_if; +#else + struct aic_usb_dev *usbdev = (struct aic_usb_dev *)dev; + struct aicwf_bus *bus = NULL; + if (!usbdev->state) { + printk("down msg \n"); + return; + } + bus = usbdev->bus_if; +#endif + buffer = bus->cmd_buf; + + memset(buffer, 0, CMD_BUF_MAX); + buffer[0] = (len+4) & 0x00ff; + buffer[1] = ((len+4) >> 8) &0x0f; + buffer[2] = 0x11; + buffer[3] = 0x0; + index += 4; + //there is a dummy word + index += 4; + + //make sure little endian + put_u16(&buffer[index], msg->id); + index += 2; + put_u16(&buffer[index], msg->dest_id); + index += 2; + put_u16(&buffer[index], msg->src_id); + index += 2; + put_u16(&buffer[index], msg->param_len); + index += 2; + memcpy(&buffer[index], (u8 *)msg->param, msg->param_len); + + aicwf_bus_txmsg(bus, buffer, len + 8); +} + diff --git a/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_cmds.h b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_cmds.h new file mode 100644 index 0000000..8862a86 --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_cmds.h @@ -0,0 +1,120 @@ +/** + ****************************************************************************** + * + * rwnx_cmds.h + * + * Copyright (C) RivieraWaves 2014-2019 + * + ****************************************************************************** + */ + +#ifndef _RWNX_CMDS_H_ +#define _RWNX_CMDS_H_ + +#include +#include +#include +#include "lmac_msg.h" + +#ifdef CONFIG_RWNX_SDM +#define RWNX_80211_CMD_TIMEOUT_MS (20 * 300) +#elif defined(CONFIG_RWNX_FHOST) +#define RWNX_80211_CMD_TIMEOUT_MS (10000) +#else +#define RWNX_80211_CMD_TIMEOUT_MS 4000//500//300 +#endif + +#define RWNX_CMD_FLAG_NONBLOCK BIT(0) +#define RWNX_CMD_FLAG_REQ_CFM BIT(1) +#define RWNX_CMD_FLAG_WAIT_PUSH BIT(2) +#define RWNX_CMD_FLAG_WAIT_ACK BIT(3) +#define RWNX_CMD_FLAG_WAIT_CFM BIT(4) +#define RWNX_CMD_FLAG_DONE BIT(5) +/* ATM IPC design makes it possible to get the CFM before the ACK, + * otherwise this could have simply been a state enum */ +#define RWNX_CMD_WAIT_COMPLETE(flags) \ + (!(flags & (RWNX_CMD_FLAG_WAIT_ACK | RWNX_CMD_FLAG_WAIT_CFM))) + +#define RWNX_CMD_MAX_QUEUED 16//8 AIDEN + +#ifdef CONFIG_RWNX_FHOST +#include "ipc_fhost.h" +#define rwnx_cmd_e2amsg ipc_fhost_msg +#define rwnx_cmd_a2emsg ipc_fhost_msg +#define RWNX_CMD_A2EMSG_LEN(m) (m->param_len) +#define RWNX_CMD_E2AMSG_LEN_MAX IPC_FHOST_MSG_BUF_SIZE +struct rwnx_term_stream; + +#else /* !CONFIG_RWNX_FHOST*/ +#include "ipc_shared.h" +#define rwnx_cmd_e2amsg ipc_e2a_msg +#define rwnx_cmd_a2emsg lmac_msg +#define 
RWNX_CMD_A2EMSG_LEN(m) (sizeof(struct lmac_msg) + m->param_len) +#define RWNX_CMD_E2AMSG_LEN_MAX (IPC_E2A_MSG_PARAM_SIZE * 4) + +#endif /* CONFIG_RWNX_FHOST*/ + +struct rwnx_hw; +struct rwnx_cmd; +typedef int (*msg_cb_fct)(struct rwnx_hw *rwnx_hw, struct rwnx_cmd *cmd, + struct rwnx_cmd_e2amsg *msg); +static inline void put_u16(u8 *buf, u16 data) +{ + buf[0] = (u8)(data&0x00ff); + buf[1] = (u8)((data >> 8)&0x00ff); +} + +enum rwnx_cmd_mgr_state { + RWNX_CMD_MGR_STATE_DEINIT, + RWNX_CMD_MGR_STATE_INITED, + RWNX_CMD_MGR_STATE_CRASHED, +}; + +struct rwnx_cmd { + struct list_head list; + lmac_msg_id_t id; + lmac_msg_id_t reqid; + struct rwnx_cmd_a2emsg *a2e_msg; + char *e2a_msg; + u32 tkn; + u16 flags; + + struct completion complete; + u32 result; + u8 used; + int array_id; + #ifdef CONFIG_RWNX_FHOST + struct rwnx_term_stream *stream; + #endif +}; + +struct rwnx_cmd_mgr { + enum rwnx_cmd_mgr_state state; + spinlock_t lock; + u32 next_tkn; + u32 queue_sz; + u32 max_queue_sz; + + struct list_head cmds; + + int (*queue)(struct rwnx_cmd_mgr *, struct rwnx_cmd *); + int (*llind)(struct rwnx_cmd_mgr *, struct rwnx_cmd *); + int (*msgind)(struct rwnx_cmd_mgr *, struct rwnx_cmd_e2amsg *, msg_cb_fct); + void (*print)(struct rwnx_cmd_mgr *); + void (*drain)(struct rwnx_cmd_mgr *); + + struct work_struct cmdWork; + struct workqueue_struct *cmd_wq; +}; + +#define WAKE_CMD_WORK(cmd_mgr) \ + do { \ + queue_work((cmd_mgr)->cmd_wq, &cmd_mgr->cmdWork); \ + } while (0) + +void rwnx_cmd_mgr_init(struct rwnx_cmd_mgr *cmd_mgr); +void rwnx_cmd_mgr_deinit(struct rwnx_cmd_mgr *cmd_mgr); +int cmd_mgr_queue_force_defer(struct rwnx_cmd_mgr *cmd_mgr, struct rwnx_cmd *cmd); +void aicwf_set_cmd_tx(void *dev, struct lmac_msg *msg, uint len); + +#endif /* _RWNX_CMDS_H_ */ diff --git a/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_compat.h b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_compat.h new file mode 100644 index 0000000..7c26d0f --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_compat.h @@ -0,0 +1,433 @@ +/** + ****************************************************************************** + * + * @file rwnx_compat.h + * + * Ensure driver compilation for linux 3.16 to 3.19 + * + * To avoid too many #if LINUX_VERSION_CODE if the code, when prototype change + * between different kernel version: + * - For external function, define a macro whose name is the function name with + * _compat suffix and prototype (actually the number of parameter) of the + * latest version. Then latest version this macro simply call the function + * and for older kernel version it call the function adapting the api. + * - For internal function (e.g. 
cfg80211_ops) do the same but the macro name + * doesn't need to have the _compat suffix when the function is not used + * directly by the driver + * + * Copyright (C) RivieraWaves 2018 + * + ****************************************************************************** + */ +#ifndef _RWNX_COMPAT_H_ +#define _RWNX_COMPAT_H_ +#include + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0) +#error "Minimum kernel version supported is 3.10" +#endif + +/* Generic */ +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0) +#define __bf_shf(x) (__builtin_ffsll(x) - 1) +#define FIELD_PREP(_mask, _val) \ + (((typeof(_mask))(_val) << __bf_shf(_mask)) & (_mask)) +#else +#include +#endif + +/* CFG80211 */ +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 20, 0) +#define IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK IEEE80211_HE_MAC_CAP3_MAX_A_AMPDU_LEN_EXP_MASK +#endif + +#if LINUX_VERSION_CODE > KERNEL_VERSION(5, 18, 0) +#define IEEE80211_MAX_AMPDU_BUF IEEE80211_MAX_AMPDU_BUF_HE +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0) +#define IEEE80211_RADIOTAP_HE 23 +#define IEEE80211_RADIOTAP_HE_MU 24 + +struct ieee80211_radiotap_he { + __le16 data1, data2, data3, data4, data5, data6; +}; + +enum ieee80211_radiotap_he_bits { + IEEE80211_RADIOTAP_HE_DATA1_FORMAT_MASK = 3, + IEEE80211_RADIOTAP_HE_DATA1_FORMAT_SU = 0, + IEEE80211_RADIOTAP_HE_DATA1_FORMAT_EXT_SU = 1, + IEEE80211_RADIOTAP_HE_DATA1_FORMAT_MU = 2, + IEEE80211_RADIOTAP_HE_DATA1_FORMAT_TRIG = 3, + + IEEE80211_RADIOTAP_HE_DATA1_BSS_COLOR_KNOWN = 0x0004, + IEEE80211_RADIOTAP_HE_DATA1_BEAM_CHANGE_KNOWN = 0x0008, + IEEE80211_RADIOTAP_HE_DATA1_UL_DL_KNOWN = 0x0010, + IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN = 0x0020, + IEEE80211_RADIOTAP_HE_DATA1_DATA_DCM_KNOWN = 0x0040, + IEEE80211_RADIOTAP_HE_DATA1_CODING_KNOWN = 0x0080, + IEEE80211_RADIOTAP_HE_DATA1_LDPC_XSYMSEG_KNOWN = 0x0100, + IEEE80211_RADIOTAP_HE_DATA1_STBC_KNOWN = 0x0200, + IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE_KNOWN = 0x0400, + IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE2_KNOWN = 0x0800, + IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE3_KNOWN = 0x1000, + IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE4_KNOWN = 0x2000, + IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN = 0x4000, + IEEE80211_RADIOTAP_HE_DATA1_DOPPLER_KNOWN = 0x8000, + + IEEE80211_RADIOTAP_HE_DATA2_PRISEC_80_KNOWN = 0x0001, + IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN = 0x0002, + IEEE80211_RADIOTAP_HE_DATA2_NUM_LTF_SYMS_KNOWN = 0x0004, + IEEE80211_RADIOTAP_HE_DATA2_PRE_FEC_PAD_KNOWN = 0x0008, + IEEE80211_RADIOTAP_HE_DATA2_TXBF_KNOWN = 0x0010, + IEEE80211_RADIOTAP_HE_DATA2_PE_DISAMBIG_KNOWN = 0x0020, + IEEE80211_RADIOTAP_HE_DATA2_TXOP_KNOWN = 0x0040, + IEEE80211_RADIOTAP_HE_DATA2_MIDAMBLE_KNOWN = 0x0080, + IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET = 0x3f00, + IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET_KNOWN = 0x4000, + IEEE80211_RADIOTAP_HE_DATA2_PRISEC_80_SEC = 0x8000, + + IEEE80211_RADIOTAP_HE_DATA3_BSS_COLOR = 0x003f, + IEEE80211_RADIOTAP_HE_DATA3_BEAM_CHANGE = 0x0040, + IEEE80211_RADIOTAP_HE_DATA3_UL_DL = 0x0080, + IEEE80211_RADIOTAP_HE_DATA3_DATA_MCS = 0x0f00, + IEEE80211_RADIOTAP_HE_DATA3_DATA_DCM = 0x1000, + IEEE80211_RADIOTAP_HE_DATA3_CODING = 0x2000, + IEEE80211_RADIOTAP_HE_DATA3_LDPC_XSYMSEG = 0x4000, + IEEE80211_RADIOTAP_HE_DATA3_STBC = 0x8000, + + IEEE80211_RADIOTAP_HE_DATA4_SU_MU_SPTL_REUSE = 0x000f, + IEEE80211_RADIOTAP_HE_DATA4_MU_STA_ID = 0x7ff0, + IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE1 = 0x000f, + IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE2 = 0x00f0, + IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE3 = 0x0f00, + IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE4 
= 0xf000, + + IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC = 0x000f, + IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_20MHZ = 0, + IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_40MHZ = 1, + IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_80MHZ = 2, + IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_160MHZ = 3, + IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_26T = 4, + IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_52T = 5, + IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_106T = 6, + IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_242T = 7, + IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_484T = 8, + IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_996T = 9, + IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_2x996T = 10, + + IEEE80211_RADIOTAP_HE_DATA5_GI = 0x0030, + IEEE80211_RADIOTAP_HE_DATA5_GI_0_8 = 0, + IEEE80211_RADIOTAP_HE_DATA5_GI_1_6 = 1, + IEEE80211_RADIOTAP_HE_DATA5_GI_3_2 = 2, + + IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE = 0x00c0, + IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_UNKNOWN = 0, + IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_1X = 1, + IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_2X = 2, + IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_4X = 3, + IEEE80211_RADIOTAP_HE_DATA5_NUM_LTF_SYMS = 0x0700, + IEEE80211_RADIOTAP_HE_DATA5_PRE_FEC_PAD = 0x3000, + IEEE80211_RADIOTAP_HE_DATA5_TXBF = 0x4000, + IEEE80211_RADIOTAP_HE_DATA5_PE_DISAMBIG = 0x8000, + + IEEE80211_RADIOTAP_HE_DATA6_NSTS = 0x000f, + IEEE80211_RADIOTAP_HE_DATA6_DOPPLER = 0x0010, + IEEE80211_RADIOTAP_HE_DATA6_TXOP = 0x7f00, + IEEE80211_RADIOTAP_HE_DATA6_MIDAMBLE_PDCTY = 0x8000, +}; + +struct ieee80211_radiotap_he_mu { + __le16 flags1, flags2; + u8 ru_ch1[4]; + u8 ru_ch2[4]; +}; + +enum ieee80211_radiotap_he_mu_bits { + IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS = 0x000f, + IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS_KNOWN = 0x0010, + IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM = 0x0020, + IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM_KNOWN = 0x0040, + IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH2_CTR_26T_RU_KNOWN = 0x0080, + IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH1_RU_KNOWN = 0x0100, + IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH2_RU_KNOWN = 0x0200, + IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH1_CTR_26T_RU_KNOWN = 0x1000, + IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH1_CTR_26T_RU = 0x2000, + IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_COMP_KNOWN = 0x4000, + IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_SYMS_USERS_KNOWN = 0x8000, + + IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW = 0x0003, + IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_20MHZ = 0x0000, + IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_40MHZ = 0x0001, + IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_80MHZ = 0x0002, + IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_160MHZ = 0x0003, + IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_KNOWN = 0x0004, + IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_COMP = 0x0008, + IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_SYMS_USERS = 0x00f0, + IEEE80211_RADIOTAP_HE_MU_FLAGS2_PUNC_FROM_SIG_A_BW = 0x0300, + IEEE80211_RADIOTAP_HE_MU_FLAGS2_PUNC_FROM_SIG_A_BW_KNOWN= 0x0400, + IEEE80211_RADIOTAP_HE_MU_FLAGS2_CH2_CTR_26T_RU = 0x0800, +}; +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0) +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0) +#define rwnx_cfg80211_add_iface(wiphy, name, name_assign_type, type, params) \ + rwnx_cfg80211_add_iface(wiphy, name, type, u32 *flags, params) +#else +#define rwnx_cfg80211_add_iface(wiphy, name, name_assign_type, type, params) \ + rwnx_cfg80211_add_iface(wiphy, name, name_assign_type, type, u32 *flags, params) +#endif + +#define rwnx_cfg80211_change_iface(wiphy, dev, type, params) \ + 
rwnx_cfg80211_change_iface(wiphy, dev, type, u32 *flags, params) + +#define CCFS0(vht) vht->center_freq_seg1_idx +#define CCFS1(vht) vht->center_freq_seg2_idx + +#else +#define CCFS0(vht) vht->center_freq_seg0_idx +#define CCFS1(vht) vht->center_freq_seg1_idx + +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0) +#define cfg80211_cqm_rssi_notify(dev, event, level, gfp) \ + cfg80211_cqm_rssi_notify(dev, event, gfp) +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0) +#define ieee80211_amsdu_to_8023s(skb, list, addr, iftype, extra_headroom, check_da, check_sa) \ + ieee80211_amsdu_to_8023s(skb, list, addr, iftype, extra_headroom, false) +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 7, 0) +#define NUM_NL80211_BANDS IEEE80211_NUM_BANDS +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 2, 0) +#define cfg80211_disconnected(dev, reason, ie, len, local, gfp) \ + cfg80211_disconnected(dev, reason, ie, len, gfp) +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0)) && !(defined CONFIG_VENDOR_RWNX) +#define ieee80211_chandef_to_operating_class(chan_def, op_class) 0 +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0) +#define SURVEY_INFO_TIME SURVEY_INFO_CHANNEL_TIME +#define SURVEY_INFO_TIME_BUSY SURVEY_INFO_CHANNEL_TIME_BUSY +#define SURVEY_INFO_TIME_EXT_BUSY SURVEY_INFO_CHANNEL_TIME_EXT_BUSY +#define SURVEY_INFO_TIME_RX SURVEY_INFO_CHANNEL_TIME_RX +#define SURVEY_INFO_TIME_TX SURVEY_INFO_CHANNEL_TIME_TX + +#define SURVEY_TIME(s) s->channel_time +#define SURVEY_TIME_BUSY(s) s->channel_time_busy +#else +#define SURVEY_TIME(s) s->time +#define SURVEY_TIME_BUSY(s) s->time_busy +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 19, 0) +#define cfg80211_ch_switch_started_notify(dev, chandef, count) + +#define WLAN_BSS_COEX_INFORMATION_REQUEST BIT(0) +#define WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING BIT(2) +#define WLAN_EXT_CAPA4_TDLS_BUFFER_STA BIT(4) +#define WLAN_EXT_CAPA4_TDLS_PEER_PSM BIT(5) +#define WLAN_EXT_CAPA4_TDLS_CHAN_SWITCH BIT(6) +#define WLAN_EXT_CAPA5_TDLS_CH_SW_PROHIBITED BIT(7) +#define NL80211_FEATURE_TDLS_CHANNEL_SWITCH 0 + +#define STA_TDLS_INITIATOR(sta) 0 + +#define REGULATORY_IGNORE_STALE_KICKOFF 0 +#else +#define STA_TDLS_INITIATOR(sta) sta->tdls_initiator + +#ifndef REGULATORY_IGNORE_STALE_KICKOFF +#define REGULATORY_IGNORE_STALE_KICKOFF 0 +#endif + +#endif + + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0)) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0)) +#define cfg80211_rx_mgmt(wdev, freq, rssi, buf, len, flags) \ + cfg80211_rx_mgmt(wdev, freq, rssi, buf, len, flags, GFP_ATOMIC) +#elif LINUX_VERSION_CODE < KERNEL_VERSION(3, 11, 0) +#define cfg80211_rx_mgmt(wdev, freq, rssi, buf, len, flags) \ + cfg80211_rx_mgmt(wdev, freq, rssi, buf, len, GFP_ATOMIC) +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0) + +#if 0 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0) +#define rwnx_cfg80211_tdls_mgmt(wiphy, dev, peer, act, tok, status, peer_capability, initiator, buf, len) \ + rwnx_cfg80211_tdls_mgmt(wiphy, dev, peer, act, tok, status, peer_capability, buf, len) +#else +#define rwnx_cfg80211_tdls_mgmt(wiphy, dev, peer, act, tok, status, peer_capability, initiator, buf, len) \ + rwnx_cfg80211_tdls_mgmt(wiphy, dev, peer, act, tok, status, buf, len) +#endif +#endif + +#include + +struct ieee80211_wmm_ac_param { + u8 aci_aifsn; /* AIFSN, ACM, ACI */ + u8 cw; /* ECWmin, ECWmax (CW = 2^ECW - 1) */ + __le16 txop_limit; +} __packed; + +struct ieee80211_wmm_param_ie { + u8 element_id; /* Element ID: 221 (0xdd); */ + u8 len; /* 
Length: 24 */ + /* required fields for WMM version 1 */ + u8 oui[3]; /* 00:50:f2 */ + u8 oui_type; /* 2 */ + u8 oui_subtype; /* 1 */ + u8 version; /* 1 for WMM version 1.0 */ + u8 qos_info; /* AP/STA specific QoS info */ + u8 reserved; /* 0 */ + /* AC_BE, AC_BK, AC_VI, AC_VO */ + struct ieee80211_wmm_ac_param ac[4]; +} __packed; +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0) +enum { + IEEE80211_HE_MCS_SUPPORT_0_7 = 0, + IEEE80211_HE_MCS_SUPPORT_0_9 = 1, + IEEE80211_HE_MCS_SUPPORT_0_11 = 2, + IEEE80211_HE_MCS_NOT_SUPPORTED = 3, +}; +#endif + +/* MAC80211 */ +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 18, 0) +#define rwnx_ops_mgd_prepare_tx(hw, vif, duration) \ + rwnx_ops_mgd_prepare_tx(hw, vif) +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0) + +#define RX_ENC_HT(s) s->flag |= RX_FLAG_HT +#define RX_ENC_HT_GF(s) s->flag |= (RX_FLAG_HT | RX_FLAG_HT_GF) +#define RX_ENC_VHT(s) s->flag |= RX_FLAG_HT +#define RX_ENC_HE(s) s->flag |= RX_FLAG_HT +#define RX_ENC_FLAG_SHORT_GI(s) s->flag |= RX_FLAG_SHORT_GI +#define RX_ENC_FLAG_SHORT_PRE(s) s->flag |= RX_FLAG_SHORTPRE +#define RX_ENC_FLAG_LDPC(s) s->flag |= RX_FLAG_LDPC +#define RX_BW_40MHZ(s) s->flag |= RX_FLAG_40MHZ +#define RX_BW_80MHZ(s) s->vht_flag |= RX_VHT_FLAG_80MHZ +#define RX_BW_160MHZ(s) s->vht_flag |= RX_VHT_FLAG_160MHZ +#define RX_NSS(s) s->vht_nss + +#else +#define RX_ENC_HT(s) s->encoding = RX_ENC_HT +#define RX_ENC_HT_GF(s) { s->encoding = RX_ENC_HT; \ + s->enc_flags |= RX_ENC_FLAG_HT_GF; } +#define RX_ENC_VHT(s) s->encoding = RX_ENC_VHT +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0) +#define RX_ENC_HE(s) s->encoding = RX_ENC_VHT +#else +#define RX_ENC_HE(s) s->encoding = RX_ENC_HE +#endif +#define RX_ENC_FLAG_SHORT_GI(s) s->enc_flags |= RX_ENC_FLAG_SHORT_GI +#define RX_ENC_FLAG_SHORT_PRE(s) s->enc_flags |= RX_ENC_FLAG_SHORTPRE +#define RX_ENC_FLAG_LDPC(s) s->enc_flags |= RX_ENC_FLAG_LDPC +#define RX_BW_40MHZ(s) s->bw = RATE_INFO_BW_40 +#define RX_BW_80MHZ(s) s->bw = RATE_INFO_BW_80 +#define RX_BW_160MHZ(s) s->bw = RATE_INFO_BW_160 +#define RX_NSS(s) s->nss + +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0) +#define ieee80211_cqm_rssi_notify(vif, event, level, gfp) \ + ieee80211_cqm_rssi_notify(vif, event, gfp) +#endif + +#ifndef CONFIG_VENDOR_RWNX_AMSDUS_TX +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0)) +#define rwnx_ops_ampdu_action(hw, vif, params) \ + rwnx_ops_ampdu_action(hw, vif, enum ieee80211_ampdu_mlme_action action, \ + struct ieee80211_sta *sta, u16 tid, u16 *ssn, u8 buf_size) +#elif (LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)) +#define rwnx_ops_ampdu_action(hw, vif, params) \ + rwnx_ops_ampdu_action(hw, vif, enum ieee80211_ampdu_mlme_action action, \ + struct ieee80211_sta *sta, u16 tid, u16 *ssn, u8 buf_size, \ + bool amsdu) +#endif +#endif /* CONFIG_VENDOR_RWNX_AMSDUS_TX */ + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 2, 0) +#define IEEE80211_HW_SUPPORT_FAST_XMIT 0 +#define ieee80211_hw_check(hw, feat) (hw->flags & IEEE80211_HW_##feat) +#define ieee80211_hw_set(hw, feat) {hw->flags |= IEEE80211_HW_##feat;} +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 19, 0) +#define rwnx_ops_sw_scan_start(hw, vif, mac_addr) \ + rwnx_ops_sw_scan_start(hw) +#define rwnx_ops_sw_scan_complete(hw, vif) \ + rwnx_ops_sw_scan_complete(hw) +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0) +#define rwnx_ops_hw_scan(hw, vif, hw_req) \ + rwnx_ops_hw_scan(hw, vif, struct cfg80211_scan_request *req) +#endif + +/* NET */ +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0) +#define 
rwnx_select_queue(dev, skb, sb_dev) \ + rwnx_select_queue(dev, skb) +#elif LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0) +#define rwnx_select_queue(dev, skb, sb_dev) \ + rwnx_select_queue(dev, skb, void *accel_priv, select_queue_fallback_t fallback) +#elif LINUX_VERSION_CODE < KERNEL_VERSION(5, 2, 0) +#define rwnx_select_queue(dev, skb, sb_dev) \ + rwnx_select_queue(dev, skb, sb_dev, select_queue_fallback_t fallback) +#else +#define rwnx_select_queue(dev, skb, sb_dev) \ + rwnx_select_queue(dev, skb, sb_dev) +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0)) && !(defined CONFIG_VENDOR_RWNX) +#define sk_pacing_shift_update(sk, shift) +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0) +#define alloc_netdev_mqs(size, name, assign, setup, txqs, rxqs) \ + alloc_netdev_mqs(size, name, setup, txqs, rxqs) +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0) +#define NET_NAME_UNKNOWN 0 +#endif + +/* TRACE */ +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 2, 0) +#define trace_print_symbols_seq ftrace_print_symbols_seq +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0) +#define trace_seq_buffer_ptr(p) p->buffer + p->len +#endif + +/* TIME */ +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) +#define time64_to_tm(t, o, tm) time_to_tm((time_t)t, o, tm) +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 19, 0) +#define ktime_get_real_seconds get_seconds +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0) +typedef __s64 time64_t; +#endif + +#endif /* _RWNX_COMPAT_H_ */ diff --git a/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_debugfs.c b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_debugfs.c new file mode 100644 index 0000000..7d4c82e --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_debugfs.c @@ -0,0 +1,2739 @@ +/** + ****************************************************************************** + * + * @file rwnx_debugfs.c + * + * @brief Definition of debugfs entries + * + * Copyright (C) RivieraWaves 2012-2019 + * + ****************************************************************************** + */ + + +#include +#include +#include +#include +#include + +#include "rwnx_debugfs.h" +#include "rwnx_msg_tx.h" +#include "rwnx_radar.h" +#include "rwnx_tx.h" + +#ifdef CONFIG_DEBUG_FS +#ifdef CONFIG_RWNX_FULLMAC +static ssize_t rwnx_dbgfs_stats_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct rwnx_hw *priv = file->private_data; + char *buf; + int ret; + int i, skipped; + ssize_t read; + int bufsz = (NX_TXQ_CNT) * 20 + (ARRAY_SIZE(priv->stats.amsdus_rx) + 1) * 40 + + (ARRAY_SIZE(priv->stats.ampdus_tx) * 30); + + if (*ppos) + return 0; + + buf = kmalloc(bufsz, GFP_ATOMIC); + if (buf == NULL) + return 0; + + ret = scnprintf(buf, bufsz, "TXQs CFM balances "); + for (i = 0; i < NX_TXQ_CNT; i++) + ret += scnprintf(&buf[ret], bufsz - ret, + " [%1d]:%3d", i, + priv->stats.cfm_balance[i]); + + ret += scnprintf(&buf[ret], bufsz - ret, "\n"); + +#ifdef CONFIG_RWNX_SPLIT_TX_BUF + ret += scnprintf(&buf[ret], bufsz - ret, + "\nAMSDU[len] done failed received\n"); + for (i = skipped = 0; i < NX_TX_PAYLOAD_MAX; i++) { + if (priv->stats.amsdus[i].done) { + per = DIV_ROUND_UP((priv->stats.amsdus[i].failed) * + 100, priv->stats.amsdus[i].done); + } else if (priv->stats.amsdus_rx[i]) { + per = 0; + } else { + per = 0; + skipped = 1; + continue; + } + if (skipped) { + ret += scnprintf(&buf[ret], bufsz - ret, " ...\n"); + skipped = 0; + } + + ret += scnprintf(&buf[ret], bufsz - ret, + " [%2d] %10d %8d(%3d%%) 
%10d\n", i ? i + 1 : i, + priv->stats.amsdus[i].done, + priv->stats.amsdus[i].failed, per, + priv->stats.amsdus_rx[i]); + } + + for (; i < ARRAY_SIZE(priv->stats.amsdus_rx); i++) { + if (!priv->stats.amsdus_rx[i]) { + skipped = 1; + continue; + } + if (skipped) { + ret += scnprintf(&buf[ret], bufsz - ret, " ...\n"); + skipped = 0; + } + + ret += scnprintf(&buf[ret], bufsz - ret, + " [%2d] %10d\n", + i + 1, priv->stats.amsdus_rx[i]); + } +#else + ret += scnprintf(&buf[ret], bufsz - ret, + "\nAMSDU[len] received\n"); + for (i = skipped = 0; i < ARRAY_SIZE(priv->stats.amsdus_rx); i++) { + if (!priv->stats.amsdus_rx[i]) { + skipped = 1; + continue; + } + if (skipped) { + ret += scnprintf(&buf[ret], bufsz - ret, + " ...\n"); + skipped = 0; + } + + ret += scnprintf(&buf[ret], bufsz - ret, + " [%2d] %10d\n", + i + 1, priv->stats.amsdus_rx[i]); + } + +#endif /* CONFIG_RWNX_SPLIT_TX_BUF */ + + ret += scnprintf(&buf[ret], bufsz - ret, + "\nAMPDU[len] done received\n"); + for (i = skipped = 0; i < ARRAY_SIZE(priv->stats.ampdus_tx); i++) { + if (!priv->stats.ampdus_tx[i] && !priv->stats.ampdus_rx[i]) { + skipped = 1; + continue; + } + if (skipped) { + ret += scnprintf(&buf[ret], bufsz - ret, + " ...\n"); + skipped = 0; + } + + ret += scnprintf(&buf[ret], bufsz - ret, + " [%2d] %9d %9d\n", i ? i + 1 : i, + priv->stats.ampdus_tx[i], priv->stats.ampdus_rx[i]); + } + + ret += scnprintf(&buf[ret], bufsz - ret, + "#mpdu missed %9d\n", + priv->stats.ampdus_rx_miss); + read = simple_read_from_buffer(user_buf, count, ppos, buf, ret); + + kfree(buf); + + return read; +} +#endif /* CONFIG_RWNX_FULLMAC */ + +static ssize_t rwnx_dbgfs_stats_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct rwnx_hw *priv = file->private_data; + + /* Prevent from interrupt preemption as these statistics are updated under + * interrupt */ + spin_lock_bh(&priv->tx_lock); + + memset(&priv->stats, 0, sizeof(priv->stats)); + + spin_unlock_bh(&priv->tx_lock); + + return count; +} + +DEBUGFS_READ_WRITE_FILE_OPS(stats); + +#define TXQ_STA_PREF "tid|" +#define TXQ_STA_PREF_FMT "%3d|" + +#ifdef CONFIG_RWNX_FULLMAC +#define TXQ_VIF_PREF "type|" +#define TXQ_VIF_PREF_FMT "%4s|" +#else +#define TXQ_VIF_PREF "AC|" +#define TXQ_VIF_PREF_FMT "%2s|" +#endif /* CONFIG_RWNX_FULLMAC */ + +#define TXQ_HDR "idx| status|credit|ready|retry" +#define TXQ_HDR_FMT "%3d|%s%s%s%s%s%s%s|%6d|%5d|%5d" + +#ifdef CONFIG_RWNX_AMSDUS_TX +#ifdef CONFIG_RWNX_FULLMAC +#define TXQ_HDR_SUFF "|amsdu" +#define TXQ_HDR_SUFF_FMT "|%5d" +#else +#define TXQ_HDR_SUFF "|amsdu-ht|amdsu-vht" +#define TXQ_HDR_SUFF_FMT "|%8d|%9d" +#endif /* CONFIG_RWNX_FULLMAC */ +#else +#define TXQ_HDR_SUFF "" +#define TXQ_HDR_SUF_FMT "" +#endif /* CONFIG_RWNX_AMSDUS_TX */ + +#define TXQ_HDR_MAX_LEN (sizeof(TXQ_STA_PREF) + sizeof(TXQ_HDR) + sizeof(TXQ_HDR_SUFF) + 1) + +#ifdef CONFIG_RWNX_FULLMAC +#define PS_HDR "Legacy PS: ready=%d, sp=%d / UAPSD: ready=%d, sp=%d" +#define PS_HDR_LEGACY "Legacy PS: ready=%d, sp=%d" +#define PS_HDR_UAPSD "UAPSD: ready=%d, sp=%d" +#define PS_HDR_MAX_LEN sizeof("Legacy PS: ready=xxx, sp=xxx / UAPSD: ready=xxx, sp=xxx\n") +#else +#define PS_HDR "" +#define PS_HDR_MAX_LEN 0 +#endif /* CONFIG_RWNX_FULLMAC */ + +#define STA_HDR "** STA %d (%pM)\n" +#define STA_HDR_MAX_LEN (sizeof("- STA xx (xx:xx:xx:xx:xx:xx)\n") + PS_HDR_MAX_LEN) + +#ifdef CONFIG_RWNX_FULLMAC +#define VIF_HDR "* VIF [%d] %s\n" +#define VIF_HDR_MAX_LEN (sizeof(VIF_HDR) + IFNAMSIZ) +#else +#define VIF_HDR "* VIF [%d]\n" +#define VIF_HDR_MAX_LEN sizeof(VIF_HDR) 
+#endif + + +#ifdef CONFIG_RWNX_AMSDUS_TX + +#ifdef CONFIG_RWNX_FULLMAC +#define VIF_SEP "---------------------------------------\n" +#else +#define VIF_SEP "----------------------------------------------------\n" +#endif /* CONFIG_RWNX_FULLMAC */ + +#else /* ! CONFIG_RWNX_AMSDUS_TX */ +#define VIF_SEP "---------------------------------\n" +#endif /* CONFIG_RWNX_AMSDUS_TX*/ + +#define VIF_SEP_LEN sizeof(VIF_SEP) + +#define CAPTION "status: L=in hwq list, F=stop full, P=stop sta PS, V=stop vif PS, C=stop channel, S=stop CSA, M=stop MU" +#define CAPTION_LEN sizeof(CAPTION) + +#define STA_TXQ 0 +#define VIF_TXQ 1 + +static int rwnx_dbgfs_txq(char *buf, size_t size, struct rwnx_txq *txq, int type, int tid, char *name) +{ + int res, idx = 0; + + if (type == STA_TXQ) { + res = scnprintf(&buf[idx], size, TXQ_STA_PREF_FMT, tid); + idx += res; + size -= res; + } else { + res = scnprintf(&buf[idx], size, TXQ_VIF_PREF_FMT, name); + idx += res; + size -= res; + } + + res = scnprintf(&buf[idx], size, TXQ_HDR_FMT, txq->idx, + (txq->status & RWNX_TXQ_IN_HWQ_LIST) ? "L" : " ", + (txq->status & RWNX_TXQ_STOP_FULL) ? "F" : " ", + (txq->status & RWNX_TXQ_STOP_STA_PS) ? "P" : " ", + (txq->status & RWNX_TXQ_STOP_VIF_PS) ? "V" : " ", + (txq->status & RWNX_TXQ_STOP_CHAN) ? "C" : " ", + (txq->status & RWNX_TXQ_STOP_CSA) ? "S" : " ", + (txq->status & RWNX_TXQ_STOP_MU_POS) ? "M" : " ", + txq->credits, skb_queue_len(&txq->sk_list), + txq->nb_retry); + idx += res; + size -= res; + +#ifdef CONFIG_RWNX_AMSDUS_TX + if (type == STA_TXQ) { + res = scnprintf(&buf[idx], size, TXQ_HDR_SUFF_FMT, +#ifdef CONFIG_RWNX_FULLMAC + txq->amsdu_len +#else + txq->amsdu_ht_len_cap, txq->amsdu_vht_len_cap +#endif /* CONFIG_RWNX_FULLMAC */ + ); + idx += res; + size -= res; + } +#endif + + res = scnprintf(&buf[idx], size, "\n"); + idx += res; + size -= res; + + return idx; +} + +static int rwnx_dbgfs_txq_sta(char *buf, size_t size, struct rwnx_sta *rwnx_sta, + struct rwnx_hw *rwnx_hw) +{ + int tid, res, idx = 0; + struct rwnx_txq *txq; + + res = scnprintf(&buf[idx], size, "\n" STA_HDR, + rwnx_sta->sta_idx, +#ifdef CONFIG_RWNX_FULLMAC + rwnx_sta->mac_addr +#endif /* CONFIG_RWNX_FULLMAC */ + ); + idx += res; + size -= res; + +#ifdef CONFIG_RWNX_FULLMAC + if (rwnx_sta->ps.active) { + if (rwnx_sta->uapsd_tids && + (rwnx_sta->uapsd_tids == ((1 << NX_NB_TXQ_PER_STA) - 1))) + res = scnprintf(&buf[idx], size, PS_HDR_UAPSD "\n", + rwnx_sta->ps.pkt_ready[UAPSD_ID], + rwnx_sta->ps.sp_cnt[UAPSD_ID]); + else if (rwnx_sta->uapsd_tids) + res = scnprintf(&buf[idx], size, PS_HDR "\n", + rwnx_sta->ps.pkt_ready[LEGACY_PS_ID], + rwnx_sta->ps.sp_cnt[LEGACY_PS_ID], + rwnx_sta->ps.pkt_ready[UAPSD_ID], + rwnx_sta->ps.sp_cnt[UAPSD_ID]); + else + res = scnprintf(&buf[idx], size, PS_HDR_LEGACY "\n", + rwnx_sta->ps.pkt_ready[LEGACY_PS_ID], + rwnx_sta->ps.sp_cnt[LEGACY_PS_ID]); + idx += res; + size -= res; + } else { + res = scnprintf(&buf[idx], size, "\n"); + idx += res; + size -= res; + } +#endif /* CONFIG_RWNX_FULLMAC */ + + + res = scnprintf(&buf[idx], size, TXQ_STA_PREF TXQ_HDR TXQ_HDR_SUFF "\n"); + idx += res; + size -= res; + + + foreach_sta_txq(rwnx_sta, txq, tid, rwnx_hw) { + res = rwnx_dbgfs_txq(&buf[idx], size, txq, STA_TXQ, tid, NULL); + idx += res; + size -= res; + } + + return idx; +} + +static int rwnx_dbgfs_txq_vif(char *buf, size_t size, struct rwnx_vif *rwnx_vif, + struct rwnx_hw *rwnx_hw) +{ + int res, idx = 0; + struct rwnx_txq *txq; + struct rwnx_sta *rwnx_sta; + +#ifdef CONFIG_RWNX_FULLMAC + res = scnprintf(&buf[idx], size, VIF_HDR, 
rwnx_vif->vif_index, rwnx_vif->ndev->name); + idx += res; + size -= res; + if (!rwnx_vif->up || rwnx_vif->ndev == NULL) + return idx; + +#else + int ac; + char ac_name[2] = {'0', '\0'}; + + res = scnprintf(&buf[idx], size, VIF_HDR, rwnx_vif->vif_index); + idx += res; + size -= res; +#endif /* CONFIG_RWNX_FULLMAC */ + +#ifdef CONFIG_RWNX_FULLMAC + if (RWNX_VIF_TYPE(rwnx_vif) == NL80211_IFTYPE_AP || + RWNX_VIF_TYPE(rwnx_vif) == NL80211_IFTYPE_P2P_GO || + RWNX_VIF_TYPE(rwnx_vif) == NL80211_IFTYPE_MESH_POINT) { + res = scnprintf(&buf[idx], size, TXQ_VIF_PREF TXQ_HDR "\n"); + idx += res; + size -= res; + txq = rwnx_txq_vif_get(rwnx_vif, NX_UNK_TXQ_TYPE); + res = rwnx_dbgfs_txq(&buf[idx], size, txq, VIF_TXQ, 0, "UNK"); + idx += res; + size -= res; + txq = rwnx_txq_vif_get(rwnx_vif, NX_BCMC_TXQ_TYPE); + res = rwnx_dbgfs_txq(&buf[idx], size, txq, VIF_TXQ, 0, "BCMC"); + idx += res; + size -= res; + rwnx_sta = &rwnx_hw->sta_table[rwnx_vif->ap.bcmc_index]; + if (rwnx_sta->ps.active) { + res = scnprintf(&buf[idx], size, PS_HDR_LEGACY "\n", + rwnx_sta->ps.sp_cnt[LEGACY_PS_ID], + rwnx_sta->ps.sp_cnt[LEGACY_PS_ID]); + idx += res; + size -= res; + } else { + res = scnprintf(&buf[idx], size, "\n"); + idx += res; + size -= res; + } + + list_for_each_entry(rwnx_sta, &rwnx_vif->ap.sta_list, list) { + res = rwnx_dbgfs_txq_sta(&buf[idx], size, rwnx_sta, rwnx_hw); + idx += res; + size -= res; + } + } else if (RWNX_VIF_TYPE(rwnx_vif) == NL80211_IFTYPE_STATION || + RWNX_VIF_TYPE(rwnx_vif) == NL80211_IFTYPE_P2P_CLIENT) { + if (rwnx_vif->sta.ap) { + res = rwnx_dbgfs_txq_sta(&buf[idx], size, rwnx_vif->sta.ap, rwnx_hw); + idx += res; + size -= res; + } + } + +#else + res = scnprintf(&buf[idx], size, TXQ_VIF_PREF TXQ_HDR "\n"); + idx += res; + size -= res; + + foreach_vif_txq(rwnx_vif, txq, ac) { + ac_name[0]++; + res = rwnx_dbgfs_txq(&buf[idx], size, txq, VIF_TXQ, 0, ac_name); + idx += res; + size -= res; + } + + list_for_each_entry(rwnx_sta, &rwnx_vif->stations, list) { + res = rwnx_dbgfs_txq_sta(&buf[idx], size, rwnx_sta, rwnx_hw); + idx += res; + size -= res; + } +#endif /* CONFIG_RWNX_FULLMAC */ + return idx; +} + +static ssize_t rwnx_dbgfs_txq_read(struct file *file , + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct rwnx_hw *rwnx_hw = file->private_data; + struct rwnx_vif *vif; + char *buf; + int idx, res; + ssize_t read; + size_t bufsz = ((NX_VIRT_DEV_MAX * (VIF_HDR_MAX_LEN + 2 * VIF_SEP_LEN)) + + (NX_REMOTE_STA_MAX * STA_HDR_MAX_LEN) + + ((NX_REMOTE_STA_MAX + NX_VIRT_DEV_MAX + NX_NB_TXQ) * + TXQ_HDR_MAX_LEN) + CAPTION_LEN); + + /* everything is read in one go */ + if (*ppos) + return 0; + + bufsz = min_t(size_t, bufsz, count); + + buf = kmalloc(bufsz, GFP_ATOMIC); + if (buf == NULL) + return 0; + + bufsz--; + idx = 0; + + res = scnprintf(&buf[idx], bufsz, CAPTION); + idx += res; + bufsz -= res; + + //spin_lock_bh(&rwnx_hw->tx_lock); + list_for_each_entry(vif, &rwnx_hw->vifs, list) { + res = scnprintf(&buf[idx], bufsz, "\n"VIF_SEP); + idx += res; + bufsz -= res; + res = rwnx_dbgfs_txq_vif(&buf[idx], bufsz, vif, rwnx_hw); + idx += res; + bufsz -= res; + res = scnprintf(&buf[idx], bufsz, VIF_SEP); + idx += res; + bufsz -= res; + } + //spin_unlock_bh(&rwnx_hw->tx_lock); + + read = simple_read_from_buffer(user_buf, count, ppos, buf, idx); + kfree(buf); + + return read; +} +DEBUGFS_READ_FILE_OPS(txq); + +static ssize_t rwnx_dbgfs_acsinfo_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct rwnx_hw *priv = file->private_data; + #ifdef CONFIG_RWNX_FULLMAC + struct 
wiphy *wiphy = priv->wiphy; + #endif //CONFIG_RWNX_FULLMAC + //char buf[(SCAN_CHANNEL_MAX + 1) * 43]; + char *buf = NULL; + ssize_t size = 0; + int survey_cnt = 0; + int len = 0; + int band, chan_cnt; + int band_max = NL80211_BAND_5GHZ; + + buf = (char*)kmalloc(sizeof(char) * ((SCAN_CHANNEL_MAX + 1) * 43), GFP_KERNEL); + memset(buf, 0, ((SCAN_CHANNEL_MAX + 1) * 43)); + + if (priv->band_5g_support){ + band_max = NL80211_BAND_5GHZ + 1; + } + + mutex_lock(&priv->dbgdump_elem.mutex); + + len += scnprintf(buf, min_t(size_t, sizeof(buf) - 1, count), + "FREQ TIME(ms) BUSY(ms) NOISE(dBm)\n"); + + + //#ifdef USE_5G + //for (band = NL80211_BAND_2GHZ; band <= NL80211_BAND_5GHZ; band++) { + //#else + //for (band = NL80211_BAND_2GHZ; band < NL80211_BAND_5GHZ; band++) { + //#endif + for (band = NL80211_BAND_2GHZ; band < band_max; band++) { + for (chan_cnt = 0; chan_cnt < wiphy->bands[band]->n_channels; chan_cnt++) { + struct rwnx_survey_info *p_survey_info = &priv->survey[survey_cnt]; + struct ieee80211_channel *p_chan = &wiphy->bands[band]->channels[chan_cnt]; + + if (p_survey_info->filled) { + len += scnprintf(&buf[len], min_t(size_t, sizeof(buf) - len - 1, count), + "%d %03d %03d %d\n", + p_chan->center_freq, + p_survey_info->chan_time_ms, + p_survey_info->chan_time_busy_ms, + p_survey_info->noise_dbm); + } else { + len += scnprintf(&buf[len], min_t(size_t, sizeof(buf) -len -1, count), + "%d NOT AVAILABLE\n", + p_chan->center_freq); + } + + survey_cnt++; + } + } + + mutex_unlock(&priv->dbgdump_elem.mutex); + + size = simple_read_from_buffer(user_buf, count, ppos, buf, len); + + kfree(buf); + buf = NULL; + + return size; +} + +DEBUGFS_READ_FILE_OPS(acsinfo); + +static ssize_t rwnx_dbgfs_fw_dbg_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + char help[]="usage: [MOD:]* " + "[DBG:]\n"; + + return simple_read_from_buffer(user_buf, count, ppos, help, sizeof(help)); +} + + +static ssize_t rwnx_dbgfs_fw_dbg_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct rwnx_hw *priv = file->private_data; + char buf[32]; + int idx = 0; + u32 mod = 0; + size_t len = min_t(size_t, count, sizeof(buf) - 1); + + if (copy_from_user(buf, user_buf, len)) + return -EFAULT; + buf[len] = '\0'; + +#define RWNX_MOD_TOKEN(str, val) \ + if (strncmp(&buf[idx], str, sizeof(str) - 1 ) == 0) { \ + idx += sizeof(str) - 1; \ + mod |= val; \ + continue; \ + } + +#define RWNX_DBG_TOKEN(str, val) \ + if (strncmp(&buf[idx], str, sizeof(str) - 1) == 0) { \ + idx += sizeof(str) - 1; \ + dbg = val; \ + goto dbg_done; \ + } + + while ((idx + 4) < len) { + if (strncmp(&buf[idx], "MOD:", 4) == 0) { + idx += 4; + RWNX_MOD_TOKEN("ALL", 0xffffffff); + RWNX_MOD_TOKEN("KE", BIT(0)); + RWNX_MOD_TOKEN("DBG", BIT(1)); + RWNX_MOD_TOKEN("IPC", BIT(2)); + RWNX_MOD_TOKEN("DMA", BIT(3)); + RWNX_MOD_TOKEN("MM", BIT(4)); + RWNX_MOD_TOKEN("TX", BIT(5)); + RWNX_MOD_TOKEN("RX", BIT(6)); + RWNX_MOD_TOKEN("PHY", BIT(7)); + idx++; + } else if (strncmp(&buf[idx], "DBG:", 4) == 0) { + u32 dbg = 0; + idx += 4; + RWNX_DBG_TOKEN("NONE", 0); + RWNX_DBG_TOKEN("CRT", 1); + RWNX_DBG_TOKEN("ERR", 2); + RWNX_DBG_TOKEN("WRN", 3); + RWNX_DBG_TOKEN("INF", 4); + RWNX_DBG_TOKEN("VRB", 5); + idx++; + continue; + dbg_done: + rwnx_send_dbg_set_sev_filter_req(priv, dbg); + } else { + idx++; + } + } + + if (mod) { + rwnx_send_dbg_set_mod_filter_req(priv, mod); + } + + return count; +} + +DEBUGFS_READ_WRITE_FILE_OPS(fw_dbg); + +static ssize_t rwnx_dbgfs_sys_stats_read(struct file *file, + char __user *user_buf, 
+ size_t count, loff_t *ppos) +{ + struct rwnx_hw *priv = file->private_data; + char buf[3*64]; + int len = 0; + ssize_t read; + int error = 0; + struct dbg_get_sys_stat_cfm cfm; + u32 sleep_int, sleep_frac, doze_int, doze_frac; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + /* Get the information from the FW */ + if ((error = rwnx_send_dbg_get_sys_stat_req(priv, &cfm))) + return error; + + if (cfm.stats_time == 0) + return 0; + + sleep_int = ((cfm.cpu_sleep_time * 100) / cfm.stats_time); + sleep_frac = (((cfm.cpu_sleep_time * 100) % cfm.stats_time) * 10) / cfm.stats_time; + doze_int = ((cfm.doze_time * 100) / cfm.stats_time); + doze_frac = (((cfm.doze_time * 100) % cfm.stats_time) * 10) / cfm.stats_time; + + len += scnprintf(buf, min_t(size_t, sizeof(buf) - 1, count), + "\nSystem statistics:\n"); + len += scnprintf(&buf[len], min_t(size_t, sizeof(buf) - 1, count), + " CPU sleep [%%]: %d.%d\n", sleep_int, sleep_frac); + len += scnprintf(&buf[len], min_t(size_t, sizeof(buf) - 1, count), + " Doze [%%]: %d.%d\n", doze_int, doze_frac); + + read = simple_read_from_buffer(user_buf, count, ppos, buf, len); + + return read; +} + +DEBUGFS_READ_FILE_OPS(sys_stats); + +#ifdef CONFIG_RWNX_MUMIMO_TX +static ssize_t rwnx_dbgfs_mu_group_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct rwnx_hw *rwnx_hw = file->private_data; + struct rwnx_mu_info *mu = &rwnx_hw->mu; + struct rwnx_mu_group *group; + size_t bufsz = NX_MU_GROUP_MAX * sizeof("xx = (xx - xx - xx - xx)\n") + 50; + char *buf; + int j, res, idx = 0; + + if (*ppos) + return 0; + + buf = kmalloc(bufsz, GFP_ATOMIC); + if (buf == NULL) + return 0; + + res = scnprintf(&buf[idx], bufsz, "MU Group list (%d groups, %d users max)\n", + NX_MU_GROUP_MAX, CONFIG_USER_MAX); + idx += res; + bufsz -= res; + + list_for_each_entry(group, &mu->active_groups, list) { + if (group->user_cnt) { + res = scnprintf(&buf[idx], bufsz, "%2d = (", group->group_id); + idx += res; + bufsz -= res; + for (j = 0; j < (CONFIG_USER_MAX - 1) ; j++) { + if (group->users[j]) + res = scnprintf(&buf[idx], bufsz, "%2d - ", + group->users[j]->sta_idx); + else + res = scnprintf(&buf[idx], bufsz, ".. 
- "); + + idx += res; + bufsz -= res; + } + + if (group->users[j]) + res = scnprintf(&buf[idx], bufsz, "%2d)\n", + group->users[j]->sta_idx); + else + res = scnprintf(&buf[idx], bufsz, "..)\n"); + + idx += res; + bufsz -= res; + } + } + + res = simple_read_from_buffer(user_buf, count, ppos, buf, idx); + kfree(buf); + + return res; +} + +DEBUGFS_READ_FILE_OPS(mu_group); +#endif + +#ifdef CONFIG_RWNX_P2P_DEBUGFS +static ssize_t rwnx_dbgfs_oppps_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct rwnx_hw *rw_hw = file->private_data; + struct rwnx_vif *rw_vif; + char buf[32]; + size_t len = min_t(size_t, count, sizeof(buf) - 1); + int ctw; + + if (copy_from_user(buf, user_buf, len)) + return -EFAULT; + buf[len] = '\0'; + + /* Read the written CT Window (provided in ms) value */ + if (sscanf(buf, "ctw=%d", &ctw) > 0) { + /* Check if at least one VIF is configured as P2P GO */ + list_for_each_entry(rw_vif, &rw_hw->vifs, list) { +#ifdef CONFIG_RWNX_FULLMAC + if (RWNX_VIF_TYPE(rw_vif) == NL80211_IFTYPE_P2P_GO) { +#endif /* CONFIG_RWNX_FULLMAC */ + struct mm_set_p2p_oppps_cfm cfm; + + /* Forward request to the embedded and wait for confirmation */ + rwnx_send_p2p_oppps_req(rw_hw, rw_vif, (u8)ctw, &cfm); + + break; + } + } + } + + return count; +} + +DEBUGFS_WRITE_FILE_OPS(oppps); + +static ssize_t rwnx_dbgfs_noa_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct rwnx_hw *rw_hw = file->private_data; + struct rwnx_vif *rw_vif; + char buf[64]; + size_t len = min_t(size_t, count, sizeof(buf) - 1); + int noa_count, interval, duration, dyn_noa; + + if (copy_from_user(buf, user_buf, len)) + return -EFAULT; + buf[len] = '\0'; + + /* Read the written NOA information */ + if (sscanf(buf, "count=%d interval=%d duration=%d dyn=%d", + &noa_count, &interval, &duration, &dyn_noa) > 0) { + /* Check if at least one VIF is configured as P2P GO */ + list_for_each_entry(rw_vif, &rw_hw->vifs, list) { +#ifdef CONFIG_RWNX_FULLMAC + if (RWNX_VIF_TYPE(rw_vif) == NL80211_IFTYPE_P2P_GO) { +#endif /* CONFIG_RWNX_FULLMAC */ + struct mm_set_p2p_noa_cfm cfm; + + /* Forward request to the embedded and wait for confirmation */ + rwnx_send_p2p_noa_req(rw_hw, rw_vif, noa_count, interval, + duration, (dyn_noa > 0), &cfm); + + break; + } + } + } + + return count; +} + +DEBUGFS_WRITE_FILE_OPS(noa); +#endif /* CONFIG_RWNX_P2P_DEBUGFS */ + +static char fw_log_buffer[FW_LOG_SIZE]; + +static ssize_t rwnx_dbgfs_fw_log_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct rwnx_hw *priv = file->private_data; + size_t not_cpy; + size_t nb_cpy; + char *log = fw_log_buffer; + + printk("%s, %d, %p, %p\n", __func__, priv->debugfs.fw_log.buf.size, priv->debugfs.fw_log.buf.start, priv->debugfs.fw_log.buf.dataend); + //spin_lock_bh(&priv->debugfs.fw_log.lock); + + if ((priv->debugfs.fw_log.buf.start + priv->debugfs.fw_log.buf.size) >= priv->debugfs.fw_log.buf.dataend) { + memcpy(log, priv->debugfs.fw_log.buf.start, priv->debugfs.fw_log.buf.dataend - priv->debugfs.fw_log.buf.start); + not_cpy = copy_to_user(user_buf, log, priv->debugfs.fw_log.buf.dataend - priv->debugfs.fw_log.buf.start); + nb_cpy = priv->debugfs.fw_log.buf.dataend - priv->debugfs.fw_log.buf.start - not_cpy; + priv->debugfs.fw_log.buf.start = priv->debugfs.fw_log.buf.data; + } else { + memcpy(log, priv->debugfs.fw_log.buf.start, priv->debugfs.fw_log.buf.size); + not_cpy = copy_to_user(user_buf, log, priv->debugfs.fw_log.buf.size); + nb_cpy = 
priv->debugfs.fw_log.buf.size - not_cpy; + priv->debugfs.fw_log.buf.start = priv->debugfs.fw_log.buf.start + priv->debugfs.fw_log.buf.size - not_cpy; + } + + priv->debugfs.fw_log.buf.size -= nb_cpy; + //spin_unlock_bh(&priv->debugfs.fw_log.lock); + + printk("nb_cpy=%lu, not_cpy=%lu, start=%p, end=%p\n", (long unsigned int)nb_cpy, (long unsigned int)not_cpy, priv->debugfs.fw_log.buf.start, priv->debugfs.fw_log.buf.end); + return nb_cpy; +} + +static ssize_t rwnx_dbgfs_fw_log_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + //struct rwnx_hw *priv = file->private_data; + + printk("%s\n", __func__); + return count; +} +DEBUGFS_READ_WRITE_FILE_OPS(fw_log); + +#ifdef CONFIG_RWNX_RADAR +static ssize_t rwnx_dbgfs_pulses_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos, + int rd_idx) +{ + struct rwnx_hw *priv = file->private_data; + char *buf; + int len = 0; + int bufsz; + int i; + int index; + struct rwnx_radar_pulses *p = &priv->radar.pulses[rd_idx]; + ssize_t read; + + if (*ppos != 0) + return 0; + + /* Prevent from interrupt preemption */ + spin_lock_bh(&priv->radar.lock); + bufsz = p->count * 34 + 51; + bufsz += rwnx_radar_dump_pattern_detector(NULL, 0, &priv->radar, rd_idx); + buf = kmalloc(bufsz, GFP_ATOMIC); + if (buf == NULL) { + spin_unlock_bh(&priv->radar.lock); + return 0; + } + + if (p->count) { + len += scnprintf(&buf[len], bufsz - len, + " PRI WIDTH FOM FREQ\n"); + index = p->index; + for (i = 0; i < p->count; i++) { + struct radar_pulse *pulse; + + if (index > 0) + index--; + else + index = RWNX_RADAR_PULSE_MAX - 1; + + pulse = (struct radar_pulse *) &p->buffer[index]; + + len += scnprintf(&buf[len], bufsz - len, + "%05dus %03dus %2d%% %+3dMHz\n", pulse->rep, + 2 * pulse->len, 6 * pulse->fom, 2*pulse->freq); + } + } + + len += rwnx_radar_dump_pattern_detector(&buf[len], bufsz - len, + &priv->radar, rd_idx); + + spin_unlock_bh(&priv->radar.lock); + + read = simple_read_from_buffer(user_buf, count, ppos, buf, len); + + kfree(buf); + + return read; +} + +static ssize_t rwnx_dbgfs_pulses_prim_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + return rwnx_dbgfs_pulses_read(file, user_buf, count, ppos, 0); +} + +DEBUGFS_READ_FILE_OPS(pulses_prim); + +static ssize_t rwnx_dbgfs_pulses_sec_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + return rwnx_dbgfs_pulses_read(file, user_buf, count, ppos, 1); +} + +DEBUGFS_READ_FILE_OPS(pulses_sec); + +static ssize_t rwnx_dbgfs_detected_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct rwnx_hw *priv = file->private_data; + char *buf; + int bufsz,len = 0; + ssize_t read; + + if (*ppos != 0) + return 0; + + bufsz = 5; // RIU:\n + bufsz += rwnx_radar_dump_radar_detected(NULL, 0, &priv->radar, + RWNX_RADAR_RIU); + + if (priv->phy.cnt > 1) { + bufsz += 5; // FCU:\n + bufsz += rwnx_radar_dump_radar_detected(NULL, 0, &priv->radar, + RWNX_RADAR_FCU); + } + + buf = kmalloc(bufsz, GFP_KERNEL); + if (buf == NULL) { + return 0; + } + + len = scnprintf(&buf[len], bufsz, "RIU:\n"); + len += rwnx_radar_dump_radar_detected(&buf[len], bufsz - len, &priv->radar, + RWNX_RADAR_RIU); + + if (priv->phy.cnt > 1) { + len += scnprintf(&buf[len], bufsz - len, "FCU:\n"); + len += rwnx_radar_dump_radar_detected(&buf[len], bufsz - len, + &priv->radar, RWNX_RADAR_FCU); + } + + read = simple_read_from_buffer(user_buf, count, ppos, buf, len); + + kfree(buf); + + return read; +} + 
+DEBUGFS_READ_FILE_OPS(detected); + +static ssize_t rwnx_dbgfs_enable_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct rwnx_hw *priv = file->private_data; + char buf[32]; + int ret; + ssize_t read; + + ret = scnprintf(buf, min_t(size_t, sizeof(buf) - 1, count), + "RIU=%d FCU=%d\n", priv->radar.dpd[RWNX_RADAR_RIU]->enabled, + priv->radar.dpd[RWNX_RADAR_FCU]->enabled); + + read = simple_read_from_buffer(user_buf, count, ppos, buf, ret); + + return read; +} + +static ssize_t rwnx_dbgfs_enable_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct rwnx_hw *priv = file->private_data; + char buf[32]; + int val; + size_t len = min_t(size_t, count, sizeof(buf) - 1); + + if (copy_from_user(buf, user_buf, len)) + return -EFAULT; + + buf[len] = '\0'; + + if (sscanf(buf, "RIU=%d", &val) > 0) + rwnx_radar_detection_enable(&priv->radar, val, RWNX_RADAR_RIU); + + if (sscanf(buf, "FCU=%d", &val) > 0) + rwnx_radar_detection_enable(&priv->radar, val, RWNX_RADAR_FCU); + + return count; +} + +DEBUGFS_READ_WRITE_FILE_OPS(enable); + +static ssize_t rwnx_dbgfs_band_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct rwnx_hw *priv = file->private_data; + char buf[32]; + int ret; + ssize_t read; + + ret = scnprintf(buf, min_t(size_t, sizeof(buf) - 1, count), + "BAND=%d\n", priv->phy.sec_chan.band); + + read = simple_read_from_buffer(user_buf, count, ppos, buf, ret); + + return read; +} + +static ssize_t rwnx_dbgfs_band_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct rwnx_hw *priv = file->private_data; + char buf[32]; + int val; + size_t len = min_t(size_t, count, sizeof(buf) - 1); + int band_max = NL80211_BAND_5GHZ; + + if (priv->band_5g_support){ + band_max = NL80211_BAND_5GHZ + 1; + } + + if (copy_from_user(buf, user_buf, len)) + return -EFAULT; + + buf[len] = '\0'; + +// #ifdef USE_5G +// if ((sscanf(buf, "%d", &val) > 0) && (val >= 0) && (val <= NL80211_BAND_5GHZ)) +// #else +// if ((sscanf(buf, "%d", &val) > 0) && (val >= 0) && (val < NL80211_BAND_5GHZ)) +// #endif + if ((sscanf(buf, "%d", &val) > 0) && (val >= 0) && (val < band_max)){ + priv->phy.sec_chan.band = val; + } + + return count; +} + +DEBUGFS_READ_WRITE_FILE_OPS(band); + +static ssize_t rwnx_dbgfs_type_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct rwnx_hw *priv = file->private_data; + char buf[32]; + int ret; + ssize_t read; + + ret = scnprintf(buf, min_t(size_t, sizeof(buf) - 1, count), + "TYPE=%d\n", priv->phy.sec_chan.type); + + read = simple_read_from_buffer(user_buf, count, ppos, buf, ret); + + return read; +} + +static ssize_t rwnx_dbgfs_type_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct rwnx_hw *priv = file->private_data; + char buf[32]; + int val; + size_t len = min_t(size_t, count, sizeof(buf) - 1); + + if (copy_from_user(buf, user_buf, len)) + return -EFAULT; + + buf[len] = '\0'; + + if ((sscanf(buf, "%d", &val) > 0) && (val >= PHY_CHNL_BW_20) && + (val <= PHY_CHNL_BW_80P80)) + priv->phy.sec_chan.type = val; + + return count; +} + +DEBUGFS_READ_WRITE_FILE_OPS(type); + +static ssize_t rwnx_dbgfs_prim20_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct rwnx_hw *priv = file->private_data; + char buf[32]; + int ret; + ssize_t read; + + ret = scnprintf(buf, min_t(size_t, sizeof(buf) - 1, count), + "PRIM20=%dMHz\n", 
priv->phy.sec_chan.prim20_freq); + + read = simple_read_from_buffer(user_buf, count, ppos, buf, ret); + + return read; +} + +static ssize_t rwnx_dbgfs_prim20_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct rwnx_hw *priv = file->private_data; + char buf[32]; + int val; + size_t len = min_t(size_t, count, sizeof(buf) - 1); + + if (copy_from_user(buf, user_buf, len)) + return -EFAULT; + + buf[len] = '\0'; + + if (sscanf(buf, "%d", &val) > 0) + priv->phy.sec_chan.prim20_freq = val; + + return count; +} + +DEBUGFS_READ_WRITE_FILE_OPS(prim20); + +static ssize_t rwnx_dbgfs_center1_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct rwnx_hw *priv = file->private_data; + char buf[32]; + int ret; + ssize_t read; + + ret = scnprintf(buf, min_t(size_t, sizeof(buf) - 1, count), + "CENTER1=%dMHz\n", priv->phy.sec_chan.center_freq1); + + read = simple_read_from_buffer(user_buf, count, ppos, buf, ret); + + return read; +} + +static ssize_t rwnx_dbgfs_center1_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct rwnx_hw *priv = file->private_data; + char buf[32]; + int val; + size_t len = min_t(size_t, count, sizeof(buf) - 1); + + if (copy_from_user(buf, user_buf, len)) + return -EFAULT; + + buf[len] = '\0'; + + if (sscanf(buf, "%d", &val) > 0) + priv->phy.sec_chan.center_freq1 = val; + + return count; +} + +DEBUGFS_READ_WRITE_FILE_OPS(center1); + +static ssize_t rwnx_dbgfs_center2_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct rwnx_hw *priv = file->private_data; + char buf[32]; + int ret; + ssize_t read; + + ret = scnprintf(buf, min_t(size_t, sizeof(buf) - 1, count), + "CENTER2=%dMHz\n", priv->phy.sec_chan.center_freq2); + + read = simple_read_from_buffer(user_buf, count, ppos, buf, ret); + + return read; +} + +static ssize_t rwnx_dbgfs_center2_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct rwnx_hw *priv = file->private_data; + char buf[32]; + int val; + size_t len = min_t(size_t, count, sizeof(buf) - 1); + + if (copy_from_user(buf, user_buf, len)) + return -EFAULT; + + buf[len] = '\0'; + + if (sscanf(buf, "%d", &val) > 0) + priv->phy.sec_chan.center_freq2 = val; + + return count; +} + +DEBUGFS_READ_WRITE_FILE_OPS(center2); + + +static ssize_t rwnx_dbgfs_set_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + return 0; +} + +static ssize_t rwnx_dbgfs_set_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct rwnx_hw *priv = file->private_data; + + rwnx_send_set_channel(priv, 1, NULL); + rwnx_radar_detection_enable(&priv->radar, RWNX_RADAR_DETECT_ENABLE, + RWNX_RADAR_FCU); + + return count; +} + +DEBUGFS_READ_WRITE_FILE_OPS(set); +#endif /* CONFIG_RWNX_RADAR */ + +static ssize_t rwnx_dbgfs_regdbg_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + + struct rwnx_hw *priv = file->private_data; + char buf[32]; + u32 addr,val, oper; + size_t len = min_t(size_t, count, sizeof(buf) - 1); + struct dbg_mem_read_cfm mem_read_cfm; + int ret; + + if (copy_from_user(buf, user_buf, len)) + return -EFAULT; + + buf[len] = '\0'; + + if (sscanf(buf, "%x %x %x" , &oper, &addr, &val ) > 0) + printk("addr=%x, val=%x,oper=%d\n", addr, val, oper); + + if(oper== 0) { + ret = rwnx_send_dbg_mem_read_req(priv, addr, &mem_read_cfm); + printk("[0x%x] = [0x%x]\n", mem_read_cfm.memaddr, 
mem_read_cfm.memdata); + } else if (oper == 1) { + ret = rwnx_send_dbg_mem_read_req(priv, addr, &mem_read_cfm); + printk("before write : [0x%x] = [0x%x]\n", mem_read_cfm.memaddr, mem_read_cfm.memdata); + ret = rwnx_send_dbg_mem_block_write_req(priv, addr, 4, &val); + ret = rwnx_send_dbg_mem_read_req(priv, addr, &mem_read_cfm); + printk("after write : [0x%x] = [0x%x]\n", mem_read_cfm.memaddr, mem_read_cfm.memdata); + } + + return count; +} + +DEBUGFS_WRITE_FILE_OPS(regdbg); + +static ssize_t rwnx_dbgfs_vendor_hwconfig_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct rwnx_hw *priv = file->private_data; + char buf[64]; + int32_t addr[14]; + int32_t addr_out[12]; + u32_l hwconfig_id; + size_t len = min_t(size_t,count,sizeof(buf)-1); + int ret; + printk("%s\n",__func__); + //choose the type of write info by struct + //struct mm_set_vendor_trx_param_req trx_param; + + if(copy_from_user(buf,user_buf,len)) { + return -EFAULT; + } + + buf[len] = '\0'; + ret = sscanf(buf, "%x %x %x %x %x %x %x %x %x %x %x %x %x %x %x", + &hwconfig_id, &addr[0], &addr[1], &addr[2], &addr[3], &addr[4], &addr[5], &addr[6], &addr[7], &addr[8], &addr[9], &addr[10], &addr[11], &addr[12], &addr[13]); + if(ret > 15) { + printk("param error > 15\n"); + } else { + switch(hwconfig_id) + { + case 0: + if(ret != 5) { + printk("param error != 5\n"); + break;} + ret = rwnx_send_vendor_hwconfig_req(priv, hwconfig_id, addr, NULL); + printk("ACS_TXOP_REQ bk:0x%x be:0x%x vi:0x%x vo:0x%x\n",addr[0], addr[1], addr[2], addr[3]); + break; + case 1: + if(ret != 15) { + printk("param error != 15\n"); + break;} + ret = rwnx_send_vendor_hwconfig_req(priv, hwconfig_id, addr, NULL); + printk("CHANNEL_ACCESS_REQ edca:%x,%x,%x,%x, vif:%x, retry_cnt:%x, rts:%x, long_nav:%x, cfe:%x, rc_retry_cnt:%x:%x:%x ccademod_th %x, remove_1m2m %x\n", + addr[0], addr[1], addr[2], addr[3], addr[4], addr[5], addr[6], addr[7], addr[8], addr[9], addr[10], addr[11], addr[12], addr[13]); + break; + case 2: + if(ret != 7) { + printk("param error != 7\n"); + break;} + ret = rwnx_send_vendor_hwconfig_req(priv, hwconfig_id, addr, NULL); + printk("MAC_TIMESCALE_REQ sifsA:%x,sifsB:%x,slot:%x,ofdm_delay:%x,long_delay:%x,short_delay:%x\n", + addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]); + break; + case 3: + if(ret != 6) { + printk("param error != 6\n"); + break;} + addr[1] = ~addr[1] + 1; + addr[2] = ~addr[2] + 1; + addr[3] = ~addr[3] + 1; + addr[4] = ~addr[4] + 1; + ret = rwnx_send_vendor_hwconfig_req(priv, hwconfig_id, addr, NULL); + printk("CCA_THRESHOLD_REQ auto_cca:%d, cca20p_rise:%d cca20s_rise:%d cca20p_fail:%d cca20s_fail:%d\n", + addr[0], addr[1], addr[2], addr[3], addr[4]); + break; + case 4: // BWMODE_REQ + if (ret != 2) { + printk("param error != 2\n"); + } else { + ret = rwnx_send_vendor_hwconfig_req(priv, hwconfig_id, addr, NULL); + printk("BWMODE_REQ md=%d\n", addr[0]); + } + break; + case 5: // CHIP_TEMP_GET_REQ + if (ret != 1) { + printk("param error != 1\n"); + } else { + ret = rwnx_send_vendor_hwconfig_req(priv, hwconfig_id, addr, addr_out); + printk("CHIP_TEMP_GET_REQ degree=%d\n", addr_out[0]); + } + break; + default: + printk("param error\n"); + break; + } + if(ret) { + printk("rwnx_send_vendor_hwconfig_req fail: %x\n", ret); + } + } + + return count; +} + +DEBUGFS_WRITE_FILE_OPS(vendor_hwconfig); + +static ssize_t rwnx_dbgfs_vendor_swconfig_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct rwnx_hw *priv = file->private_data; + char buf[64]; + 
int32_t addr[12]; + int32_t addr_out[12]; + u32_l swconfig_id; + size_t len = min_t(size_t, count, sizeof(buf) - 1); + int ret; + printk("%s\n", __func__); + + if (copy_from_user(buf, user_buf, len)) { + return -EFAULT; + } + + buf[len] = '\0'; + ret = sscanf(buf, "%x %x %x", &swconfig_id, &addr[0], &addr[1]); + if (ret > 3) { + printk("param error > 3\n"); + } else { + switch (swconfig_id) + { + case 0: // BCN_CFG_REQ + if (ret != 2) { + printk("param error != 2\n"); + } else { + ret = rwnx_send_vendor_swconfig_req(priv, swconfig_id, addr, addr_out); + printk("BCN_CFG_REQ set_en=%d, get_en=%d\n", addr[0], addr_out[0]); + } + break; + + case 1: // TEMP_COMP_SET_REQ + if (ret != 3) { + printk("param error != 3\n"); + } else { + ret = rwnx_send_vendor_swconfig_req(priv, swconfig_id, addr, addr_out); + printk("TEMP_COMP_SET_REQ set_en=%d, tmr=%dms, get_st=%d\n", + addr[0], addr[1], addr_out[0]); + } + break; + + case 2: // TEMP_COMP_GET_REQ + if (ret != 1) { + printk("param error != 1\n"); + } else { + ret = rwnx_send_vendor_swconfig_req(priv, swconfig_id, addr, addr_out); + printk("TEMP_COMP_GET_REQ get_st=%d, degree=%d\n", addr_out[0], addr_out[1]); + } + break; + + case 3: // EXT_FLAGS_SET_REQ + if (ret != 2) { + printk("param error != 2\n"); + } else { + ret = rwnx_send_vendor_swconfig_req(priv, swconfig_id, addr, addr_out); + printk("EXT_FLAGS_SET_REQ set ext_flags=0x%x, get ext_flags=0x%x\n", + addr[0], addr_out[0]); + } + break; + + case 4: // EXT_FLAGS_GET_REQ + if (ret != 1) { + printk("param error != 1\n"); + } else { + ret = rwnx_send_vendor_swconfig_req(priv, swconfig_id, addr, addr_out); + printk("EXT_FLAGS_GET_REQ get ext_flags=0x%x\n", addr_out[0]); + } + break; + + case 5: // EXT_FLAGS_MASK_SET_REQ + if (ret != 3) { + printk("param error != 3\n"); + } else { + ret = rwnx_send_vendor_swconfig_req(priv, swconfig_id, addr, addr_out); + printk("EXT_FLAGS_MASK_SET_REQ set ext_flags mask=0x%x, val=0x%x, get ext_flags=0x%x\n", + addr[0], addr[1], addr_out[0]); + } + break; + + default: + printk("param error\n"); + break; + } + + if (ret) { + printk("rwnx_send_vendor_swconfig_req fail: %x\n", ret); + } + } + + return count; +} + +DEBUGFS_WRITE_FILE_OPS(vendor_swconfig); + +static ssize_t rwnx_dbgfs_vendor_hwconfig_x2_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct rwnx_hw *priv = file->private_data; + char buf[64]; + int32_t addr[14]; + int32_t addr_out[12]; + u32_l hwconfig_id; + size_t len = min_t(size_t,count,sizeof(buf)-1); + int ret; + printk("%s\n",__func__); + //choose the type of write info by struct + //struct mm_set_vendor_trx_param_req trx_param; + + if(copy_from_user(buf,user_buf,len)) { + return -EFAULT; + } + + buf[len] = '\0'; + ret = sscanf(buf, "%x %x %x %x %x %x %x %x %x %x %x %x %x %x %x", + &hwconfig_id, &addr[0], &addr[1], &addr[2], &addr[3], &addr[4], &addr[5], &addr[6], &addr[7], &addr[8], &addr[9], &addr[10], &addr[11], &addr[12], &addr[13]); + if(ret > 15) { + printk("param error > 15\n"); + } else { + switch(hwconfig_id) + { + case 0: + if(ret != 5) { + printk("param error != 5\n"); + break;} + ret = rwnx_send_vendor_hwconfig_req_x2(priv, hwconfig_id, addr, NULL); + printk("ACS_TXOP_REQ bk:0x%x be:0x%x vi:0x%x vo:0x%x\n",addr[0], addr[1], addr[2], addr[3]); + break; + case 1: + if(ret != 15) { + printk("param error != 15\n"); + break;} + ret = rwnx_send_vendor_hwconfig_req_x2(priv, hwconfig_id, addr, NULL); + printk("CHANNEL_ACCESS_REQ edca:%x,%x,%x,%x, vif:%x, retry_cnt:%x, rts:%x, long_nav:%x, cfe:%x, 
rc_retry_cnt:%x:%x:%x ccademod_th %x remove_1m2m %x\n", + addr[0], addr[1], addr[2], addr[3], addr[4], addr[5], addr[6], addr[7], addr[8], addr[9], addr[10], addr[11], addr[12], addr[13]); + break; + case 2: + if(ret != 7) { + printk("param error != 7\n"); + break;} + ret = rwnx_send_vendor_hwconfig_req_x2(priv, hwconfig_id, addr, NULL); + printk("MAC_TIMESCALE_REQ sifsA:%x,sifsB:%x,slot:%x,ofdm_delay:%x,long_delay:%x,short_delay:%x\n", + addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]); + break; + case 3: + if(ret != 6) { + printk("param error != 6\n"); + break;} + addr[1] = ~addr[1] + 1; + addr[2] = ~addr[2] + 1; + addr[3] = ~addr[3] + 1; + addr[4] = ~addr[4] + 1; + ret = rwnx_send_vendor_hwconfig_req_x2(priv, hwconfig_id, addr, NULL); + printk("CCA_THRESHOLD_REQ auto_cca:%d, cca20p_rise:%d cca20s_rise:%d cca20p_fail:%d cca20s_fail:%d\n", + addr[0], addr[1], addr[2], addr[3], addr[4]); + break; + case 4: // BWMODE_REQ + if (ret != 2) { + printk("param error != 2\n"); + } else { + ret = rwnx_send_vendor_hwconfig_req_x2(priv, hwconfig_id, addr, NULL); + printk("BWMODE_REQ md=%d\n", addr[0]); + } + break; + case 5: // CHIP_TEMP_GET_REQ + if (ret != 1) { + printk("param error != 1\n"); + } else { + ret = rwnx_send_vendor_hwconfig_req_x2(priv, hwconfig_id, addr, addr_out); + printk("CHIP_TEMP_GET_REQ degree=%d\n", addr_out[0]); + } + break; + case 6: // STBC_MCS_SET_REQ + if (ret != 3) { + printk("param error != 3\n"); + } else { + printk("ret=%d, id=%d, param=%x, %x\n", ret, hwconfig_id, addr[0], addr[1]); + ret = rwnx_send_vendor_hwconfig_req_x2(priv, hwconfig_id, addr, addr_out); + } + break; + case 7: + printk("ret=%d, id=%d, param=%x, %x\n", ret, hwconfig_id, addr[0], addr[1]); + ret = rwnx_send_vendor_hwconfig_req_x2(priv, hwconfig_id, addr, addr_out); + break; + case 8: + case 9: + case 10: + case 11: + case 12: + ret = rwnx_send_vendor_hwconfig_req_x2(priv, hwconfig_id, addr, addr_out); + break; + default: + printk("param error:%d\n", hwconfig_id); + break; + } + if(ret) { + printk("rwnx_send_vendor_hwconfig_req fail: %x\n", ret); + } + } + + return count; +} + +DEBUGFS_WRITE_FILE_OPS(vendor_hwconfig_x2); + +static ssize_t rwnx_dbgfs_vendor_swconfig_x2_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct rwnx_hw *priv = file->private_data; + char buf[64]; + int32_t addr[12]; + int32_t addr_out[12]; + u32_l swconfig_id; + size_t len = min_t(size_t, count, sizeof(buf) - 1); + int ret; + printk("%s\n", __func__); + + if (copy_from_user(buf, user_buf, len)) { + return -EFAULT; + } + + buf[len] = '\0'; + ret = sscanf(buf, "%x %x %x", &swconfig_id, &addr[0], &addr[1]); + if (ret > 3) { + printk("param error > 3\n"); + } else { + switch (swconfig_id) + { + case 0: // BCN_CFG_REQ + if (ret != 2) { + printk("param error != 2\n"); + } else { + ret = rwnx_send_vendor_swconfig_req_x2(priv, swconfig_id, addr, addr_out); + printk("BCN_CFG_REQ set_en=%d, get_en=%d\n", addr[0], addr_out[0]); + } + break; + + case 1: // TEMP_COMP_SET_REQ + if (ret != 3) { + printk("param error != 3\n"); + } else { + ret = rwnx_send_vendor_swconfig_req_x2(priv, swconfig_id, addr, addr_out); + printk("TEMP_COMP_SET_REQ set_en=%d, tmr=%dms, get_st=%d\n", + addr[0], addr[1], addr_out[0]); + } + break; + + case 2: // TEMP_COMP_GET_REQ + if (ret != 1) { + printk("param error != 1\n"); + } else { + ret = rwnx_send_vendor_swconfig_req_x2(priv, swconfig_id, addr, addr_out); + printk("TEMP_COMP_GET_REQ get_st=%d, degree=%d\n", addr_out[0], addr_out[1]); + } + break; + + case 3: 
// EXT_FLAGS_SET_REQ + if (ret != 2) { + printk("param error != 2\n"); + } else { + ret = rwnx_send_vendor_swconfig_req_x2(priv, swconfig_id, addr, addr_out); + printk("EXT_FLAGS_SET_REQ set ext_flags=0x%x, get ext_flags=0x%x\n", + addr[0], addr_out[0]); + } + break; + + case 4: // EXT_FLAGS_GET_REQ + if (ret != 1) { + printk("param error != 1\n"); + } else { + ret = rwnx_send_vendor_swconfig_req_x2(priv, swconfig_id, addr, addr_out); + printk("EXT_FLAGS_GET_REQ get ext_flags=0x%x\n", addr_out[0]); + } + break; + + case 5: // EXT_FLAGS_MASK_SET_REQ + if (ret != 3) { + printk("param error != 3\n"); + } else { + ret = rwnx_send_vendor_swconfig_req_x2(priv, swconfig_id, addr, addr_out); + printk("EXT_FLAGS_MASK_SET_REQ set ext_flags mask=0x%x, val=0x%x, get ext_flags=0x%x\n", + addr[0], addr[1], addr_out[0]); + } + break; + case 6: // TWO_ANT_RSSI_GET_REQ + ret = rwnx_send_vendor_swconfig_req_x2(priv, swconfig_id, addr, addr_out); + printk("2 ant rssi=%x\n", addr_out[0]); + break; + default: + printk("param error\n"); + break; + } + + if (ret) { + printk("rwnx_send_vendor_swconfig_req fail: %x\n", ret); + } + } + + return count; +} + +DEBUGFS_WRITE_FILE_OPS(vendor_swconfig_x2); + +static ssize_t rwnx_dbgfs_agg_disable_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct rwnx_hw *priv = file->private_data; + char buf[64]; + int agg_disable, agg_disable_rx, sta_idx; + size_t len = min_t(size_t, count, sizeof(buf) - 1); + int ret; + printk("%s\n", __func__); + + if (copy_from_user(buf, user_buf, len)) { + return -EFAULT; + } + + buf[len] = '\0'; + ret = sscanf(buf, "%d %d %d", &agg_disable, &agg_disable_rx, &sta_idx); + if ((ret > 3) || (ret < 2)) { + printk("param error: cnt=%d\n", ret); + } else { + if (ret < 3) { + sta_idx = RWNX_INVALID_STA; + } + printk("disable_agg: T=%d, R=%d, staidx=%d\n", agg_disable, agg_disable_rx, sta_idx); + ret = rwnx_send_disable_agg_req(priv, (u8_l)agg_disable, (u8_l)agg_disable_rx, (u8_l)sta_idx); + if (ret) { + printk("rwnx_send_disable_agg_req fail: %d\n", ret); + } + } + + return count; +} + +DEBUGFS_WRITE_FILE_OPS(agg_disable); + +static ssize_t rwnx_dbgfs_set_roc_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct rwnx_hw *priv = file->private_data; + struct rwnx_vif *vif = NULL; + char buf[64]; + int roc_start, chan_freq, duration; + size_t len = min_t(size_t, count, sizeof(buf) - 1); + int ret; + int if_type = NL80211_IFTYPE_STATION; + printk("%s\n", __func__); + + if (copy_from_user(buf, user_buf, len)) { + return -EFAULT; + } + + buf[len] = '\0'; + ret = sscanf(buf, "%d %d %d", &roc_start, &chan_freq, &duration); + + list_for_each_entry(vif, &priv->vifs, list) { + if (RWNX_VIF_TYPE(vif) == if_type) { + break; + } + } + if ((ret > 3) || (ret < 1)) { + printk("param error: cnt=%d\n", ret); + } else if (vif) { + struct ieee80211_channel chan_entry = {0,}; + struct ieee80211_channel *chan = &chan_entry; + struct mm_remain_on_channel_cfm roc_cfm; + if (ret < 3) { + duration = 2000; + if (ret < 2) { + chan_freq = 2412; + } + } + printk("set_roc: start=%d, freq=%d\n", roc_start, chan_freq); + if (roc_start) { + if (chan_freq <= 2484) { + chan->band = NL80211_BAND_2GHZ; + } else { + chan->band = NL80211_BAND_5GHZ; + } + chan->center_freq = chan_freq; + chan->max_power = 18; + ret = rwnx_send_roc(priv, vif, chan, duration, &roc_cfm); + if (ret) { + printk("rwnx_send_roc fail: %d\n", ret); + } else { + printk("roc_cfm: opcode=%x, st=%d, idx=%d\n", roc_cfm.op_code, 
roc_cfm.status, roc_cfm.chan_ctxt_index); + } + } else { + ret = rwnx_send_cancel_roc(priv); + if (ret) { + printk("rwnx_send_cancel_roc fail: %d\n", ret); + } + } + } + + return count; +} + +DEBUGFS_WRITE_FILE_OPS(set_roc); + +extern int aicwf_dbg_level; +static ssize_t rwnx_dbgfs_dbg_level_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct rwnx_hw *priv = file->private_data; + char buf[32]; + int ret; + ssize_t read; + + printk("dbg level case 1 3 7 15 31 \n"); + ret = scnprintf(buf, min_t(size_t, sizeof(buf) - 1, count), + "dbg_level=%d\n", aicwf_dbg_level); + + read = simple_read_from_buffer(user_buf, count, ppos, buf, ret); + + return read; +} + + +static ssize_t rwnx_dbgfs_dbg_level_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct rwnx_hw *priv = file->private_data; + char buf[32]; + int val; + size_t len = min_t(size_t, count, sizeof(buf) - 1); + + printk("now aicwf_dbg_level = %d ;please set level case 1 3 7 15 31 \n",aicwf_dbg_level); + + if (copy_from_user(buf, user_buf, len)) + return -EFAULT; + + buf[len] = '\0'; + + if((sscanf(buf, "%d", &val) > 0)&&(val < 32)) + aicwf_dbg_level = val; + + printk("set aicwf_dbg_level = %d \n",aicwf_dbg_level); + return count; +} + +DEBUGFS_READ_WRITE_FILE_OPS(dbg_level); + + +#ifdef CONFIG_RWNX_FULLMAC + +#define LINE_MAX_SZ 150 + +struct st { + char line[LINE_MAX_SZ + 1]; + unsigned int r_idx; +}; + +static int compare_idx(const void *st1, const void *st2) +{ + int index1 = ((struct st *)st1)->r_idx; + int index2 = ((struct st *)st2)->r_idx; + + if (index1 > index2) return 1; + if (index1 < index2) return -1; + + return 0; +} + +static const int ru_size[] = +{ + 26, + 52, + 106, + 242, + 484, + 996 +}; + +static int print_rate(char *buf, int size, int format, int nss, int mcs, int bw, + int sgi, int pre, int *r_idx) +{ + int res = 0; + int bitrates_cck[4] = { 10, 20, 55, 110 }; + int bitrates_ofdm[8] = { 6, 9, 12, 18, 24, 36, 48, 54}; + char he_gi[3][4] = {"0.8", "1.6", "3.2"}; + + if (format < FORMATMOD_HT_MF) { + if (mcs < 4) { + if (r_idx) { + *r_idx = (mcs * 2) + pre; + res = scnprintf(buf, size - res, "%3d ", *r_idx); + } + res += scnprintf(&buf[res], size - res, "L-CCK/%cP %2u.%1uM ", + pre > 0 ? 'L' : 'S', + bitrates_cck[mcs] / 10, + bitrates_cck[mcs] % 10); + } else { + mcs -= 4; + if (r_idx) { + *r_idx = N_CCK + mcs; + res = scnprintf(buf, size - res, "%3d ", *r_idx); + } + res += scnprintf(&buf[res], size - res, "L-OFDM %2u.0M ", + bitrates_ofdm[mcs]); + } + } else if (format < FORMATMOD_VHT) { + if (r_idx) { + *r_idx = N_CCK + N_OFDM + nss * 32 + mcs * 4 + bw * 2 + sgi; + res = scnprintf(buf, size - res, "%3d ", *r_idx); + } + mcs += nss * 8; + res += scnprintf(&buf[res], size - res, "HT%d/%cGI MCS%-2d ", + 20 * (1 << bw), sgi ? 'S' : 'L', mcs); + } else if (format == FORMATMOD_VHT){ + if (r_idx) { + *r_idx = N_CCK + N_OFDM + N_HT + nss * 80 + mcs * 8 + bw * 2 + sgi; + res = scnprintf(buf, size - res, "%3d ", *r_idx); + } + res += scnprintf(&buf[res], size - res, "VHT%d/%cGI%*cMCS%d/%1d ", + 20 * (1 << bw), sgi ? 'S' : 'L', bw > 2 ? 5 : 6, ' ', + mcs, nss + 1); + } else if (format == FORMATMOD_HE_SU){ + if (r_idx) { + *r_idx = N_CCK + N_OFDM + N_HT + N_VHT + nss * 144 + mcs * 12 + bw * 3 + sgi; + res = scnprintf(buf, size - res, "%3d ", *r_idx); + } + res += scnprintf(&buf[res], size - res, "HE%d/GI%s%*cMCS%d/%1d%*c", + 20 * (1 << bw), he_gi[sgi], bw > 2 ? 4 : 5, ' ', + mcs, nss + 1, mcs > 9 ? 
1 : 2, ' '); + } else { + if (r_idx) { + *r_idx = N_CCK + N_OFDM + N_HT + N_VHT + N_HE_SU + nss * 216 + mcs * 18 + bw * 3 + sgi; + res = scnprintf(buf, size - res, "%3d ", *r_idx); + } + res += scnprintf(&buf[res], size - res, "HEMU-%d/GI%s%*cMCS%d/%1d%*c", + ru_size[bw], he_gi[sgi], bw > 1 ? 1 : 2, ' ', + mcs, nss + 1, mcs > 9 ? 1 : 2, ' '); + + } + + return res; +} + +static int print_rate_from_cfg(char *buf, int size, u32 rate_config, int *r_idx, int ru_size) +{ + union rwnx_rate_ctrl_info *r_cfg = (union rwnx_rate_ctrl_info *)&rate_config; + union rwnx_mcs_index *mcs_index = (union rwnx_mcs_index *)&rate_config; + unsigned int ft, pre, gi, bw, nss, mcs, len; + + ft = r_cfg->formatModTx; + pre = r_cfg->giAndPreTypeTx >> 1; + gi = r_cfg->giAndPreTypeTx; + bw = r_cfg->bwTx; + if (ft == FORMATMOD_HE_MU) { + mcs = mcs_index->he.mcs; + nss = mcs_index->he.nss; + bw = ru_size; + } else if (ft == FORMATMOD_HE_SU) { + mcs = mcs_index->he.mcs; + nss = mcs_index->he.nss; + } else if (ft == FORMATMOD_VHT) { + mcs = mcs_index->vht.mcs; + nss = mcs_index->vht.nss; + } else if (ft >= FORMATMOD_HT_MF) { + mcs = mcs_index->ht.mcs; + nss = mcs_index->ht.nss; + } else { + mcs = mcs_index->legacy; + nss = 0; + } + + len = print_rate(buf, size, ft, nss, mcs, bw, gi, pre, r_idx); + return len; +} + +static void idx_to_rate_cfg(int idx, union rwnx_rate_ctrl_info *r_cfg, int *ru_size) +{ + r_cfg->value = 0; + if (idx < N_CCK) + { + r_cfg->formatModTx = FORMATMOD_NON_HT; + r_cfg->giAndPreTypeTx = (idx & 1) << 1; + r_cfg->mcsIndexTx = idx / 2; + } + else if (idx < (N_CCK + N_OFDM)) + { + r_cfg->formatModTx = FORMATMOD_NON_HT; + r_cfg->mcsIndexTx = idx - N_CCK + 4; + } + else if (idx < (N_CCK + N_OFDM + N_HT)) + { + union rwnx_mcs_index *r = (union rwnx_mcs_index *)r_cfg; + + idx -= (N_CCK + N_OFDM); + r_cfg->formatModTx = FORMATMOD_HT_MF; + r->ht.nss = idx / (8*2*2); + r->ht.mcs = (idx % (8*2*2)) / (2*2); + r_cfg->bwTx = ((idx % (8*2*2)) % (2*2)) / 2; + r_cfg->giAndPreTypeTx = idx & 1; + } + else if (idx < (N_CCK + N_OFDM + N_HT + N_VHT)) + { + union rwnx_mcs_index *r = (union rwnx_mcs_index *)r_cfg; + + idx -= (N_CCK + N_OFDM + N_HT); + r_cfg->formatModTx = FORMATMOD_VHT; + r->vht.nss = idx / (10*4*2); + r->vht.mcs = (idx % (10*4*2)) / (4*2); + r_cfg->bwTx = ((idx % (10*4*2)) % (4*2)) / 2; + r_cfg->giAndPreTypeTx = idx & 1; + } + else if (idx < (N_CCK + N_OFDM + N_HT + N_VHT + N_HE_SU)) + { + union rwnx_mcs_index *r = (union rwnx_mcs_index *)r_cfg; + + idx -= (N_CCK + N_OFDM + N_HT + N_VHT); + r_cfg->formatModTx = FORMATMOD_HE_SU; + r->vht.nss = idx / (12*4*3); + r->vht.mcs = (idx % (12*4*3)) / (4*3); + r_cfg->bwTx = ((idx % (12*4*3)) % (4*3)) / 3; + r_cfg->giAndPreTypeTx = idx % 3; + } + else + { + union rwnx_mcs_index *r = (union rwnx_mcs_index *)r_cfg; + + BUG_ON(ru_size == NULL); + + idx -= (N_CCK + N_OFDM + N_HT + N_VHT + N_HE_SU); + r_cfg->formatModTx = FORMATMOD_HE_MU; + r->vht.nss = idx / (12*6*3); + r->vht.mcs = (idx % (12*6*3)) / (6*3); + *ru_size = ((idx % (12*6*3)) % (6*3)) / 3; + r_cfg->giAndPreTypeTx = idx % 3; + r_cfg->bwTx = 0; + } +} + +static void idx_to_rate_cfg1(unsigned int formatmod, + unsigned int mcs,unsigned int nss, + unsigned int bwTx,unsigned int gi, + union rwnx_rate_ctrl_info *r_cfg, int *ru_size) +{ + r_cfg->value = 0; + + switch(formatmod){ + case FORMATMOD_NON_HT: + { + r_cfg->formatModTx = formatmod; + r_cfg->giAndPreTypeTx = 1; + r_cfg->mcsIndexTx = mcs; + break; + } + case FORMATMOD_NON_HT_DUP_OFDM: + { + r_cfg->formatModTx = formatmod; + r_cfg->giAndPreTypeTx = gi; + 
r_cfg->mcsIndexTx = mcs; + break; + } + case FORMATMOD_HT_MF: + { + union rwnx_mcs_index *r = (union rwnx_mcs_index *)r_cfg; + + r_cfg->formatModTx = formatmod; + r->ht.nss = nss; + r->ht.mcs = mcs; + r_cfg->bwTx = bwTx; + r_cfg->giAndPreTypeTx = gi; + break; + } + case FORMATMOD_VHT: + case FORMATMOD_HE_SU: + { + union rwnx_mcs_index *r = (union rwnx_mcs_index *)r_cfg; + + r_cfg->formatModTx = formatmod; + r->vht.nss = nss; + r->vht.mcs = mcs; + r_cfg->bwTx = bwTx; + r_cfg->giAndPreTypeTx = gi; + break; + } + case FORMATMOD_HE_MU: + { + union rwnx_mcs_index *r = (union rwnx_mcs_index *)r_cfg; + + r_cfg->formatModTx = formatmod; + r->he.nss = nss; + r->he.mcs = mcs; + r_cfg->bwTx = 0; + r_cfg->giAndPreTypeTx = gi; + break; + } + default: + printk("Don't have the formatmod"); + } +} + +static ssize_t rwnx_dbgfs_rc_stats_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct rwnx_sta *sta = NULL; + struct rwnx_hw *priv = file->private_data; + char *buf; + int bufsz, len = 0; + ssize_t read; + int i = 0; + int error = 0; + struct me_rc_stats_cfm me_rc_stats_cfm; + unsigned int no_samples; + struct st *st; + u8 mac[6]; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + /* everything should fit in one call */ + if (*ppos) + return 0; + + /* Get the station index from MAC address */ + sscanf(file->f_path.dentry->d_parent->d_iname, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx", + &mac[0], &mac[1], &mac[2], &mac[3], &mac[4], &mac[5]); + //if (mac == NULL) + // return 0; + sta = rwnx_get_sta(priv, mac); + if (sta == NULL) + return 0; + + /* Forward the information to the LMAC */ + if ((error = rwnx_send_me_rc_stats(priv, sta->sta_idx, &me_rc_stats_cfm))) + return error; + + no_samples = me_rc_stats_cfm.no_samples; + if (no_samples == 0) + return 0; + + bufsz = no_samples * LINE_MAX_SZ + 500; + + buf = kmalloc(bufsz + 1, GFP_ATOMIC); + if (buf == NULL) + return 0; + + st = kmalloc(sizeof(struct st) * no_samples, GFP_ATOMIC); + if (st == NULL) + { + kfree(buf); + return 0; + } + + for (i = 0; i < no_samples; i++) + { + unsigned int tp, eprob; + len = print_rate_from_cfg(st[i].line, LINE_MAX_SZ, + me_rc_stats_cfm.rate_stats[i].rate_config, + &st[i].r_idx, 0); + + if (me_rc_stats_cfm.sw_retry_step != 0) + { + len += scnprintf(&st[i].line[len], LINE_MAX_SZ - len, "%c", + me_rc_stats_cfm.retry_step_idx[me_rc_stats_cfm.sw_retry_step] == i ? '*' : ' '); + } + else + { + len += scnprintf(&st[i].line[len], LINE_MAX_SZ - len, " "); + } + len += scnprintf(&st[i].line[len], LINE_MAX_SZ - len, "%c", + me_rc_stats_cfm.retry_step_idx[0] == i ? 'T' : ' '); + len += scnprintf(&st[i].line[len], LINE_MAX_SZ - len, "%c", + me_rc_stats_cfm.retry_step_idx[1] == i ? 't' : ' '); + len += scnprintf(&st[i].line[len], LINE_MAX_SZ - len, "%c ", + me_rc_stats_cfm.retry_step_idx[2] == i ? 
'P' : ' '); + + tp = me_rc_stats_cfm.tp[i] / 10; + len += scnprintf(&st[i].line[len], LINE_MAX_SZ - len, " %4u.%1u", + tp / 10, tp % 10); + + eprob = ((me_rc_stats_cfm.rate_stats[i].probability * 1000) >> 16) + 1; + len += scnprintf(&st[i].line[len],LINE_MAX_SZ - len, + " %4u.%1u %5u(%6u) %6u", + eprob / 10, eprob % 10, + me_rc_stats_cfm.rate_stats[i].success, + me_rc_stats_cfm.rate_stats[i].attempts, + me_rc_stats_cfm.rate_stats[i].sample_skipped); + } + len = scnprintf(buf, bufsz , + "\nTX rate info for %02X:%02X:%02X:%02X:%02X:%02X:\n", + mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]); + + len += scnprintf(&buf[len], bufsz - len, + " # type rate tpt eprob ok( tot) skipped\n"); + + // add sorted statistics to the buffer + sort(st, no_samples, sizeof(st[0]), compare_idx, NULL); + for (i = 0; i < no_samples; i++) + { + len += scnprintf(&buf[len], bufsz - len, "%s\n", st[i].line); + } + + // display HE TB statistics if any + if (me_rc_stats_cfm.rate_stats[RC_HE_STATS_IDX].rate_config != 0) { + unsigned int tp, eprob; + struct rc_rate_stats *rate_stats = &me_rc_stats_cfm.rate_stats[RC_HE_STATS_IDX]; + int ru_index = rate_stats->ru_and_length & 0x07; + int ul_length = rate_stats->ru_and_length >> 3; + + len += scnprintf(&buf[len], bufsz - len, + "\nHE TB rate info:\n"); + + len += scnprintf(&buf[len], bufsz - len, + " type rate tpt eprob ok( tot) ul_length\n "); + len += print_rate_from_cfg(&buf[len], bufsz - len, rate_stats->rate_config, + NULL, ru_index); + + tp = me_rc_stats_cfm.tp[RC_HE_STATS_IDX] / 10; + len += scnprintf(&buf[len], bufsz - len, " %4u.%1u", + tp / 10, tp % 10); + + eprob = ((rate_stats->probability * 1000) >> 16) + 1; + len += scnprintf(&buf[len],bufsz - len, + " %4u.%1u %5u(%6u) %6u\n", + eprob / 10, eprob % 10, + rate_stats->success, + rate_stats->attempts, + ul_length); + } + + len += scnprintf(&buf[len], bufsz - len, "\n MPDUs AMPDUs AvLen trialP"); + len += scnprintf(&buf[len], bufsz - len, "\n%6u %6u %3d.%1d %6u\n", + me_rc_stats_cfm.ampdu_len, + me_rc_stats_cfm.ampdu_packets, + me_rc_stats_cfm.avg_ampdu_len >> 16, + ((me_rc_stats_cfm.avg_ampdu_len * 10) >> 16) % 10, + me_rc_stats_cfm.sample_wait); + + read = simple_read_from_buffer(user_buf, count, ppos, buf, len); + + kfree(buf); + kfree(st); + + return read; +} + +DEBUGFS_READ_FILE_OPS(rc_stats); + +static ssize_t rwnx_dbgfs_rc_fixed_rate_idx_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct rwnx_sta *sta = NULL; + struct rwnx_hw *priv = file->private_data; + u8 mac[6]; + char buf[20]; + int fixed_rate_idx = 1; + unsigned int formatmod, mcs, nss, bwTx, gi; + union rwnx_rate_ctrl_info rate_config; + union rwnx_rate_ctrl_info *r_cfg=&rate_config; + int error = 0; + size_t len = min_t(size_t, count, sizeof(buf) - 1); + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + /* Get the station index from MAC address */ + sscanf(file->f_path.dentry->d_parent->d_iname, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx", + &mac[0], &mac[1], &mac[2], &mac[3], &mac[4], &mac[5]); + if (&mac[0] == NULL) + return 0; + sta = rwnx_get_sta(priv, mac); + if (sta == NULL) + return 0; + + /* Get the content of the file */ + if (copy_from_user(buf, user_buf, len)) + return -EFAULT; + buf[len] = '\0'; + //sscanf(buf, "%i\n", &fixed_rate_idx); + sscanf(buf, "%u %u %u %u %u",&formatmod, &mcs, &nss, &bwTx, &gi); + printk("%u %u %u %u %u\n",formatmod, mcs, nss, bwTx, gi); + + if((formatmod > 6) || (mcs > 11) || (nss > 8) || (bwTx > 6) || (gi > 3)){ + printk("error parameter"); + return len; + } + + /* Convert rate index into 
rate configuration */ + if ((fixed_rate_idx < 0) || (fixed_rate_idx >= (N_CCK + N_OFDM + N_HT + N_VHT + N_HE_SU))) + { + // disable fixed rate + rate_config.value = (u32)-1; + } + else + { + //idx_to_rate_cfg(fixed_rate_idx, &rate_config, NULL); + idx_to_rate_cfg1(formatmod, mcs, nss, bwTx, gi, &rate_config, NULL); + } + + printk("formatModTx=%u mcsIndexTx=%u bwTx=%u giAndPreTypeTx=%u\n",r_cfg->formatModTx,r_cfg->mcsIndexTx,r_cfg->bwTx,r_cfg->giAndPreTypeTx); + // Forward the request to the LMAC + if ((error = rwnx_send_me_rc_set_rate(priv, sta->sta_idx, + (u16)rate_config.value)) != 0) + { + return error; + } + + printk("send success \n"); + priv->debugfs.rc_config[sta->sta_idx] = (int)rate_config.value; + return len; + +} + + +DEBUGFS_WRITE_FILE_OPS(rc_fixed_rate_idx); + +static ssize_t rwnx_dbgfs_last_rx_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct rwnx_sta *sta = NULL; + struct rwnx_hw *priv = file->private_data; + struct rwnx_rx_rate_stats *rate_stats; + char *buf; + int bufsz, i, len = 0; + ssize_t read; + unsigned int fmt, pre, bw, nss, mcs, gi; + u8 mac[6]; + struct rx_vector_1 *last_rx; + char hist[] = "##################################################"; + int hist_len = sizeof(hist) - 1; + u8 nrx; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + /* everything should fit in one call */ + if (*ppos) + return 0; + + /* Get the station index from MAC address */ + sscanf(file->f_path.dentry->d_parent->d_iname, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx", + &mac[0], &mac[1], &mac[2], &mac[3], &mac[4], &mac[5]); + //if (mac == NULL) + // return 0; + sta = rwnx_get_sta(priv, mac); + if (sta == NULL) + return 0; + + rate_stats = &sta->stats.rx_rate; + bufsz = (rate_stats->rate_cnt * ( 50 + hist_len) + 200); + buf = kmalloc(bufsz + 1, GFP_ATOMIC); + if (buf == NULL) + return 0; + + // Get number of RX paths + nrx = (priv->version_cfm.version_phy_1 & MDM_NRX_MASK) >> MDM_NRX_LSB; + + len += scnprintf(buf, bufsz, + "\nRX rate info for %02X:%02X:%02X:%02X:%02X:%02X:\n", + mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]); + + // Display Statistics + for (i = 0 ; i < rate_stats->size ; i++ ) + { + if (rate_stats->table[i]) { + union rwnx_rate_ctrl_info rate_config; + int percent = (rate_stats->table[i] * 1000) / rate_stats->cpt; + int p; + int ru_size; + + idx_to_rate_cfg(i, &rate_config, &ru_size); + len += print_rate_from_cfg(&buf[len], bufsz - len, + rate_config.value, NULL, ru_size); + p = (percent * hist_len) / 1000; + len += scnprintf(&buf[len], bufsz - len, ": %6d(%3d.%1d%%)%.*s\n", + rate_stats->table[i], + percent / 10, percent % 10, p, hist); + } + } + + // Display detailed info of the last received rate + last_rx = &sta->stats.last_rx.rx_vect1; + + len += scnprintf(&buf[len], bufsz - len,"\nLast received rate\n" + " type rate LDPC STBC BEAMFM DCM DOPPLER %s\n", + (nrx > 1) ? 
"rssi1(dBm) rssi2(dBm)" : "rssi(dBm)"); + + fmt = last_rx->format_mod; + bw = last_rx->ch_bw; + pre = last_rx->pre_type; + if (fmt >= FORMATMOD_HE_SU) { + mcs = last_rx->he.mcs; + nss = last_rx->he.nss; + gi = last_rx->he.gi_type; + if (fmt == FORMATMOD_HE_MU) + bw = last_rx->he.ru_size; + } else if (fmt == FORMATMOD_VHT) { + mcs = last_rx->vht.mcs; + nss = last_rx->vht.nss; + gi = last_rx->vht.short_gi; + } else if (fmt >= FORMATMOD_HT_MF) { + mcs = last_rx->ht.mcs % 8; + nss = last_rx->ht.mcs / 8;; + gi = last_rx->ht.short_gi; + } else { + BUG_ON((mcs = legrates_lut[last_rx->leg_rate].idx) == -1); + nss = 0; + gi = 0; + } + + len += print_rate(&buf[len], bufsz - len, fmt, nss, mcs, bw, gi, pre, NULL); + + /* flags for HT/VHT/HE */ + if (fmt >= FORMATMOD_HE_SU) { + len += scnprintf(&buf[len], bufsz - len, " %c %c %c %c %c", + last_rx->he.fec ? 'L' : ' ', + last_rx->he.stbc ? 'S' : ' ', + last_rx->he.beamformed ? 'B' : ' ', + last_rx->he.dcm ? 'D' : ' ', + last_rx->he.doppler ? 'D' : ' '); + } else if (fmt == FORMATMOD_VHT) { + len += scnprintf(&buf[len], bufsz - len, " %c %c %c ", + last_rx->vht.fec ? 'L' : ' ', + last_rx->vht.stbc ? 'S' : ' ', + last_rx->vht.beamformed ? 'B' : ' '); + } else if (fmt >= FORMATMOD_HT_MF) { + len += scnprintf(&buf[len], bufsz - len, " %c %c ", + last_rx->ht.fec ? 'L' : ' ', + last_rx->ht.stbc ? 'S' : ' '); + } else { + len += scnprintf(&buf[len], bufsz - len, " "); + } + if (nrx > 1) { + len += scnprintf(&buf[len], bufsz - len, " %-4d %d\n", + last_rx->rssi1, last_rx->rssi1); + } else { + len += scnprintf(&buf[len], bufsz - len, " %d\n", last_rx->rssi1); + } + + read = simple_read_from_buffer(user_buf, count, ppos, buf, len); + + kfree(buf); + return read; +} + +static ssize_t rwnx_dbgfs_last_rx_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct rwnx_sta *sta = NULL; + struct rwnx_hw *priv = file->private_data; + u8 mac[6]; + + /* Get the station index from MAC address */ + sscanf(file->f_path.dentry->d_parent->d_iname, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx", + &mac[0], &mac[1], &mac[2], &mac[3], &mac[4], &mac[5]); + //if (mac == NULL) + // return 0; + sta = rwnx_get_sta(priv, mac); + if (sta == NULL) + return 0; + + /* Prevent from interrupt preemption as these statistics are updated under + * interrupt */ + spin_lock_bh(&priv->tx_lock); + memset(sta->stats.rx_rate.table, 0, + sta->stats.rx_rate.size * sizeof(sta->stats.rx_rate.table[0])); + sta->stats.rx_rate.cpt = 0; + sta->stats.rx_rate.rate_cnt = 0; + spin_unlock_bh(&priv->tx_lock); + + return count; +} + +DEBUGFS_READ_WRITE_FILE_OPS(last_rx); + +#endif /* CONFIG_RWNX_FULLMAC */ + +#ifdef CONFIG_RWNX_FULLMAC +static void rwnx_rc_stat_work(struct work_struct *ws) +{ + struct rwnx_debugfs *rwnx_debugfs = container_of(ws, struct rwnx_debugfs, + rc_stat_work); + struct rwnx_hw *rwnx_hw = container_of(rwnx_debugfs, struct rwnx_hw, + debugfs); + struct rwnx_sta *sta; + uint8_t ridx, sta_idx; + + ridx = rwnx_debugfs->rc_read; + sta_idx = rwnx_debugfs->rc_sta[ridx]; + if (sta_idx > (NX_REMOTE_STA_MAX + NX_VIRT_DEV_MAX)) { + WARN(1, "Invalid sta index %d", sta_idx); + return; + } + + rwnx_debugfs->rc_sta[ridx] = 0xFF; + ridx = (ridx + 1) % ARRAY_SIZE(rwnx_debugfs->rc_sta); + rwnx_debugfs->rc_read = ridx; + sta = &rwnx_hw->sta_table[sta_idx]; + if (!sta) { + WARN(1, "Invalid sta %d", sta_idx); + return; + } + + if (rwnx_debugfs->dir_sta[sta_idx] == NULL) { + /* register the sta */ + struct dentry *dir_rc = rwnx_debugfs->dir_rc; + struct dentry *dir_sta; + struct dentry 
*file; + char sta_name[18]; + struct rwnx_rx_rate_stats *rate_stats = &sta->stats.rx_rate; + int nb_rx_rate = N_CCK + N_OFDM; + struct rwnx_rc_config_save *rc_cfg, *next; + + if (sta->sta_idx >= NX_REMOTE_STA_MAX) { + scnprintf(sta_name, sizeof(sta_name), "bc_mc"); + } else { + scnprintf(sta_name, sizeof(sta_name), "%pM", sta->mac_addr); + } + + if (!(dir_sta = debugfs_create_dir(sta_name, dir_rc))) + goto error; + + rwnx_debugfs->dir_sta[sta->sta_idx] = dir_sta; + + file = debugfs_create_file("stats", S_IRUSR, dir_sta, rwnx_hw, + &rwnx_dbgfs_rc_stats_ops); + if (IS_ERR_OR_NULL(file)) + goto error_after_dir; + + file = debugfs_create_file("fixed_rate_idx", S_IWUSR , dir_sta, rwnx_hw, + &rwnx_dbgfs_rc_fixed_rate_idx_ops); + if (IS_ERR_OR_NULL(file)) + goto error_after_dir; + + file = debugfs_create_file("rx_rate", S_IRUSR | S_IWUSR, dir_sta, rwnx_hw, + &rwnx_dbgfs_last_rx_ops); + if (IS_ERR_OR_NULL(file)) + goto error_after_dir; + + if (rwnx_hw->mod_params->ht_on) + nb_rx_rate += N_HT; + + if (rwnx_hw->mod_params->vht_on) + nb_rx_rate += N_VHT; + + if (rwnx_hw->mod_params->he_on) + nb_rx_rate += N_HE_SU + N_HE_MU; + + rate_stats->table = kzalloc(nb_rx_rate * sizeof(rate_stats->table[0]), + GFP_KERNEL); + if (!rate_stats->table) + goto error_after_dir; + + rate_stats->size = nb_rx_rate; + rate_stats->cpt = 0; + rate_stats->rate_cnt = 0; + + /* By default enable rate contoller */ + rwnx_debugfs->rc_config[sta_idx] = -1; + + /* Unless we already fix the rate for this station */ + list_for_each_entry_safe(rc_cfg, next, &rwnx_debugfs->rc_config_save, list) { + if (jiffies_to_msecs(jiffies - rc_cfg->timestamp) > RC_CONFIG_DUR) { + list_del(&rc_cfg->list); + kfree(rc_cfg); + } else if (!memcmp(rc_cfg->mac_addr, sta->mac_addr, ETH_ALEN)) { + rwnx_debugfs->rc_config[sta_idx] = rc_cfg->rate; + list_del(&rc_cfg->list); + kfree(rc_cfg); + break; + } + } + + if ((rwnx_debugfs->rc_config[sta_idx] >= 0) && + rwnx_send_me_rc_set_rate(rwnx_hw, sta_idx, + (u16)rwnx_debugfs->rc_config[sta_idx])) + rwnx_debugfs->rc_config[sta_idx] = -1; + + } else { + /* unregister the sta */ + if (sta->stats.rx_rate.table) { + kfree(sta->stats.rx_rate.table); + sta->stats.rx_rate.table = NULL; + } + sta->stats.rx_rate.size = 0; + sta->stats.rx_rate.cpt = 0; + sta->stats.rx_rate.rate_cnt = 0; + + /* If fix rate was set for this station, save the configuration in case + we reconnect to this station within RC_CONFIG_DUR msec */ + if (rwnx_debugfs->rc_config[sta_idx] >= 0) { + struct rwnx_rc_config_save *rc_cfg; + rc_cfg = kmalloc(sizeof(*rc_cfg), GFP_KERNEL); + if (rc_cfg) { + rc_cfg->rate = rwnx_debugfs->rc_config[sta_idx]; + rc_cfg->timestamp = jiffies; + memcpy(rc_cfg->mac_addr, sta->mac_addr, ETH_ALEN); + list_add_tail(&rc_cfg->list, &rwnx_debugfs->rc_config_save); + } + } + + debugfs_remove_recursive(rwnx_debugfs->dir_sta[sta_idx]); + rwnx_debugfs->dir_sta[sta->sta_idx] = NULL; + } + + return; + + error_after_dir: + debugfs_remove_recursive(rwnx_debugfs->dir_sta[sta_idx]); + rwnx_debugfs->dir_sta[sta->sta_idx] = NULL; + error: + dev_err(rwnx_hw->dev, + "Error while (un)registering debug entry for sta %d\n", sta_idx); +} + +void _rwnx_dbgfs_rc_stat_write(struct rwnx_debugfs *rwnx_debugfs, uint8_t sta_idx) +{ + uint8_t widx = rwnx_debugfs->rc_write; + if (rwnx_debugfs->rc_sta[widx] != 0XFF) { + WARN(1, "Overlap in debugfs rc_sta table\n"); + } + + if (rwnx_debugfs->unregistering) + return; + + rwnx_debugfs->rc_sta[widx] = sta_idx; + widx = (widx + 1) % ARRAY_SIZE(rwnx_debugfs->rc_sta); + rwnx_debugfs->rc_write = widx; + + 
schedule_work(&rwnx_debugfs->rc_stat_work); +} + +void rwnx_dbgfs_register_rc_stat(struct rwnx_hw *rwnx_hw, struct rwnx_sta *sta) +{ + _rwnx_dbgfs_rc_stat_write(&rwnx_hw->debugfs, sta->sta_idx); +} + +void rwnx_dbgfs_unregister_rc_stat(struct rwnx_hw *rwnx_hw, struct rwnx_sta *sta) +{ + _rwnx_dbgfs_rc_stat_write(&rwnx_hw->debugfs, sta->sta_idx); +} +#endif /* CONFIG_RWNX_FULLMAC */ + +int rwnx_dbgfs_register(struct rwnx_hw *rwnx_hw, const char *name) +{ +#ifdef CONFIG_RWNX_FULLMAC + struct dentry *phyd = rwnx_hw->wiphy->debugfsdir; + struct dentry *dir_rc; +#endif /* CONFIG_RWNX_FULLMAC */ + struct rwnx_debugfs *rwnx_debugfs = &rwnx_hw->debugfs; + struct dentry *dir_drv, *dir_diags; + + if (!(dir_drv = debugfs_create_dir(name, phyd))) + return -ENOMEM; + + rwnx_debugfs->dir = dir_drv; + rwnx_debugfs->unregistering = false; + + if (!(dir_diags = debugfs_create_dir("diags", dir_drv))) + goto err; + +#ifdef CONFIG_RWNX_FULLMAC + if (!(dir_rc = debugfs_create_dir("rc", dir_drv))) + goto err; + rwnx_debugfs->dir_rc = dir_rc; + INIT_WORK(&rwnx_debugfs->rc_stat_work, rwnx_rc_stat_work); + INIT_LIST_HEAD(&rwnx_debugfs->rc_config_save); + rwnx_debugfs->rc_write = rwnx_debugfs->rc_read = 0; + memset(rwnx_debugfs->rc_sta, 0xFF, sizeof(rwnx_debugfs->rc_sta)); +#endif + + DEBUGFS_ADD_U32(tcp_pacing_shift, dir_drv, &rwnx_hw->tcp_pacing_shift, + S_IWUSR | S_IRUSR); + DEBUGFS_ADD_FILE(stats, dir_drv, S_IWUSR | S_IRUSR); + DEBUGFS_ADD_FILE(sys_stats, dir_drv, S_IRUSR); + DEBUGFS_ADD_FILE(txq, dir_drv, S_IRUSR); + DEBUGFS_ADD_FILE(acsinfo, dir_drv, S_IRUSR); +#ifdef CONFIG_RWNX_MUMIMO_TX + DEBUGFS_ADD_FILE(mu_group, dir_drv, S_IRUSR); +#endif + DEBUGFS_ADD_FILE(regdbg, dir_drv, S_IWUSR); + DEBUGFS_ADD_FILE(vendor_hwconfig, dir_drv,S_IWUSR); + DEBUGFS_ADD_FILE(vendor_swconfig, dir_drv,S_IWUSR); + DEBUGFS_ADD_FILE(vendor_hwconfig_x2, dir_drv,S_IWUSR); + DEBUGFS_ADD_FILE(vendor_swconfig_x2, dir_drv,S_IWUSR); + DEBUGFS_ADD_FILE(agg_disable, dir_drv,S_IWUSR); + DEBUGFS_ADD_FILE(set_roc, dir_drv,S_IWUSR); + DEBUGFS_ADD_FILE(dbg_level, dir_drv, S_IWUSR | S_IRUSR); + +#ifdef CONFIG_RWNX_P2P_DEBUGFS + { + /* Create a p2p directory */ + struct dentry *dir_p2p; + if (!(dir_p2p = debugfs_create_dir("p2p", dir_drv))) + goto err; + + /* Add file allowing to control Opportunistic PS */ + DEBUGFS_ADD_FILE(oppps, dir_p2p, S_IRUSR); + /* Add file allowing to control Notice of Absence */ + DEBUGFS_ADD_FILE(noa, dir_p2p, S_IRUSR); + } +#endif /* CONFIG_RWNX_P2P_DEBUGFS */ + + if (rwnx_hw->fwlog_en) { + rwnx_fw_log_init(&rwnx_hw->debugfs.fw_log); + DEBUGFS_ADD_FILE(fw_log, dir_drv, S_IWUSR | S_IRUSR); + } +#ifdef CONFIG_RWNX_RADAR + { + struct dentry *dir_radar, *dir_sec; + if (!(dir_radar = debugfs_create_dir("radar", dir_drv))) + goto err; + + DEBUGFS_ADD_FILE(pulses_prim, dir_radar, S_IRUSR); + DEBUGFS_ADD_FILE(detected, dir_radar, S_IRUSR); + DEBUGFS_ADD_FILE(enable, dir_radar, S_IRUSR); + + if (rwnx_hw->phy.cnt == 2) { + DEBUGFS_ADD_FILE(pulses_sec, dir_radar, S_IRUSR); + + if (!(dir_sec = debugfs_create_dir("sec", dir_radar))) + goto err; + + DEBUGFS_ADD_FILE(band, dir_sec, S_IWUSR | S_IRUSR); + DEBUGFS_ADD_FILE(type, dir_sec, S_IWUSR | S_IRUSR); + DEBUGFS_ADD_FILE(prim20, dir_sec, S_IWUSR | S_IRUSR); + DEBUGFS_ADD_FILE(center1, dir_sec, S_IWUSR | S_IRUSR); + DEBUGFS_ADD_FILE(center2, dir_sec, S_IWUSR | S_IRUSR); + DEBUGFS_ADD_FILE(set, dir_sec, S_IWUSR | S_IRUSR); + } + } +#endif /* CONFIG_RWNX_RADAR */ + return 0; + +err: + rwnx_dbgfs_unregister(rwnx_hw); + return -ENOMEM; +} + +void rwnx_dbgfs_unregister(struct rwnx_hw 
*rwnx_hw) +{ + struct rwnx_debugfs *rwnx_debugfs = &rwnx_hw->debugfs; +#ifdef CONFIG_RWNX_FULLMAC + struct rwnx_rc_config_save *cfg, *next; +#endif + +#ifdef CONFIG_RWNX_FULLMAC + list_for_each_entry_safe(cfg, next, &rwnx_debugfs->rc_config_save, list) { + list_del(&cfg->list); + kfree(cfg); + } +#endif /* CONFIG_RWNX_FULLMAC */ + + if (rwnx_hw->fwlog_en) + rwnx_fw_log_deinit(&rwnx_hw->debugfs.fw_log); + + if (!rwnx_hw->debugfs.dir) + return; + + rwnx_debugfs->unregistering = true; +#ifdef CONFIG_RWNX_FULLMAC + flush_work(&rwnx_debugfs->rc_stat_work); +#endif + debugfs_remove_recursive(rwnx_hw->debugfs.dir); + rwnx_hw->debugfs.dir = NULL; +} + +#endif // diff --git a/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_debugfs.h b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_debugfs.h new file mode 100644 index 0000000..5d93f12 --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_debugfs.h @@ -0,0 +1,203 @@ +/** + ****************************************************************************** + * + * @file rwnx_debugfs.h + * + * @brief Miscellaneous utility function definitions + * + * Copyright (C) RivieraWaves 2012-2019 + * + ****************************************************************************** + */ + + +#ifndef _RWNX_DEBUGFS_H_ +#define _RWNX_DEBUGFS_H_ + +#include +#include +#include +#include "rwnx_fw_trace.h" + +struct rwnx_hw; +struct rwnx_sta; + +/* some macros taken from iwlwifi */ +/* TODO: replace with generic read and fill read buffer in open to avoid double + * reads */ +#define DEBUGFS_ADD_FILE(name, parent, mode) do { \ + if (!debugfs_create_file(#name, mode, parent, rwnx_hw, \ + &rwnx_dbgfs_##name##_ops)) \ + goto err; \ +} while (0) + +#define DEBUGFS_ADD_BOOL(name, parent, ptr) do { \ + struct dentry *__tmp; \ + __tmp = debugfs_create_bool(#name, S_IWUSR | S_IRUSR, \ + parent, ptr); \ + if (IS_ERR(__tmp) || !__tmp) \ + goto err; \ +} while (0) + +#define DEBUGFS_ADD_X64(name, parent, ptr) do { \ + struct dentry *__tmp; \ + __tmp = debugfs_create_x64(#name, S_IWUSR | S_IRUSR, \ + parent, ptr); \ + if (IS_ERR(__tmp) || !__tmp) \ + goto err; \ +} while (0) + +#define DEBUGFS_ADD_U64(name, parent, ptr, mode) do { \ + struct dentry *__tmp; \ + __tmp = debugfs_create_u64(#name, mode, \ + parent, ptr); \ + if (IS_ERR(__tmp) || !__tmp) \ + goto err; \ +} while (0) + +#define DEBUGFS_ADD_X32(name, parent, ptr) do { \ + struct dentry *__tmp; \ + __tmp = debugfs_create_x32(#name, S_IWUSR | S_IRUSR, \ + parent, ptr); \ + if (IS_ERR(__tmp) || !__tmp) \ + goto err; \ +} while (0) + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 7, 0) +#define DEBUGFS_ADD_U32(name, parent, ptr, mode) do { \ + debugfs_create_u32(#name, mode, \ + parent, ptr); \ +} while (0) +#else +#define DEBUGFS_ADD_U32(name, parent, ptr, mode) do { \ + struct dentry *__tmp; \ + __tmp = debugfs_create_u32(#name, mode, \ + parent, ptr); \ + if (IS_ERR(__tmp) || !__tmp) \ + goto err; \ + } while (0) +#endif + + +/* file operation */ +#define DEBUGFS_READ_FUNC(name) \ + static ssize_t rwnx_dbgfs_##name##_read(struct file *file, \ + char __user *user_buf, \ + size_t count, loff_t *ppos); + +#define DEBUGFS_WRITE_FUNC(name) \ + static ssize_t rwnx_dbgfs_##name##_write(struct file *file, \ + const char __user *user_buf,\ + size_t count, loff_t *ppos); + +#define DEBUGFS_OPEN_FUNC(name) \ + static int rwnx_dbgfs_##name##_open(struct inode *inode, \ + struct file *file); + +#define DEBUGFS_RELEASE_FUNC(name) \ + static int rwnx_dbgfs_##name##_release(struct inode *inode, \ + struct file *file); + 
+#define DEBUGFS_READ_FILE_OPS(name) \ + DEBUGFS_READ_FUNC(name); \ +static const struct file_operations rwnx_dbgfs_##name##_ops = { \ + .read = rwnx_dbgfs_##name##_read, \ + .open = simple_open, \ + .llseek = generic_file_llseek, \ +}; + +#define DEBUGFS_WRITE_FILE_OPS(name) \ + DEBUGFS_WRITE_FUNC(name); \ +static const struct file_operations rwnx_dbgfs_##name##_ops = { \ + .write = rwnx_dbgfs_##name##_write, \ + .open = simple_open, \ + .llseek = generic_file_llseek, \ +}; + +#define DEBUGFS_READ_WRITE_FILE_OPS(name) \ + DEBUGFS_READ_FUNC(name); \ + DEBUGFS_WRITE_FUNC(name); \ +static const struct file_operations rwnx_dbgfs_##name##_ops = { \ + .write = rwnx_dbgfs_##name##_write, \ + .read = rwnx_dbgfs_##name##_read, \ + .open = simple_open, \ + .llseek = generic_file_llseek, \ +}; + +#define DEBUGFS_READ_WRITE_OPEN_RELEASE_FILE_OPS(name) \ + DEBUGFS_READ_FUNC(name); \ + DEBUGFS_WRITE_FUNC(name); \ + DEBUGFS_OPEN_FUNC(name); \ + DEBUGFS_RELEASE_FUNC(name); \ +static const struct file_operations rwnx_dbgfs_##name##_ops = { \ + .write = rwnx_dbgfs_##name##_write, \ + .read = rwnx_dbgfs_##name##_read, \ + .open = rwnx_dbgfs_##name##_open, \ + .release = rwnx_dbgfs_##name##_release, \ + .llseek = generic_file_llseek, \ +}; + + +#ifdef CONFIG_RWNX_DEBUGFS + +struct rwnx_debugfs { + unsigned long long rateidx; + struct dentry *dir; + bool trace_prst; + + char helper_cmd[64]; + //struct work_struct helper_work; + bool helper_scheduled; + spinlock_t umh_lock; + bool unregistering; + +#ifndef CONFIG_RWNX_FHOST + struct rwnx_fw_log fw_log; +#endif /* CONFIG_RWNX_FHOST */ + +#ifdef CONFIG_RWNX_FULLMAC + struct work_struct rc_stat_work; + uint8_t rc_sta[NX_REMOTE_STA_MAX]; + uint8_t rc_write; + uint8_t rc_read; + struct dentry *dir_rc; + struct dentry *dir_sta[NX_REMOTE_STA_MAX]; + int rc_config[NX_REMOTE_STA_MAX]; + struct list_head rc_config_save; +#endif +}; + +#ifdef CONFIG_RWNX_FULLMAC + +// Max duration in msecs to save rate config for a sta after disconnection +#define RC_CONFIG_DUR 600000 + +struct rwnx_rc_config_save { + struct list_head list; + unsigned long timestamp; + int rate; + u8 mac_addr[ETH_ALEN]; +}; +#endif + +int rwnx_dbgfs_register(struct rwnx_hw *rwnx_hw, const char *name); +void rwnx_dbgfs_unregister(struct rwnx_hw *rwnx_hw); +#ifdef CONFIG_RWNX_FULLMAC +void rwnx_dbgfs_register_rc_stat(struct rwnx_hw *rwnx_hw, struct rwnx_sta *sta); +void rwnx_dbgfs_unregister_rc_stat(struct rwnx_hw *rwnx_hw, struct rwnx_sta *sta); +#endif +#else + +struct rwnx_debugfs { +}; + +static inline int rwnx_dbgfs_register(struct rwnx_hw *rwnx_hw, const char *name) { return 0; } +static inline void rwnx_dbgfs_unregister(struct rwnx_hw *rwnx_hw) {} +#ifdef CONFIG_RWNX_FULLMAC +static inline void rwnx_dbgfs_register_rc_stat(struct rwnx_hw *rwnx_hw, struct rwnx_sta *sta) {} +static inline void rwnx_dbgfs_unregister_rc_stat(struct rwnx_hw *rwnx_hw, struct rwnx_sta *sta) {} +#endif +#endif /* CONFIG_RWNX_DEBUGFS */ + + +#endif /* _RWNX_DEBUGFS_H_ */ diff --git a/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_defs.h b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_defs.h new file mode 100644 index 0000000..04dc8f3 --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_defs.h @@ -0,0 +1,928 @@ +/** + ****************************************************************************** + * + * @file rwnx_defs.h + * + * @brief Main driver structure declarations for fullmac driver + * + * Copyright (C) RivieraWaves 2012-2019 + * + 
****************************************************************************** + */ + +#ifndef _RWNX_DEFS_H_ +#define _RWNX_DEFS_H_ + +#include +#include +#include +#include +#include +#include + +#include "rwnx_mod_params.h" +#include "rwnx_debugfs.h" +#include "rwnx_tx.h" +#include "rwnx_rx.h" +#include "rwnx_radar.h" +#include "rwnx_utils.h" +#include "rwnx_mu_group.h" +#include "rwnx_platform.h" +#include "rwnx_cmds.h" +#include "rwnx_compat.h" +#ifdef CONFIG_FILTER_TCP_ACK +#include "aicwf_tcp_ack.h" +#endif +#ifdef CONFIG_BAND_STEERING +#include "aicwf_steering.h" +#endif + +#ifdef AICWF_SDIO_SUPPORT +#include "aicwf_sdio.h" +#include "sdio_host.h" +#endif + +#ifdef AICWF_USB_SUPPORT +#include "usb_host.h" +#endif + +#ifdef CONFIG_BR_SUPPORT +#include "aic_br_ext.h" +#endif /* CONFIG_BR_SUPPORT */ + +#define WPI_HDR_LEN 18 +#define WPI_PN_LEN 16 +#define WPI_PN_OFST 2 +#define WPI_MIC_LEN 16 +#define WPI_KEY_LEN 32 +#define WPI_SUBKEY_LEN 16 // WPI key is actually two 16bytes key + +#define LEGACY_PS_ID 0 +#define UAPSD_ID 1 + +#define PS_SP_INTERRUPTED 255 +#define MAC_ADDR_LEN 6 + +//because android kernel 5.15 uses kernel 6.0 or 6.1 kernel api +#ifdef ANDROID_PLATFORM +#define HIGH_KERNEL_VERSION KERNEL_VERSION(5, 15, 41) +#define HIGH_KERNEL_VERSION2 KERNEL_VERSION(5, 15, 41) +#define HIGH_KERNEL_VERSION3 KERNEL_VERSION(5, 15, 104) +#define HIGH_KERNEL_VERSION4 KERNEL_VERSION(6, 1, 0) +#else +#define HIGH_KERNEL_VERSION KERNEL_VERSION(6, 0, 0) +#define HIGH_KERNEL_VERSION2 KERNEL_VERSION(6, 1, 0) +#define HIGH_KERNEL_VERSION3 KERNEL_VERSION(6, 3, 0) +#define HIGH_KERNEL_VERSION4 KERNEL_VERSION(6, 3, 0) +#endif + + + +#if LINUX_VERSION_CODE >= HIGH_KERNEL_VERSION +#define IEEE80211_MAX_AMPDU_BUF IEEE80211_MAX_AMPDU_BUF_HE +#define IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMER_FB IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMING_PARTIAL_BW_FB +#define IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMER_FB IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMING_FB +#define IEEE80211_HE_PHY_CAP3_RX_HE_MU_PPDU_FROM_NON_AP_STA IEEE80211_HE_PHY_CAP3_RX_PARTIAL_BW_SU_IN_20MHZ_MU +#endif + +#ifndef IEEE80211_MAX_AMPDU_BUF +#define IEEE80211_MAX_AMPDU_BUF 0x100 +#endif +#ifndef IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMER_FB +#define IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMER_FB 0x08 +#endif +#ifndef IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMER_FB +#define IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMER_FB 0x04 +#endif +#ifndef IEEE80211_HE_PHY_CAP3_RX_HE_MU_PPDU_FROM_NON_AP_STA +#define IEEE80211_HE_PHY_CAP3_RX_HE_MU_PPDU_FROM_NON_AP_STA 0x40 +#endif + + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0) || defined(CONFIG_VHT_FOR_OLD_KERNEL) +enum nl80211_ac { + NL80211_AC_VO, + NL80211_AC_VI, + NL80211_AC_BE, + NL80211_AC_BK, + NL80211_NUM_ACS +}; +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0) || defined(CONFIG_VHT_FOR_OLD_KERNEL) +struct ieee80211_vht_operation { + u8 vht_op_info_chwidth; + u8 vht_op_info_chan_center_freq_seg1_idx; + u8 vht_op_info_chan_center_freq_seg2_idx; + __le16 vht_basic_mcs_set; +} __packed; +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0) +#define NL80211_IFTYPE_P2P_DEVICE 10 +#define IEEE80211_RADIOTAP_AMPDU_STATUS 20 +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0) || defined(CONFIG_VHT_FOR_OLD_KERNEL) +#define IEEE80211_RADIOTAP_VHT 21 +#define IEEE80211_RADIOTAP_VHT_KNOWN_GI 0x0004 +#define IEEE80211_RADIOTAP_VHT_KNOWN_BANDWIDTH 0x0040 + +#define IEEE80211_RADIOTAP_VHT_FLAG_STBC 0x01 +#define IEEE80211_RADIOTAP_VHT_FLAG_SGI 0x04 + +#define NL80211_FEATURE_CELL_BASE_REG_HINTS 1 
<< 3 +#define NL80211_FEATURE_P2P_DEVICE_NEEDS_CHANNEL 1 << 4 +#define NL80211_FEATURE_SAE 1 << 5 +#define NL80211_FEATURE_LOW_PRIORITY_SCAN 1 << 6 +#define NL80211_FEATURE_SCAN_FLUSH 1 << 7 +#define NL80211_FEATURE_AP_SCAN 1 << 8 +#define NL80211_FEATURE_VIF_TXPOWER 1 << 9 +#define NL80211_FEATURE_NEED_OBSS_SCAN 1 << 10 +#define NL80211_FEATURE_P2P_GO_CTWIN 1 << 11 +#define NL80211_FEATURE_P2P_GO_OPPPS 1 << 12 + +/* 802.11ac VHT Capabilities */ +#define IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895 0x00000000 +#define IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991 0x00000001 +#define IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 0x00000002 +#define IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ 0x00000004 +#define IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ 0x00000008 +#define IEEE80211_VHT_CAP_RXLDPC 0x00000010 +#define IEEE80211_VHT_CAP_SHORT_GI_80 0x00000020 +#define IEEE80211_VHT_CAP_SHORT_GI_160 0x00000040 +#define IEEE80211_VHT_CAP_TXSTBC 0x00000080 +#define IEEE80211_VHT_CAP_RXSTBC_1 0x00000100 +#define IEEE80211_VHT_CAP_RXSTBC_2 0x00000200 +#define IEEE80211_VHT_CAP_RXSTBC_3 0x00000300 +#define IEEE80211_VHT_CAP_RXSTBC_4 0x00000400 +#define IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE 0x00000800 +#define IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE 0x00001000 +#define IEEE80211_VHT_CAP_BEAMFORMER_ANTENNAS_MAX 0x00006000 +#define IEEE80211_VHT_CAP_SOUNDING_DIMENTION_MAX 0x00030000 +#define IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE 0x00080000 +#define IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE 0x00100000 +#define IEEE80211_VHT_CAP_VHT_TXOP_PS 0x00200000 +#define IEEE80211_VHT_CAP_HTC_VHT 0x00400000 +#define IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT 23 +#define IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK \ + (7 << IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT) +#define IEEE80211_VHT_CAP_VHT_LINK_ADAPTATION_VHT_UNSOL_MFB 0x08000000 +#define IEEE80211_VHT_CAP_VHT_LINK_ADAPTATION_VHT_MRQ_MFB 0x0c000000 +#define IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN 0x10000000 +#define IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN 0x20000000 + +enum ieee80211_vht_mcs_support { + IEEE80211_VHT_MCS_SUPPORT_0_7 = 0, + IEEE80211_VHT_MCS_SUPPORT_0_8 = 1, + IEEE80211_VHT_MCS_SUPPORT_0_9 = 2, + IEEE80211_VHT_MCS_NOT_SUPPORTED = 3, +}; + +enum nl80211_chan_width { + NL80211_CHAN_WIDTH_20_NOHT, + NL80211_CHAN_WIDTH_20, + NL80211_CHAN_WIDTH_40, + NL80211_CHAN_WIDTH_80, + NL80211_CHAN_WIDTH_80P80, + NL80211_CHAN_WIDTH_160, +}; + +struct cfg80211_chan_def { + struct ieee80211_channel *chan; + enum nl80211_chan_width width; + u32 center_freq1; + u32 center_freq2; +}; + +enum nl80211_mesh_power_mode { + NL80211_MESH_POWER_UNKNOWN, + NL80211_MESH_POWER_ACTIVE, + NL80211_MESH_POWER_LIGHT_SLEEP, + NL80211_MESH_POWER_DEEP_SLEEP, + __NL80211_MESH_POWER_AFTER_LAST, + NL80211_MESH_POWER_MAX = __NL80211_MESH_POWER_AFTER_LAST - 1 +}; +#endif + +#ifdef CONFIG_BAND_STEERING +enum band_type { + BAND_ON_24G = 0, + BAND_ON_5G = 1, + BAND_ON_60G = 2, + BAND_ON_6G = 3, + BAND_MAX, +}; + +enum WIFI_FRAME_TYPE { + WIFI_MGT_TYPE = (0), +}; + +enum WIFI_FRAME_SUBTYPE { + WIFI_ASSOCREQ = (0 | WIFI_MGT_TYPE), + WIFI_PROBEREQ = (BIT(6) | WIFI_MGT_TYPE), + WIFI_AUTH = (BIT(7) | BIT(5) | BIT(4) | WIFI_MGT_TYPE), +}; + +struct tmp_feature_sta { + u8_l sta_idx; + u8_l supported_band; +}; + +#define MAX_PENDING_PROBES 3 +struct ap_probe_rsp { + u8_l da[6]; + struct work_struct rsp_work; + bool in_use; +}; +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0) +#define NL80211_MESHCONF_POWER_MODE 26 + +/* + * TDLS capabililites to be enabled in the 5th byte of the + 
* @WLAN_EID_EXT_CAPABILITY information element + */ +#define WLAN_EXT_CAPA5_TDLS_ENABLED BIT(5) +#define WLAN_EXT_CAPA5_TDLS_PROHIBITED BIT(6) + +#define WLAN_EXT_CAPA8_OPMODE_NOTIF BIT(6) + +/* TDLS specific payload type in the LLC/SNAP header */ +#define WLAN_TDLS_SNAP_RFTYPE 0x2 + +#endif + + +/** + * struct rwnx_bcn - Information of the beacon in used (AP mode) + * + * @head: head portion of beacon (before TIM IE) + * @tail: tail portion of beacon (after TIM IE) + * @ies: extra IEs (not used ?) + * @head_len: length of head data + * @tail_len: length of tail data + * @ies_len: length of extra IEs data + * @tim_len: length of TIM IE + * @len: Total beacon len (head + tim + tail + extra) + * @dtim: dtim period + */ +struct rwnx_bcn { + u8 *head; + u8 *tail; + u8 *ies; + size_t head_len; + size_t tail_len; + size_t ies_len; + size_t tim_len; + size_t len; + u8 dtim; +}; + +/** + * struct rwnx_key - Key information + * + * @hw_idx: Idx of the key from hardware point of view + */ +struct rwnx_key { + u8 hw_idx; +}; + +/** + * Structure containing information about a Mesh Path + */ +struct rwnx_mesh_path { + struct list_head list; /* For rwnx_vif.mesh_paths */ + u8 path_idx; /* Path Index */ + struct mac_addr tgt_mac_addr; /* Target MAC Address */ + struct rwnx_sta *p_nhop_sta; /* Pointer to the Next Hop STA */ +}; + +struct rwnx_mesh_proxy { + struct list_head list; /* For rwnx_vif.mesh_proxy */ + struct mac_addr ext_sta_addr; /* Address of the External STA */ + struct mac_addr proxy_addr; /* Proxy MAC Address */ + bool local; /* Indicate if interface is a proxy for the device */ +}; + +/** + * struct rwnx_csa - Information for CSA (Channel Switch Announcement) + * + * @vif: Pointer to the vif doing the CSA + * @bcn: Beacon to use after CSA + * @elem: IPC buffer to send the new beacon to the fw + * @chandef: defines the channel to use after the switch + * @count: Current csa counter + * @status: Status of the CSA at fw level + * @ch_idx: Index of the new channel context + * @work: work scheduled at the end of CSA + */ +struct rwnx_csa { + struct rwnx_vif *vif; + struct rwnx_bcn bcn; + struct rwnx_ipc_elem_var elem; + struct cfg80211_chan_def chandef; + int count; + int status; + int ch_idx; + struct work_struct work; +}; + +struct apm_probe_sta { + u8 sta_mac_addr[6]; + u8 vif_idx; + u64 probe_id; + struct work_struct apmprobestaWork; + struct workqueue_struct *apmprobesta_wq; +}; + +/// Possible States of the TDLS link. +enum tdls_status_tag { + /// TDLS link is not active (no TDLS peer connected) + TDLS_LINK_IDLE, + /// TDLS Setup Request transmitted + TDLS_SETUP_REQ_TX, + /// TDLS Setup Response transmitted + TDLS_SETUP_RSP_TX, + /// TDLS link is active (TDLS peer connected) + TDLS_LINK_ACTIVE, + /// TDLS Max Number of states. + TDLS_STATE_MAX +}; + +/* + * Structure used to save information relative to the TDLS peer. + * This is also linked within the rwnx_hw vifs list. 
+ * + */ +struct rwnx_tdls { + bool active; /* Indicate if TDLS link is active */ + bool initiator; /* Indicate if TDLS peer is the TDLS initiator */ + bool chsw_en; /* Indicate if channel switch is enabled */ + u8 last_tid; /* TID of the latest MPDU transmitted over the + TDLS direct link to the TDLS STA */ + u16 last_sn; /* Sequence number of the latest MPDU transmitted + over the TDLS direct link to the TDLS STA */ + bool ps_on; /* Indicate if the power save is enabled on the + TDLS STA */ + bool chsw_allowed; /* Indicate if TDLS channel switch is allowed */ +}; + + +/** + * enum rwnx_ap_flags - AP flags + * + * @RWNX_AP_ISOLATE Isolate clients (i.e. Don't brige packets transmitted by + * one client for another one) + */ +enum rwnx_ap_flags { + RWNX_AP_ISOLATE = BIT(0), +}; + +/* + * Structure used to save information relative to the managed interfaces. + * This is also linked within the rwnx_hw vifs list. + * + */ +struct rwnx_vif { + struct list_head list; + struct rwnx_hw *rwnx_hw; + struct wireless_dev wdev; + struct net_device *ndev; + struct net_device_stats net_stats; + struct rwnx_key key[6]; + unsigned long drv_flags; + atomic_t drv_conn_state; + u8 drv_vif_index; /* Identifier of the VIF in driver */ + u8 vif_index; /* Identifier of the station in FW */ + u8 ch_index; /* Channel context identifier */ + bool up; /* Indicate if associated netdev is up + (i.e. Interface is created at fw level) */ + bool use_4addr; /* Should we use 4addresses mode */ + bool is_resending; /* Indicate if a frame is being resent on this interface */ + bool user_mpm; /* In case of Mesh Point VIF, indicate if MPM is handled by userspace */ + bool roc_tdls; /* Indicate if the ROC has been called by a + TDLS station */ + u8 tdls_status; /* Status of the TDLS link */ + bool tdls_chsw_prohibited; /* Indicate if TDLS Channel Switch is prohibited */ + bool wep_enabled; /* 1 if WEP is enabled */ + bool wep_auth_err; /* 1 if auth status code is not supported auth alg when WEP enabled */ + enum nl80211_auth_type last_auth_type; /* Authentication type (algorithm) sent in the last connection + when WEP enabled */ + union + { + struct + { + struct rwnx_sta *ap; /* Pointer to the peer STA entry allocated for + the AP */ + struct rwnx_sta *tdls_sta; /* Pointer to the TDLS station */ + bool external_auth; /* Indicate if external authentication is in progress */ + u32 group_cipher_type; + u32 paired_cipher_type; + //connected network info start + char ssid[33];//ssid max is 32, but this has one spare for '\0' + int ssid_len; + u8 bssid[ETH_ALEN]; + u32 conn_owner_nlportid; + bool is_roam; + //connected network info end + } sta; + struct + { + u16 flags; /* see rwnx_ap_flags */ + struct list_head sta_list; /* List of STA connected to the AP */ + struct rwnx_bcn bcn; /* beacon */ + u8 bcmc_index; /* Index of the BCMC sta to use */ + #if (defined CONFIG_HE_FOR_OLD_KERNEL) || (defined CONFIG_VHT_FOR_OLD_KERNEL) + u8 aic_index; + #endif + struct rwnx_csa *csa; + + struct list_head mpath_list; /* List of Mesh Paths used on this interface */ + struct list_head proxy_list; /* List of Proxies Information used on this interface */ + bool create_path; /* Indicate if we are waiting for a MESH_CREATE_PATH_CFM + message */ + int generation; /* Increased each time the list of Mesh Paths is updated */ +#ifdef CONFIG_BAND_STEERING + u8_l tmp_sta_idx; + enum band_type band; + u32_l freq; + bool start; +#endif + enum nl80211_mesh_power_mode mesh_pm; /* mesh power save mode currently set in firmware */ + enum nl80211_mesh_power_mode 
next_mesh_pm; /* mesh power save mode for next peer */ + } ap; + struct + { + struct rwnx_vif *master; /* pointer on master interface */ + struct rwnx_sta *sta_4a; + } ap_vlan; + }; + + u8_l key_has_add; + u8_l is_p2p_vif; + struct apm_probe_sta sta_probe; + + #ifdef CONFIG_BR_SUPPORT + spinlock_t br_ext_lock; + /* unsigned int macclone_completed; */ + struct nat25_network_db_entry *nethash[NAT25_HASH_SIZE]; + int pppoe_connection_in_progress; + unsigned char pppoe_addr[MACADDRLEN]; + unsigned char scdb_mac[MACADDRLEN]; + unsigned char scdb_ip[4]; + struct nat25_network_db_entry *scdb_entry; + unsigned char br_mac[MACADDRLEN]; + unsigned char br_ip[4]; + + struct br_ext_info ethBrExtInfo; + #endif /* CONFIG_BR_SUPPORT */ +#ifdef CONFIG_BAND_STEERING + struct workqueue_struct *rsp_wq; + struct timer_list steer_timer; + struct work_struct steer_work; + struct b_steer_priv bsteerpriv; + struct ap_probe_rsp pb_pool[MAX_PENDING_PROBES]; +#endif + +}; + +#define RWNX_VIF_TYPE(rwnx_vif) (rwnx_vif->wdev.iftype) + +/** + * Structure used to store information relative to PS mode. + * + * @active: True when the sta is in PS mode. + * If false, other values should be ignored + * @pkt_ready: Number of packets buffered for the sta in drv's txq + * (1 counter for Legacy PS and 1 for U-APSD) + * @sp_cnt: Number of packets that remain to be pushed in the service period. + * 0 means that no service period is in progress + * (1 counter for Legacy PS and 1 for U-APSD) + */ +struct rwnx_sta_ps { + bool active; + u16 pkt_ready[2]; + u16 sp_cnt[2]; +}; + +/** + * struct rwnx_rx_rate_stats - Store statistics for RX rates + * + * @table: Table indicating how many frame has been receive which each + * rate index. Rate index is the same as the one used by RC algo for TX + * @size: Size of the table array + * @cpt: number of frames received + */ +struct rwnx_rx_rate_stats { + int *table; + int size; + int cpt; + int rate_cnt; +}; + +/** + * struct rwnx_sta_stats - Structure Used to store statistics specific to a STA + * + * @last_rx: Hardware vector of the last received frame + * @rx_rate: Statistics of the received rates + */ +struct rwnx_sta_stats { + u32 rx_pkts; + u32 tx_pkts; + u64 rx_bytes; + u64 tx_bytes; + unsigned long last_act; + struct hw_vect last_rx; + struct rwnx_rx_rate_stats rx_rate; + u32 last_chan_time; + u32 last_chan_busy_time; + u32 tx_ack_succ_stat; + u32 tx_ack_fail_stat; + u32 last_chan_tx_busy_time; +}; + +#if (defined CONFIG_HE_FOR_OLD_KERNEL) || (defined CONFIG_VHT_FOR_OLD_KERNEL) +struct aic_sta { + u8 sta_idx; /* Identifier of the station */ + bool he; /* Flag indicating if the station supports HE */ + bool vht; /* Flag indicating if the station supports VHT */ + + struct ieee80211_he_cap_elem he_cap_elem; + struct ieee80211_he_mcs_nss_supp he_mcs_nss_supp; + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0) || defined(CONFIG_VHT_FOR_OLD_KERNEL) + __le32 vht_cap_info; + struct ieee80211_vht_mcs_info supp_mcs; +#endif +}; +#endif + +/* + * Structure used to save information relative to the managed stations. 
+ */ +struct rwnx_sta { + struct list_head list; + u16 aid; /* association ID */ + u8 sta_idx; /* Identifier of the station */ + u8 vif_idx; /* Identifier of the VIF (fw id) the station + belongs to */ + u8 vlan_idx; /* Identifier of the VLAN VIF (fw id) the station + belongs to (= vif_idx if no vlan in used) */ + enum nl80211_band band; /* Band */ + enum nl80211_chan_width width; /* Channel width */ + u16 center_freq; /* Center frequency */ + u32 center_freq1; /* Center frequency 1 */ + u32 center_freq2; /* Center frequency 2 */ + u8 ch_idx; /* Identifier of the channel + context the station belongs to */ + bool qos; /* Flag indicating if the station + supports QoS */ + u8 acm; /* Bitfield indicating which queues + have AC mandatory */ + u16 uapsd_tids; /* Bitfield indicating which tids are subject to + UAPSD */ + u8 mac_addr[ETH_ALEN]; /* MAC address of the station */ + struct rwnx_key key; + bool valid; /* Flag indicating if the entry is valid */ + struct rwnx_sta_ps ps; /* Information when STA is in PS (AP only) */ +#ifdef CONFIG_RWNX_BFMER + struct rwnx_bfmer_report *bfm_report; /* Beamforming report to be used for + VHT TX Beamforming */ +#ifdef CONFIG_RWNX_MUMIMO_TX + struct rwnx_sta_group_info group_info; /* MU grouping information for the STA */ +#endif /* CONFIG_RWNX_MUMIMO_TX */ +#endif /* CONFIG_RWNX_BFMER */ + + bool ht; /* Flag indicating if the station + supports HT */ + bool vht; /* Flag indicating if the station + supports VHT */ + u32 ac_param[AC_MAX]; /* EDCA parameters */ + struct rwnx_tdls tdls; /* TDLS station information */ + struct rwnx_sta_stats stats; + enum nl80211_mesh_power_mode mesh_pm; /* link-specific mesh power save mode */ +#ifdef CONFIG_DYNAMIC_PERPWR + s8_l rssi_save; + s8_l per_pwrloss; + struct work_struct per_pwr_work; + unsigned long last_jiffies; +#endif +#ifdef CONFIG_BAND_STEERING + u32_l link_time; + s8_l rssi; + u8_l support_band; +#endif +}; + +static inline const u8 *rwnx_sta_addr(struct rwnx_sta *rwnx_sta) { + return rwnx_sta->mac_addr; +} + +#ifdef CONFIG_RWNX_SPLIT_TX_BUF +struct rwnx_amsdu_stats { + int done; + int failed; +}; +#endif + +struct rwnx_stats { + int cfm_balance[NX_TXQ_CNT]; + unsigned long last_rx, last_tx; /* jiffies */ + int ampdus_tx[IEEE80211_MAX_AMPDU_BUF]; + int ampdus_rx[IEEE80211_MAX_AMPDU_BUF]; + int ampdus_rx_map[4]; + int ampdus_rx_miss; +#ifdef CONFIG_RWNX_SPLIT_TX_BUF + struct rwnx_amsdu_stats amsdus[NX_TX_PAYLOAD_MAX]; +#endif + int amsdus_rx[64]; +}; + +struct rwnx_sec_phy_chan { + u16 prim20_freq; + u16 center_freq1; + u16 center_freq2; + enum nl80211_band band; + u8 type; +}; + +/* Structure that will contains all RoC information received from cfg80211 */ +struct rwnx_roc_elem { + struct wireless_dev *wdev; + struct ieee80211_channel *chan; + unsigned int duration; + /* Used to avoid call of CFG80211 callback upon expiration of RoC */ + bool mgmt_roc; + /* Indicate if we have switch on the RoC channel */ + bool on_chan; +}; + +/* Structure containing channel survey information received from MAC */ +struct rwnx_survey_info { + // Filled + u32 filled; + // Amount of time in ms the radio spent on the channel + u32 chan_time_ms; + // Amount of time the primary channel was sensed busy + u32 chan_time_busy_ms; + // Noise in dbm + s8 noise_dbm; +}; + +#define RWNX_CH_NOT_SET 0xFF +#define RWNX_INVALID_VIF 0xFF +#define RWNX_INVALID_STA 0xFF + +/* Structure containing channel context information */ +struct rwnx_chanctx { + struct cfg80211_chan_def chan_def; /* channel description */ + u8 count; /* number of vif using 
this ctxt */ +}; + +/** + * rwnx_phy_info - Phy information + * + * @phy_cnt: Number of phy interface + * @cfg: Configuration send to firmware + * @sec_chan: Channel configuration of the second phy interface (if phy_cnt > 1) + * @limit_bw: Set to true to limit BW on requested channel. Only set to use + * VHT with old radio that don't support 80MHz (deprecated) + */ +struct rwnx_phy_info { + u8 cnt; + struct phy_cfg_tag cfg; + struct rwnx_sec_phy_chan sec_chan; + bool limit_bw; +}; + +struct defrag_ctrl_info { + struct list_head list; + u8 sta_idx; + u8 tid; + u16 sn; + u8 next_fn; + u16 frm_len; + struct sk_buff *skb; + struct timer_list defrag_timer; + struct rwnx_hw *rwnx_hw; +}; + +struct amsdu_subframe_hdr { + u8 da[6]; + u8 sa[6]; + u16 sublen; +}; + +/* rwnx driver status */ +void rwnx_set_conn_state(atomic_t *drv_conn_state, int state); + +enum rwnx_drv_connect_status { + RWNX_DRV_STATUS_DISCONNECTED = 0, + RWNX_DRV_STATUS_DISCONNECTING, + RWNX_DRV_STATUS_CONNECTING, + RWNX_DRV_STATUS_CONNECTED, + RWNX_DRV_STATUS_ROAMING, +}; + +static const char *const s_conn_state[] = { + "RWNX_DRV_STATUS_DISCONNECTED", + "RWNX_DRV_STATUS_DISCONNECTING", + "RWNX_DRV_STATUS_CONNECTING", + "RWNX_DRV_STATUS_CONNECTED", + "RWNX_DRV_STATUS_ROAMING", +}; + + +struct sta_tx_flowctrl { + atomic_t tx_pending_cnt; + u8 flowctrl; +}; + +struct rwnx_hw { + struct rwnx_mod_params *mod_params; + struct device *dev; +#ifdef AICWF_SDIO_SUPPORT + struct aic_sdio_dev *sdiodev; +#endif +#ifdef AICWF_USB_SUPPORT + struct aic_usb_dev *usbdev; +#endif + struct wiphy *wiphy; + struct list_head vifs; + struct rwnx_vif *vif_table[NX_VIRT_DEV_MAX + NX_REMOTE_STA_MAX]; /* indexed with fw id */ + struct rwnx_sta sta_table[NX_REMOTE_STA_MAX + NX_VIRT_DEV_MAX]; + #ifdef CONFIG_HE_FOR_OLD_KERNEL + struct aic_sta aic_table[NX_REMOTE_STA_MAX + NX_VIRT_DEV_MAX]; + #endif + struct rwnx_survey_info survey[SCAN_CHANNEL_MAX]; + struct cfg80211_scan_request *scan_request; +#ifdef CONFIG_SCHED_SCAN + struct cfg80211_sched_scan_request *sched_scan_req; +#endif + struct rwnx_chanctx chanctx_table[NX_CHAN_CTXT_CNT]; + u8 cur_chanctx; + + u8 monitor_vif; /* FW id of the monitor interface, RWNX_INVALID_VIF if no monitor vif at fw level */ +#ifdef CONFIG_FILTER_TCP_ACK + /* tcp ack management */ + struct tcp_ack_manage ack_m; +#endif + /* RoC Management */ + struct rwnx_roc_elem *roc_elem; /* Information provided by cfg80211 in its remain on channel request */ + u32 roc_cookie_cnt; /* Counter used to identify RoC request sent by cfg80211 */ + + struct rwnx_cmd_mgr *cmd_mgr; + + struct rwnx_plat *plat; + + spinlock_t tx_lock; + spinlock_t cb_lock; + struct mutex mutex; /* per-device perimeter lock */ + + struct tasklet_struct task; + struct mm_version_cfm version_cfm; /* Lower layers versions - obtained via MM_VERSION_REQ */ + + u32 tcp_pacing_shift; + + /* IPC */ + struct ipc_host_env_tag *ipc_env; +#ifdef AICWF_SDIO_SUPPORT + struct sdio_host_env_tag sdio_env; +#endif +#ifdef AICWF_USB_SUPPORT + struct usb_host_env_tag usb_env; +#endif + + struct rwnx_ipc_elem_pool e2amsgs_pool; + struct rwnx_ipc_elem_pool dbgmsgs_pool; + struct rwnx_ipc_elem_pool e2aradars_pool; + struct rwnx_ipc_elem_var pattern_elem; + struct rwnx_ipc_dbgdump_elem dbgdump_elem; + struct rwnx_ipc_elem_pool e2arxdesc_pool; + struct rwnx_ipc_skb_elem *e2aunsuprxvec_elems; + //struct rwnx_ipc_rxbuf_elems rxbuf_elems; + struct rwnx_ipc_elem_var scan_ie; + + struct kmem_cache *sw_txhdr_cache; + + struct rwnx_debugfs debugfs; + struct rwnx_stats stats; + +#ifdef 
CONFIG_PREALLOC_TXQ + struct rwnx_txq *txq; +#else + struct rwnx_txq txq[NX_NB_TXQ]; +#endif + struct rwnx_hwq hwq[NX_TXQ_CNT]; + + u64 avail_idx_map; + u8 vif_started; + bool adding_sta; + struct rwnx_phy_info phy; + + struct rwnx_radar radar; + + /* extended capabilities supported */ + u8 ext_capa[8]; + +#ifdef CONFIG_RWNX_MUMIMO_TX + struct rwnx_mu_info mu; +#endif + u8 is_p2p_alive; + u8 is_p2p_connected; + struct timer_list p2p_alive_timer; + struct rwnx_vif *p2p_dev_vif; + atomic_t p2p_alive_timer_count; + bool band_5g_support; + bool fwlog_en; + bool scanning; + bool p2p_working; + + struct list_head defrag_list; + spinlock_t defrag_lock; + + struct work_struct apmStalossWork; + struct workqueue_struct *apmStaloss_wq; + u8 apm_vif_idx; + u8 sta_mac_addr[6]; + + struct wakeup_source *ws_rx; + struct wakeup_source *ws_irqrx; + struct wakeup_source *ws_tx; + struct wakeup_source *ws_pwrctrl; + +#ifdef CONFIG_SCHED_SCAN + bool is_sched_scan; +#endif//CONFIG_SCHED_SCAN + + struct sta_tx_flowctrl sta_flowctrl[NX_REMOTE_STA_MAX]; +#if 0 + bool he_flag; +#endif +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0) + struct mac_chan_op ap_chan; + struct ieee80211_channel set_chan; +#endif +#ifdef CONFIG_VHT_FOR_OLD_KERNEL + struct ieee80211_sta_vht_cap vht_cap_2G; + struct ieee80211_sta_vht_cap vht_cap_5G; +#endif + +#ifdef CONFIG_USE_WIRELESS_EXT + bool wext_scan; + struct completion wext_scan_com; + struct list_head wext_scanre_list; + char wext_essid[33]; + int support_freqs[SCAN_CHANNEL_MAX]; + int support_freqs_number; +#ifdef CONFIG_DYNAMIC_PWR + struct timer_list pwrloss_timer; + struct work_struct pwrloss_work; + struct rwnx_vif *read_rssi_vif; + s8 pwrloss_lvl; + u8 sta_rssi_idx; +#endif +#endif +#ifdef CONFIG_BAND_STEERING + u8_l iface_idx; + struct tmp_feature_sta feature_table[NX_REMOTE_STA_MAX + NX_VIRT_DEV_MAX]; +#endif +}; + +u8 *rwnx_build_bcn(struct rwnx_bcn *bcn, struct cfg80211_beacon_data *new); + +void rwnx_chanctx_link(struct rwnx_vif *vif, u8 idx, + struct cfg80211_chan_def *chandef); +void rwnx_chanctx_unlink(struct rwnx_vif *vif); +int rwnx_chanctx_valid(struct rwnx_hw *rwnx_hw, u8 idx); + +extern u8 chip_id; +static inline bool is_multicast_sta(int sta_idx) +{ + + if((g_rwnx_plat->usbdev->chipid == PRODUCT_ID_AIC8801) || + ((g_rwnx_plat->usbdev->chipid == PRODUCT_ID_AIC8800DC || + g_rwnx_plat->usbdev->chipid == PRODUCT_ID_AIC8800DW) && chip_id < 3)){ + return (sta_idx >= NX_REMOTE_STA_MAX_FOR_OLD_IC); + }else{ + return (sta_idx >= NX_REMOTE_STA_MAX); + } + +} +struct rwnx_sta *rwnx_get_sta(struct rwnx_hw *rwnx_hw, const u8 *mac_addr); + +static inline uint8_t master_vif_idx(struct rwnx_vif *vif) +{ + if (unlikely(vif->wdev.iftype == NL80211_IFTYPE_AP_VLAN)) { + return vif->ap_vlan.master->vif_index; + } else { + return vif->vif_index; + } +} + +void rwnx_external_auth_enable(struct rwnx_vif *vif); +void rwnx_external_auth_disable(struct rwnx_vif *vif); + +#endif /* _RWNX_DEFS_H_ */ diff --git a/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_dini.c b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_dini.c new file mode 100644 index 0000000..03e1ced --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_dini.c @@ -0,0 +1,294 @@ +/** + ****************************************************************************** + * + * @file rwnx_dini.c - Add support for dini platform + * + * Copyright (C) RivieraWaves 2012-2019 + * + ****************************************************************************** + */ + +#include "rwnx_dini.h" +#include "rwnx_defs.h" 
+#include "rwnx_irqs.h" +#include "reg_access.h" + +/* Config FPGA is accessed via bar0 */ +#define CFPGA_DMA0_CTRL_REG 0x02C +#define CFPGA_DMA1_CTRL_REG 0x04C +#define CFPGA_DMA2_CTRL_REG 0x06C +#define CFPGA_UINTR_SRC_REG 0x0E8 +#define CFPGA_UINTR_MASK_REG 0x0EC +#define CFPGA_BAR4_HIADDR_REG 0x100 +#define CFPGA_BAR4_LOADDR_REG 0x104 +#define CFPGA_BAR4_LOADDR_MASK_REG 0x110 +#define CFPGA_BAR_TOUT 0x120 + +#define CFPGA_DMA_CTRL_ENABLE 0x00001400 +#define CFPGA_DMA_CTRL_DISABLE 0x00001000 +#define CFPGA_DMA_CTRL_CLEAR 0x00001800 +#define CFPGA_DMA_CTRL_REREAD_TIME_MASK (BIT(10) - 1) + +#define CFPGA_BAR4_LOADDR_MASK_MAX 0xFF000000 + +#define CFPGA_PCIEX_IT 0x00000001 +#define CFPGA_ALL_ITS 0x0000000F + +/* Programmable BAR4 Window start address */ +#define CPU_RAM_WINDOW_HIGH 0x00000000 +#define CPU_RAM_WINDOW_LOW 0x00000000 +#define AHB_BRIDGE_WINDOW_HIGH 0x00000000 +#define AHB_BRIDGE_WINDOW_LOW 0x60000000 + +struct rwnx_dini +{ + u8 *pci_bar0_vaddr; + u8 *pci_bar4_vaddr; +}; + +static const u32 mv_cfg_fpga_dma_ctrl_regs[] = { + CFPGA_DMA0_CTRL_REG, + CFPGA_DMA1_CTRL_REG, + CFPGA_DMA2_CTRL_REG +}; + +/* This also clears running transactions */ +static void dini_dma_on(struct rwnx_dini *rwnx_dini) +{ + int i; + u32 reread_time; + volatile void *reg; + + for (i = 0; i < ARRAY_SIZE(mv_cfg_fpga_dma_ctrl_regs); i++) { + reg = rwnx_dini->pci_bar0_vaddr + mv_cfg_fpga_dma_ctrl_regs[i]; + reread_time = readl(reg) & CFPGA_DMA_CTRL_REREAD_TIME_MASK; + + writel(CFPGA_DMA_CTRL_CLEAR | reread_time, reg); + writel(CFPGA_DMA_CTRL_ENABLE | reread_time, reg); + } +} + +/* This also clears running transactions */ +static void dini_dma_off(struct rwnx_dini *rwnx_dini) +{ + int i; + u32 reread_time; + volatile void *reg; + + for (i = 0; i < ARRAY_SIZE(mv_cfg_fpga_dma_ctrl_regs); i++) { + reg = rwnx_dini->pci_bar0_vaddr + mv_cfg_fpga_dma_ctrl_regs[i]; + reread_time = readl(reg) & CFPGA_DMA_CTRL_REREAD_TIME_MASK; + + writel(CFPGA_DMA_CTRL_DISABLE | reread_time, reg); + writel(CFPGA_DMA_CTRL_CLEAR | reread_time, reg); + } +} + + +/* Configure address range for BAR4. + * By default BAR4_LOADDR_MASK value is 0xFF000000, then there is no need to + * change it because the addresses we need to access are covered by this mask + */ +static void dini_set_bar4_win(u32 low, u32 high, struct rwnx_dini *rwnx_dini) +{ + writel(low, rwnx_dini->pci_bar0_vaddr + CFPGA_BAR4_LOADDR_REG); + writel(high, rwnx_dini->pci_bar0_vaddr + CFPGA_BAR4_HIADDR_REG); + writel(CFPGA_BAR4_LOADDR_MASK_MAX, + rwnx_dini->pci_bar0_vaddr + CFPGA_BAR4_LOADDR_MASK_REG); +} + + +/** + * Enable User Interrupts of CFPGA that trigger PCIe IRQs on PCIE_10 + * and request the corresponding IRQ line + */ +int rwnx_cfpga_irq_enable(struct rwnx_hw *rwnx_hw) +{ + struct rwnx_plat *rwnx_plat = rwnx_hw->plat; + struct rwnx_dini *rwnx_dini = (struct rwnx_dini *)rwnx_plat->priv; + unsigned int cfpga_uintr_mask; + volatile void *reg; + int ret; + + /* sched_setscheduler on ONESHOT threaded irq handler for BCNs ? 
*/ + if ((ret = request_irq(rwnx_hw->plat->pci_dev->irq, rwnx_irq_hdlr, 0, + "rwnx", rwnx_hw))) + return ret; + + reg = rwnx_dini->pci_bar0_vaddr + CFPGA_UINTR_MASK_REG; + cfpga_uintr_mask = readl(reg); + writel(cfpga_uintr_mask | CFPGA_PCIEX_IT, reg); + + return ret; +} + +/** + * Disable User Interrupts of CFPGA that trigger PCIe IRQs on PCIE_10 + * and free the corresponding IRQ line + */ +int rwnx_cfpga_irq_disable(struct rwnx_hw *rwnx_hw) +{ + struct rwnx_plat *rwnx_plat = rwnx_hw->plat; + struct rwnx_dini *rwnx_dini = (struct rwnx_dini *)rwnx_plat->priv; + unsigned int cfpga_uintr_mask; + volatile void *reg; + + reg = rwnx_dini->pci_bar0_vaddr + CFPGA_UINTR_MASK_REG; + cfpga_uintr_mask = readl(reg); + writel(cfpga_uintr_mask & ~CFPGA_PCIEX_IT, reg); + + free_irq(rwnx_hw->plat->pci_dev->irq, rwnx_hw); + + return 0; +} + +static int rwnx_dini_platform_enable(struct rwnx_hw *rwnx_hw) +{ + struct rwnx_plat *rwnx_plat = rwnx_hw->plat; + struct rwnx_dini *rwnx_dini = (struct rwnx_dini *)rwnx_plat->priv; + +#ifdef CONFIG_RWNX_SDM + writel(0x0000FFFF, rwnx_dini->pci_bar0_vaddr + CFPGA_BAR_TOUT); +#endif + + dini_dma_on(rwnx_dini); + return rwnx_cfpga_irq_enable(rwnx_hw); +} + +static int rwnx_dini_platform_disable(struct rwnx_hw *rwnx_hw) +{ + struct rwnx_plat *rwnx_plat = rwnx_hw->plat; + struct rwnx_dini *rwnx_dini = (struct rwnx_dini *)rwnx_plat->priv; + int ret; + + ret = rwnx_cfpga_irq_disable(rwnx_hw); + dini_dma_off(rwnx_dini); + return ret; +} + +static void rwnx_dini_platform_deinit(struct rwnx_plat *rwnx_plat) +{ + struct rwnx_dini *rwnx_dini = (struct rwnx_dini *)rwnx_plat->priv; + + pci_disable_device(rwnx_plat->pci_dev); + iounmap(rwnx_dini->pci_bar0_vaddr); + iounmap(rwnx_dini->pci_bar4_vaddr); + pci_release_regions(rwnx_plat->pci_dev); + + kfree(rwnx_plat); +} + +static u8* rwnx_dini_get_address(struct rwnx_plat *rwnx_plat, int addr_name, + unsigned int offset) +{ + struct rwnx_dini *rwnx_dini = (struct rwnx_dini *)rwnx_plat->priv; + + if (WARN(addr_name >= RWNX_ADDR_MAX, "Invalid address %d", addr_name)) + return NULL; + + if (addr_name == RWNX_ADDR_CPU) + dini_set_bar4_win(CPU_RAM_WINDOW_LOW, CPU_RAM_WINDOW_HIGH, rwnx_dini); + else + dini_set_bar4_win(AHB_BRIDGE_WINDOW_LOW, AHB_BRIDGE_WINDOW_HIGH, rwnx_dini); + + return rwnx_dini->pci_bar4_vaddr + offset; +} + +static void rwnx_dini_ack_irq(struct rwnx_plat *rwnx_plat) +{ + struct rwnx_dini *rwnx_dini = (struct rwnx_dini *)rwnx_plat->priv; + + writel(CFPGA_ALL_ITS, rwnx_dini->pci_bar0_vaddr + CFPGA_UINTR_SRC_REG); +} + +static const u32 rwnx_dini_config_reg[] = { + NXMAC_DEBUG_PORT_SEL_ADDR, + SYSCTRL_DIAG_CONF_ADDR, + RF_V6_DIAGPORT_CONF1_ADDR, + RF_v6_PHYDIAG_CONF1_ADDR, +}; + +static int rwnx_dini_get_config_reg(struct rwnx_plat *rwnx_plat, const u32 **list) +{ + if (!list) + return 0; + + *list = rwnx_dini_config_reg; + return ARRAY_SIZE(rwnx_dini_config_reg); +} + +/** + * rwnx_dini_platform_init - Initialize the DINI platform + * + * @pci_dev PCI device + * @rwnx_plat Pointer on struct rwnx_stat * to be populated + * + * @return 0 on success, < 0 otherwise + * + * Allocate and initialize a rwnx_plat structure for the dini platform. 
+ */ +int rwnx_dini_platform_init(struct pci_dev *pci_dev, struct rwnx_plat **rwnx_plat) +{ + struct rwnx_dini *rwnx_dini; + u16 pci_cmd; + int ret = 0; + + *rwnx_plat = kzalloc(sizeof(struct rwnx_plat) + sizeof(struct rwnx_dini), + GFP_KERNEL); + if (!*rwnx_plat) + return -ENOMEM; + + rwnx_dini = (struct rwnx_dini *)(*rwnx_plat)->priv; + + /* Hotplug fixups */ + pci_read_config_word(pci_dev, PCI_COMMAND, &pci_cmd); + pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR; + pci_write_config_word(pci_dev, PCI_COMMAND, pci_cmd); + pci_write_config_byte(pci_dev, PCI_CACHE_LINE_SIZE, L1_CACHE_BYTES >> 2); + + if ((ret = pci_enable_device(pci_dev))) { + dev_err(&(pci_dev->dev), "pci_enable_device failed\n"); + goto out_enable; + } + + pci_set_master(pci_dev); + + if ((ret = pci_request_regions(pci_dev, KBUILD_MODNAME))) { + dev_err(&(pci_dev->dev), "pci_request_regions failed\n"); + goto out_request; + } + + if (!(rwnx_dini->pci_bar0_vaddr = (u8 *)pci_ioremap_bar(pci_dev, 0))) { + dev_err(&(pci_dev->dev), "pci_ioremap_bar(%d) failed\n", 0); + ret = -ENOMEM; + goto out_bar0; + } + if (!(rwnx_dini->pci_bar4_vaddr = (u8 *)pci_ioremap_bar(pci_dev, 4))) { + dev_err(&(pci_dev->dev), "pci_ioremap_bar(%d) failed\n", 4); + ret = -ENOMEM; + goto out_bar4; + } + + (*rwnx_plat)->enable = rwnx_dini_platform_enable; + (*rwnx_plat)->disable = rwnx_dini_platform_disable; + (*rwnx_plat)->deinit = rwnx_dini_platform_deinit; + (*rwnx_plat)->get_address = rwnx_dini_get_address; + (*rwnx_plat)->ack_irq = rwnx_dini_ack_irq; + (*rwnx_plat)->get_config_reg = rwnx_dini_get_config_reg; + +#ifdef CONFIG_RWNX_SDM + writel(0x0000FFFF, rwnx_dini->pci_bar0_vaddr + CFPGA_BAR_TOUT); +#endif + + return 0; + + out_bar4: + iounmap(rwnx_dini->pci_bar0_vaddr); + out_bar0: + pci_release_regions(pci_dev); + out_request: + pci_disable_device(pci_dev); + out_enable: + kfree(*rwnx_plat); + return ret; +} diff --git a/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_dini.h b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_dini.h new file mode 100644 index 0000000..2d8cf35 --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_dini.h @@ -0,0 +1,20 @@ +/** + **************************************************************************************** + * + * @file rwnx_dini.h + * + * Copyright (C) RivieraWaves 2012-2019 + * + ****************************************************************************** + */ + +#ifndef _RWNX_DINI_H_ +#define _RWNX_DINI_H_ + +#include +#include "rwnx_platform.h" + +int rwnx_dini_platform_init(struct pci_dev *pci_dev, + struct rwnx_plat **rwnx_plat); + +#endif /* _RWNX_DINI_H_ */ diff --git a/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_events.h b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_events.h new file mode 100644 index 0000000..7fba509 --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_events.h @@ -0,0 +1,1243 @@ +/** + ****************************************************************************** + * + * @file rwnx_events.h + * + * @brief Trace events definition + * + * Copyright (C) RivieraWaves 2012-2019 + * + ****************************************************************************** + */ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM rwnx + +#if !defined(_RWNX_EVENTS_H) || defined(TRACE_HEADER_MULTI_READ) +#define _RWNX_EVENTS_H + +#include +#ifndef CONFIG_RWNX_FHOST +#include "rwnx_tx.h" +#endif +#include "rwnx_compat.h" + +/***************************************************************************** + * TRACE function for MGMT TX (FULLMAC) + 
****************************************************************************/ +#ifdef CONFIG_RWNX_FULLMAC +#include "linux/ieee80211.h" +#if defined(CONFIG_TRACEPOINTS) && defined(CREATE_TRACE_POINTS) +#include + +/* P2P Public Action Frames Definitions (see WiFi P2P Technical Specification, section 4.2.8) */ +/* IEEE 802.11 Public Action Usage Category - Define P2P public action frames */ +#define MGMT_ACTION_PUBLIC_CAT (0x04) +/* Offset of OUI Subtype field in P2P Action Frame format */ +#define MGMT_ACTION_OUI_SUBTYPE_OFFSET (6) +/* P2P Public Action Frame Types */ +enum p2p_action_type { + P2P_ACTION_GO_NEG_REQ = 0, /* GO Negociation Request */ + P2P_ACTION_GO_NEG_RSP, /* GO Negociation Response */ + P2P_ACTION_GO_NEG_CFM, /* GO Negociation Confirmation */ + P2P_ACTION_INVIT_REQ, /* P2P Invitation Request */ + P2P_ACTION_INVIT_RSP, /* P2P Invitation Response */ + P2P_ACTION_DEV_DISC_REQ, /* Device Discoverability Request */ + P2P_ACTION_DEV_DISC_RSP, /* Device Discoverability Response */ + P2P_ACTION_PROV_DISC_REQ, /* Provision Discovery Request */ + P2P_ACTION_PROV_DISC_RSP, /* Provision Discovery Response */ +}; + +const char *ftrace_print_mgmt_info(struct trace_seq *p, u16 frame_control, u8 cat, u8 type, u8 p2p) { + const char *ret = trace_seq_buffer_ptr(p); + + switch (frame_control & IEEE80211_FCTL_STYPE) { + case (IEEE80211_STYPE_ASSOC_REQ): trace_seq_printf(p, "Association Request"); break; + case (IEEE80211_STYPE_ASSOC_RESP): trace_seq_printf(p, "Association Response"); break; + case (IEEE80211_STYPE_REASSOC_REQ): trace_seq_printf(p, "Reassociation Request"); break; + case (IEEE80211_STYPE_REASSOC_RESP): trace_seq_printf(p, "Reassociation Response"); break; + case (IEEE80211_STYPE_PROBE_REQ): trace_seq_printf(p, "Probe Request"); break; + case (IEEE80211_STYPE_PROBE_RESP): trace_seq_printf(p, "Probe Response"); break; + case (IEEE80211_STYPE_BEACON): trace_seq_printf(p, "Beacon"); break; + case (IEEE80211_STYPE_ATIM): trace_seq_printf(p, "ATIM"); break; + case (IEEE80211_STYPE_DISASSOC): trace_seq_printf(p, "Disassociation"); break; + case (IEEE80211_STYPE_AUTH): trace_seq_printf(p, "Authentication"); break; + case (IEEE80211_STYPE_DEAUTH): trace_seq_printf(p, "Deauthentication"); break; + case (IEEE80211_STYPE_ACTION): + trace_seq_printf(p, "Action"); + if (cat == MGMT_ACTION_PUBLIC_CAT && type == 0x9) + switch (p2p) { + case (P2P_ACTION_GO_NEG_REQ): trace_seq_printf(p, ": GO Negociation Request"); break; + case (P2P_ACTION_GO_NEG_RSP): trace_seq_printf(p, ": GO Negociation Response"); break; + case (P2P_ACTION_GO_NEG_CFM): trace_seq_printf(p, ": GO Negociation Confirmation"); break; + case (P2P_ACTION_INVIT_REQ): trace_seq_printf(p, ": P2P Invitation Request"); break; + case (P2P_ACTION_INVIT_RSP): trace_seq_printf(p, ": P2P Invitation Response"); break; + case (P2P_ACTION_DEV_DISC_REQ): trace_seq_printf(p, ": Device Discoverability Request"); break; + case (P2P_ACTION_DEV_DISC_RSP): trace_seq_printf(p, ": Device Discoverability Response"); break; + case (P2P_ACTION_PROV_DISC_REQ): trace_seq_printf(p, ": Provision Discovery Request"); break; + case (P2P_ACTION_PROV_DISC_RSP): trace_seq_printf(p, ": Provision Discovery Response"); break; + default: trace_seq_printf(p, "Unknown p2p %d", p2p); break; + } + else { + switch (cat) { + case 0: trace_seq_printf(p, ":Spectrum %d", type); break; + case 1: trace_seq_printf(p, ":QOS %d", type); break; + case 2: trace_seq_printf(p, ":DLS %d", type); break; + case 3: trace_seq_printf(p, ":BA %d", type); break; + case 4: trace_seq_printf(p, 
":Public %d", type); break; + case 5: trace_seq_printf(p, ":Radio Measure %d", type); break; + case 6: trace_seq_printf(p, ":Fast BSS %d", type); break; + case 7: trace_seq_printf(p, ":HT Action %d", type); break; + case 8: trace_seq_printf(p, ":SA Query %d", type); break; + case 9: trace_seq_printf(p, ":Protected Public %d", type); break; + case 10: trace_seq_printf(p, ":WNM %d", type); break; + case 11: trace_seq_printf(p, ":Unprotected WNM %d", type); break; + case 12: trace_seq_printf(p, ":TDLS %d", type); break; + case 13: trace_seq_printf(p, ":Mesh %d", type); break; + case 14: trace_seq_printf(p, ":MultiHop %d", type); break; + case 15: trace_seq_printf(p, ":Self Protected %d", type); break; + case 126: trace_seq_printf(p, ":Vendor protected"); break; + case 127: trace_seq_printf(p, ":Vendor"); break; + default: trace_seq_printf(p, ":Unknown category %d", cat); break; + } + } + break; + default: trace_seq_printf(p, "Unknown subtype %d", frame_control & IEEE80211_FCTL_STYPE); break; + } + + trace_seq_putc(p, 0); + + return ret; +} +#endif /* defined(CONFIG_TRACEPOINTS) && defined(CREATE_TRACE_POINTS) */ + +#undef __print_mgmt_info +#define __print_mgmt_info(frame_control, cat, type, p2p) ftrace_print_mgmt_info(p, frame_control, cat, type, p2p) + +TRACE_EVENT( + roc, + TP_PROTO(u8 vif_idx, u16 freq, unsigned int duration), + TP_ARGS(vif_idx, freq, duration), + TP_STRUCT__entry( + __field(u8, vif_idx) + __field(u16, freq) + __field(unsigned int, duration) + ), + TP_fast_assign( + __entry->vif_idx = vif_idx; + __entry->freq = freq; + __entry->duration = duration; + ), + TP_printk("f=%d vif=%d dur=%d", + __entry->freq, __entry->vif_idx, __entry->duration) +); + +TRACE_EVENT( + cancel_roc, + TP_PROTO(u8 vif_idx), + TP_ARGS(vif_idx), + TP_STRUCT__entry( + __field(u8, vif_idx) + ), + TP_fast_assign( + __entry->vif_idx = vif_idx; + ), + TP_printk("vif=%d", __entry->vif_idx) +); + +TRACE_EVENT( + roc_exp, + TP_PROTO(u8 vif_idx), + TP_ARGS(vif_idx), + TP_STRUCT__entry( + __field(u8, vif_idx) + ), + TP_fast_assign( + __entry->vif_idx = vif_idx; + ), + TP_printk("vif=%d", __entry->vif_idx) +); + +TRACE_EVENT( + switch_roc, + TP_PROTO(u8 vif_idx), + TP_ARGS(vif_idx), + TP_STRUCT__entry( + __field(u8, vif_idx) + ), + TP_fast_assign( + __entry->vif_idx = vif_idx; + ), + TP_printk("vif=%d", __entry->vif_idx) +); + +DECLARE_EVENT_CLASS( + mgmt_template, + TP_PROTO(u16 freq, u8 vif_idx, u8 sta_idx, struct ieee80211_mgmt *mgmt), + TP_ARGS(freq, vif_idx, sta_idx, mgmt), + TP_STRUCT__entry( + __field(u16, freq) + __field(u8, vif_idx) + __field(u8, sta_idx) + __field(u16, frame_control) + __field(u8, action_cat) + __field(u8, action_type) + __field(u8, action_p2p) + ), + TP_fast_assign( + __entry->freq = freq; + __entry->vif_idx = vif_idx; + __entry->sta_idx = sta_idx; + __entry->frame_control = mgmt->frame_control; + __entry->action_cat = mgmt->u.action.category; + __entry->action_type = mgmt->u.action.u.wme_action.action_code; + __entry->action_p2p = *((u8 *)&mgmt->u.action.category + + MGMT_ACTION_OUI_SUBTYPE_OFFSET); + ), + TP_printk("f=%d vif=%d sta=%d -> %s", + __entry->freq, __entry->vif_idx, __entry->sta_idx, + __print_mgmt_info(__entry->frame_control, __entry->action_cat, + __entry->action_type, __entry->action_p2p)) +); + +DEFINE_EVENT(mgmt_template, mgmt_tx, + TP_PROTO(u16 freq, u8 vif_idx, u8 sta_idx, struct ieee80211_mgmt *mgmt), + TP_ARGS(freq, vif_idx, sta_idx, mgmt)); + +DEFINE_EVENT(mgmt_template, mgmt_rx, + TP_PROTO(u16 freq, u8 vif_idx, u8 sta_idx, struct ieee80211_mgmt *mgmt), + 
TP_ARGS(freq, vif_idx, sta_idx, mgmt)); + +TRACE_EVENT( + mgmt_cfm, + TP_PROTO(u8 vif_idx, u8 sta_idx, bool acked), + TP_ARGS(vif_idx, sta_idx, acked), + TP_STRUCT__entry( + __field(u8, vif_idx) + __field(u8, sta_idx) + __field(bool, acked) + ), + TP_fast_assign( + __entry->vif_idx = vif_idx; + __entry->sta_idx = sta_idx; + __entry->acked = acked; + ), + TP_printk("vif=%d sta=%d ack=%d", + __entry->vif_idx, __entry->sta_idx, __entry->acked) +); +#endif /* CONFIG_RWNX_FULLMAC */ + +/***************************************************************************** + * TRACE function for TXQ + ****************************************************************************/ +#ifndef CONFIG_RWNX_FHOST +#if defined(CONFIG_TRACEPOINTS) && defined(CREATE_TRACE_POINTS) + +#include +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) +#include +#else +#include +#endif + +const char * +ftrace_print_txq(struct trace_seq *p, int txq_idx) { + const char *ret = trace_seq_buffer_ptr(p); + + if (txq_idx == TXQ_INACTIVE) { + trace_seq_printf(p, "[INACTIVE]"); + } else if (txq_idx < NX_FIRST_VIF_TXQ_IDX) { + trace_seq_printf(p, "[STA %d/%d]", + txq_idx / NX_NB_TXQ_PER_STA, + txq_idx % NX_NB_TXQ_PER_STA); +#ifdef CONFIG_RWNX_FULLMAC + } else if (txq_idx < NX_FIRST_UNK_TXQ_IDX) { + trace_seq_printf(p, "[BC/MC %d]", + txq_idx - NX_FIRST_BCMC_TXQ_IDX); + } else if (txq_idx < NX_OFF_CHAN_TXQ_IDX) { + trace_seq_printf(p, "[UNKNOWN %d]", + txq_idx - NX_FIRST_UNK_TXQ_IDX); + } else if (txq_idx == NX_OFF_CHAN_TXQ_IDX) { + trace_seq_printf(p, "[OFFCHAN]"); +#else + } else if (txq_idx < NX_NB_TXQ) { + txq_idx -= NX_FIRST_VIF_TXQ_IDX; + trace_seq_printf(p, "[VIF %d/%d]", + txq_idx / NX_NB_TXQ_PER_VIF, + txq_idx % NX_NB_TXQ_PER_VIF); +#endif + } else { + trace_seq_printf(p, "[ERROR %d]", txq_idx); + } + + trace_seq_putc(p, 0); + + return ret; +} + +const char * +ftrace_print_sta(struct trace_seq *p, int sta_idx) { + const char *ret = trace_seq_buffer_ptr(p); + + if (sta_idx < NX_REMOTE_STA_MAX) { + trace_seq_printf(p, "[STA %d]", sta_idx); + } else { + trace_seq_printf(p, "[BC/MC %d]", sta_idx - NX_REMOTE_STA_MAX); + } + + trace_seq_putc(p, 0); + + return ret; +} + +const char * +ftrace_print_hwq(struct trace_seq *p, int hwq_idx) { + + static const struct trace_print_flags symbols[] = + {{RWNX_HWQ_BK, "BK"}, + {RWNX_HWQ_BE, "BE"}, + {RWNX_HWQ_VI, "VI"}, + {RWNX_HWQ_VO, "VO"}, +#ifdef CONFIG_RWNX_FULLMAC + {RWNX_HWQ_BCMC, "BCMC"}, +#else + {RWNX_HWQ_BCN, "BCN"}, +#endif + { -1, NULL }}; + return trace_print_symbols_seq(p, hwq_idx, symbols); +} + +const char * +ftrace_print_hwq_cred(struct trace_seq *p, u8 *cred) { + const char *ret = trace_seq_buffer_ptr(p); + +#if CONFIG_USER_MAX == 1 + trace_seq_printf(p, "%d", cred[0]); +#else + int i; + + for (i = 0; i < CONFIG_USER_MAX - 1; i++) + trace_seq_printf(p, "%d-", cred[i]); + trace_seq_printf(p, "%d", cred[i]); +#endif + + trace_seq_putc(p, 0); + return ret; +} + +const char * +ftrace_print_mu_info(struct trace_seq *p, u8 mu_info) { + const char *ret = trace_seq_buffer_ptr(p); + + if (mu_info) + trace_seq_printf(p, "MU: %d-%d", (mu_info & 0x3f), (mu_info >> 6)); + + trace_seq_putc(p, 0); + return ret; +} + +const char * +ftrace_print_mu_group(struct trace_seq *p, int nb_user, u8 *users) { + const char *ret = trace_seq_buffer_ptr(p); + int i; + + if (users[0] != 0xff) + trace_seq_printf(p, "(%d", users[0]); + else + trace_seq_printf(p, "(-"); + for (i = 1; i < CONFIG_USER_MAX ; i++) { + if (users[i] != 0xff) + trace_seq_printf(p, ",%d", users[i]); + else + trace_seq_printf(p, ",-"); 
+ } + + trace_seq_printf(p, ")"); + trace_seq_putc(p, 0); + return ret; +} + +const char * +ftrace_print_amsdu(struct trace_seq *p, u16 nb_pkt) { + const char *ret = trace_seq_buffer_ptr(p); + + if (nb_pkt > 1) + trace_seq_printf(p, "(AMSDU %d)", nb_pkt); + + trace_seq_putc(p, 0); + return ret; +} +#endif /* defined(CONFIG_TRACEPOINTS) && defined(CREATE_TRACE_POINTS) */ + +#undef __print_txq +#define __print_txq(txq_idx) ftrace_print_txq(p, txq_idx) + +#undef __print_sta +#define __print_sta(sta_idx) ftrace_print_sta(p, sta_idx) + +#undef __print_hwq +#define __print_hwq(hwq) ftrace_print_hwq(p, hwq) + +#undef __print_hwq_cred +#define __print_hwq_cred(cred) ftrace_print_hwq_cred(p, cred) + +#undef __print_mu_info +#define __print_mu_info(mu_info) ftrace_print_mu_info(p, mu_info) + +#undef __print_mu_group +#define __print_mu_group(nb, users) ftrace_print_mu_group(p, nb, users) + +#undef __print_amsdu +#define __print_amsdu(nb_pkt) ftrace_print_amsdu(p, nb_pkt) + +#ifdef CONFIG_RWNX_FULLMAC + +TRACE_EVENT( + txq_select, + TP_PROTO(int txq_idx, u16 pkt_ready_up, struct sk_buff *skb), + TP_ARGS(txq_idx, pkt_ready_up, skb), + TP_STRUCT__entry( + __field(u16, txq_idx) + __field(u16, pkt_ready) + __field(struct sk_buff *, skb) + ), + TP_fast_assign( + __entry->txq_idx = txq_idx; + __entry->pkt_ready = pkt_ready_up; + __entry->skb = skb; + ), + TP_printk("%s pkt_ready_up=%d skb=%p", __print_txq(__entry->txq_idx), + __entry->pkt_ready, __entry->skb) +); + +#endif /* CONFIG_RWNX_FULLMAC */ + +DECLARE_EVENT_CLASS( + hwq_template, + TP_PROTO(u8 hwq_idx), + TP_ARGS(hwq_idx), + TP_STRUCT__entry( + __field(u8, hwq_idx) + ), + TP_fast_assign( + __entry->hwq_idx = hwq_idx; + ), + TP_printk("%s", __print_hwq(__entry->hwq_idx)) +); + +DEFINE_EVENT(hwq_template, hwq_flowctrl_stop, + TP_PROTO(u8 hwq_idx), + TP_ARGS(hwq_idx)); + +DEFINE_EVENT(hwq_template, hwq_flowctrl_start, + TP_PROTO(u8 hwq_idx), + TP_ARGS(hwq_idx)); + + +DECLARE_EVENT_CLASS( + txq_template, + TP_PROTO(struct rwnx_txq *txq), + TP_ARGS(txq), + TP_STRUCT__entry( + __field(u16, txq_idx) + ), + TP_fast_assign( + __entry->txq_idx = txq->idx; + ), + TP_printk("%s", __print_txq(__entry->txq_idx)) +); + +DEFINE_EVENT(txq_template, txq_add_to_hw, + TP_PROTO(struct rwnx_txq *txq), + TP_ARGS(txq)); + +DEFINE_EVENT(txq_template, txq_del_from_hw, + TP_PROTO(struct rwnx_txq *txq), + TP_ARGS(txq)); + +#ifdef CONFIG_RWNX_FULLMAC + +DEFINE_EVENT(txq_template, txq_flowctrl_stop, + TP_PROTO(struct rwnx_txq *txq), + TP_ARGS(txq)); + +DEFINE_EVENT(txq_template, txq_flowctrl_restart, + TP_PROTO(struct rwnx_txq *txq), + TP_ARGS(txq)); + +#endif /* CONFIG_RWNX_FULLMAC */ + +TRACE_EVENT( + process_txq, + TP_PROTO(struct rwnx_txq *txq), + TP_ARGS(txq), + TP_STRUCT__entry( + __field(u16, txq_idx) + __field(u16, len) + __field(u16, len_retry) + __field(s8, credit) + #ifdef CONFIG_RWNX_FULLMAC + __field(u16, limit) + #endif /* CONFIG_RWNX_FULLMAC*/ + ), + TP_fast_assign( + __entry->txq_idx = txq->idx; + __entry->len = skb_queue_len(&txq->sk_list); + #ifdef CONFIG_MAC80211_TXQ + __entry->len += txq->nb_ready_mac80211; + #endif + __entry->len_retry = txq->nb_retry; + __entry->credit = txq->credits; + #ifdef CONFIG_RWNX_FULLMAC + __entry->limit = txq->push_limit; + #endif /* CONFIG_RWNX_FULLMAC*/ + ), + + #ifdef CONFIG_RWNX_FULLMAC + TP_printk("%s txq_credits=%d, len=%d, retry_len=%d, push_limit=%d", + __print_txq(__entry->txq_idx), __entry->credit, + __entry->len, __entry->len_retry, __entry->limit) + #else + TP_printk("%s txq_credits=%d, len=%d, retry_len=%d", + 
__print_txq(__entry->txq_idx), __entry->credit, + __entry->len, __entry->len_retry) + #endif /* CONFIG_RWNX_FULLMAC*/ +); + +DECLARE_EVENT_CLASS( + txq_reason_template, + TP_PROTO(struct rwnx_txq *txq, u16 reason), + TP_ARGS(txq, reason), + TP_STRUCT__entry( + __field(u16, txq_idx) + __field(u16, reason) + __field(u16, status) + ), + TP_fast_assign( + __entry->txq_idx = txq->idx; + __entry->reason = reason; + __entry->status = txq->status; + ), + TP_printk("%s reason=%s status=%s", + __print_txq(__entry->txq_idx), + __print_symbolic(__entry->reason, + {RWNX_TXQ_STOP_FULL, "FULL"}, + {RWNX_TXQ_STOP_CSA, "CSA"}, + {RWNX_TXQ_STOP_STA_PS, "PS"}, + {RWNX_TXQ_STOP_VIF_PS, "VPS"}, + {RWNX_TXQ_STOP_CHAN, "CHAN"}, + {RWNX_TXQ_STOP_MU_POS, "MU"}), + __print_flags(__entry->status, "|", + {RWNX_TXQ_IN_HWQ_LIST, "IN LIST"}, + {RWNX_TXQ_STOP_FULL, "FULL"}, + {RWNX_TXQ_STOP_CSA, "CSA"}, + {RWNX_TXQ_STOP_STA_PS, "PS"}, + {RWNX_TXQ_STOP_VIF_PS, "VPS"}, + {RWNX_TXQ_STOP_CHAN, "CHAN"}, + {RWNX_TXQ_STOP_MU_POS, "MU"}, + {RWNX_TXQ_NDEV_FLOW_CTRL, "FLW_CTRL"})) +); + +DEFINE_EVENT(txq_reason_template, txq_start, + TP_PROTO(struct rwnx_txq *txq, u16 reason), + TP_ARGS(txq, reason)); + +DEFINE_EVENT(txq_reason_template, txq_stop, + TP_PROTO(struct rwnx_txq *txq, u16 reason), + TP_ARGS(txq, reason)); + + +TRACE_EVENT( + push_desc, + TP_PROTO(struct sk_buff *skb, struct rwnx_sw_txhdr *sw_txhdr, int push_flags), + + TP_ARGS(skb, sw_txhdr, push_flags), + + TP_STRUCT__entry( + __field(struct sk_buff *, skb) + __field(unsigned int, len) + __field(u16, tx_queue) + __field(u8, hw_queue) + __field(u8, push_flag) + __field(u32, flag) + __field(s8, txq_cred) + __field(u8, hwq_cred) + __field(u16, pkt_cnt) + __field(u8, mu_info) + ), + TP_fast_assign( + __entry->skb = skb; + __entry->tx_queue = sw_txhdr->txq->idx; + __entry->push_flag = push_flags; + __entry->hw_queue = sw_txhdr->txq->hwq->id; + __entry->txq_cred = sw_txhdr->txq->credits; + //__entry->hwq_cred = sw_txhdr->txq->hwq->credits[RWNX_TXQ_POS_ID(sw_txhdr->txq)]; +#ifdef CONFIG_RWNX_SPLIT_TX_BUF + __entry->pkt_cnt = sw_txhdr->desc.host.packet_cnt; +#endif +#ifdef CONFIG_RWNX_FULLMAC + __entry->flag = sw_txhdr->desc.host.flags; +#ifdef CONFIG_RWNX_SPLIT_TX_BUF +#ifdef CONFIG_RWNX_AMSDUS_TX + if (sw_txhdr->amsdu.len) + __entry->len = sw_txhdr->amsdu.len; + else +#endif /* CONFIG_RWNX_AMSDUS_TX */ + __entry->len = sw_txhdr->desc.host.packet_len[0]; +#else + __entry->len = sw_txhdr->desc.host.packet_len; +#endif /* CONFIG_RWNX_SPLIT_TX_BUF */ + +#else /* !CONFIG_RWNX_FULLMAC */ + __entry->flag = sw_txhdr->desc.umac.flags; + __entry->len = sw_txhdr->frame_len; + __entry->sn = sw_txhdr->sn; +#endif /* CONFIG_RWNX_FULLMAC */ +#ifdef CONFIG_RWNX_MUMIMO_TX + __entry->mu_info = sw_txhdr->desc.host.mumimo_info; +#else + __entry->mu_info = 0; +#endif + ), + +#ifdef CONFIG_RWNX_FULLMAC + TP_printk("%s skb=%p (len=%d) hw_queue=%s cred_txq=%d cred_hwq=%d %s flag=%s %s%s%s", + __print_txq(__entry->tx_queue), __entry->skb, __entry->len, + __print_hwq(__entry->hw_queue), + __entry->txq_cred, __entry->hwq_cred, + __print_mu_info(__entry->mu_info), + __print_flags(__entry->flag, "|", + {TXU_CNTRL_RETRY, "RETRY"}, + {TXU_CNTRL_MORE_DATA, "MOREDATA"}, + {TXU_CNTRL_MGMT, "MGMT"}, + {TXU_CNTRL_MGMT_NO_CCK, "NO_CCK"}, + {TXU_CNTRL_MGMT_ROBUST, "ROBUST"}, + {TXU_CNTRL_AMSDU, "AMSDU"}, + {TXU_CNTRL_USE_4ADDR, "4ADDR"}, + {TXU_CNTRL_EOSP, "EOSP"}, + {TXU_CNTRL_MESH_FWD, "MESH_FWD"}, + {TXU_CNTRL_TDLS, "TDLS"}), + (__entry->push_flag & RWNX_PUSH_IMMEDIATE) ? 
"(IMMEDIATE)" : "", + (!(__entry->flag & TXU_CNTRL_RETRY) && + (__entry->push_flag & RWNX_PUSH_RETRY)) ? "(SW_RETRY)" : "", + __print_amsdu(__entry->pkt_cnt)) +#else + TP_printk("%s skb=%p (len=%d) hw_queue=%s cred_txq=%d cred_hwq=%d %s flag=%x (%s) sn=%d %s", + __print_txq(__entry->tx_queue), __entry->skb, __entry->len, + __print_hwq(__entry->hw_queue), __entry->txq_cred, __entry->hwq_cred, + __print_mu_info(__entry->mu_info), + __entry->flag, + __print_flags(__entry->push_flag, "|", + {RWNX_PUSH_RETRY, "RETRY"}, + {RWNX_PUSH_IMMEDIATE, "IMMEDIATE"}), + __entry->sn, __print_amsdu(__entry->pkt_cnt)) +#endif /* CONFIG_RWNX_FULLMAC */ +); + + +TRACE_EVENT( + txq_queue_skb, + TP_PROTO(struct sk_buff *skb, struct rwnx_txq *txq, bool retry), + TP_ARGS(skb, txq, retry), + TP_STRUCT__entry( + __field(struct sk_buff *, skb) + __field(u16, txq_idx) + __field(s8, credit) + __field(u16, q_len) + __field(u16, q_len_retry) + __field(bool, retry) + ), + TP_fast_assign( + __entry->skb = skb; + __entry->txq_idx = txq->idx; + __entry->credit = txq->credits; + __entry->q_len = skb_queue_len(&txq->sk_list); + __entry->q_len_retry = txq->nb_retry; + __entry->retry = retry; + ), + + TP_printk("%s skb=%p retry=%d txq_credits=%d queue_len=%d (retry = %d)", + __print_txq(__entry->txq_idx), __entry->skb, __entry->retry, + __entry->credit, __entry->q_len, __entry->q_len_retry) +); + +#ifdef CONFIG_MAC80211_TXQ +TRACE_EVENT( + txq_wake, + TP_PROTO(struct rwnx_txq *txq), + TP_ARGS(txq), + TP_STRUCT__entry( + __field(u16, txq_idx) + __field(u16, q_len) + ), + TP_fast_assign( + __entry->txq_idx = txq->idx; + __entry->q_len = txq->nb_ready_mac80211; + ), + + TP_printk("%s mac80211_queue_len=%d", __print_txq(__entry->txq_idx), __entry->q_len) +); + +TRACE_EVENT( + txq_drop, + TP_PROTO(struct rwnx_txq *txq, unsigned long nb_drop), + TP_ARGS(txq, nb_drop), + TP_STRUCT__entry( + __field(u16, txq_idx) + __field(u16, nb_drop) + ), + TP_fast_assign( + __entry->txq_idx = txq->idx; + __entry->nb_drop = nb_drop; + ), + + TP_printk("%s %u pkt have been dropped by codel in mac80211 txq", + __print_txq(__entry->txq_idx), __entry->nb_drop) +); + +#endif + + +DECLARE_EVENT_CLASS( + idx_template, + TP_PROTO(u16 idx), + TP_ARGS(idx), + TP_STRUCT__entry( + __field(u16, idx) + ), + TP_fast_assign( + __entry->idx = idx; + ), + TP_printk("idx=%d", __entry->idx) +); + + +DEFINE_EVENT(idx_template, txq_vif_start, + TP_PROTO(u16 idx), + TP_ARGS(idx)); + +DEFINE_EVENT(idx_template, txq_vif_stop, + TP_PROTO(u16 idx), + TP_ARGS(idx)); + +TRACE_EVENT( + process_hw_queue, + TP_PROTO(struct rwnx_hwq *hwq), + TP_ARGS(hwq), + TP_STRUCT__entry( + __field(u16, hwq) + __array(u8, credits, CONFIG_USER_MAX) + ), + TP_fast_assign( + //int i; + __entry->hwq = hwq->id; + //for (i=0; i < CONFIG_USER_MAX; i ++) + // __entry->credits[i] = hwq->credits[i]; + ), + TP_printk("hw_queue=%s hw_credits=%s", + __print_hwq(__entry->hwq), __print_hwq_cred(__entry->credits)) +); + +DECLARE_EVENT_CLASS( + sta_idx_template, + TP_PROTO(u16 idx), + TP_ARGS(idx), + TP_STRUCT__entry( + __field(u16, idx) + ), + TP_fast_assign( + __entry->idx = idx; + ), + TP_printk("%s", __print_sta(__entry->idx)) +); + +DEFINE_EVENT(sta_idx_template, txq_sta_start, + TP_PROTO(u16 idx), + TP_ARGS(idx)); + +DEFINE_EVENT(sta_idx_template, txq_sta_stop, + TP_PROTO(u16 idx), + TP_ARGS(idx)); + +#ifdef CONFIG_RWNX_FULLMAC + +DEFINE_EVENT(sta_idx_template, ps_disable, + TP_PROTO(u16 idx), + TP_ARGS(idx)); + +#endif /* CONFIG_RWNX_FULLMAC */ + +TRACE_EVENT( + skb_confirm, + TP_PROTO(struct sk_buff *skb, 
struct rwnx_txq *txq, struct rwnx_hwq *hwq, +#ifdef CONFIG_RWNX_FULLMAC + struct tx_cfm_tag *cfm +#else + u8 cfm +#endif + ), + + TP_ARGS(skb, txq, hwq, cfm), + + TP_STRUCT__entry( + __field(struct sk_buff *, skb) + __field(u16, txq_idx) + __field(u8, hw_queue) + __array(u8, hw_credit, CONFIG_USER_MAX) + __field(s8, sw_credit) + __field(s8, sw_credit_up) +#ifdef CONFIG_RWNX_FULLMAC + __field(u8, ampdu_size) +#ifdef CONFIG_RWNX_SPLIT_TX_BUF + __field(u16, amsdu) +#endif /* CONFIG_RWNX_SPLIT_TX_BUF */ + __field(u16, sn) +#endif /* CONFIG_RWNX_FULLMAC*/ + ), + + TP_fast_assign( + //int i; + __entry->skb = skb; + __entry->txq_idx = txq->idx; + __entry->hw_queue = hwq->id; + //for (i = 0 ; i < CONFIG_USER_MAX ; i++) + // __entry->hw_credit[i] = hwq->credits[i]; + __entry->sw_credit = txq->credits; +#if defined CONFIG_RWNX_FULLMAC + __entry->sw_credit_up = cfm->credits; + __entry->ampdu_size = cfm->ampdu_size; +#ifdef CONFIG_RWNX_SPLIT_TX_BUF + __entry->amsdu = cfm->amsdu_size; + __entry->sn = cfm->sn; +#endif +#else + __entry->sw_credit_up = cfm +#endif /* CONFIG_RWNX_FULLMAC */ + ), + + TP_printk("%s skb=%p hw_queue=%s, hw_credits=%s, txq_credits=%d (+%d)" +#ifdef CONFIG_RWNX_FULLMAC + " sn=%u ampdu=%d" +#ifdef CONFIG_RWNX_SPLIT_TX_BUF + " amsdu=%u" +#endif +#endif + , __print_txq(__entry->txq_idx), __entry->skb, + __print_hwq(__entry->hw_queue), + __print_hwq_cred(__entry->hw_credit), + __entry->sw_credit, __entry->sw_credit_up +#ifdef CONFIG_RWNX_FULLMAC + , __entry->sn, __entry->ampdu_size +#ifdef CONFIG_RWNX_SPLIT_TX_BUF + , __entry->amsdu +#endif +#endif + ) +); + +TRACE_EVENT( + credit_update, + TP_PROTO(struct rwnx_txq *txq, s8_l cred_up), + + TP_ARGS(txq, cred_up), + + TP_STRUCT__entry( + __field(struct sk_buff *, skb) + __field(u16, txq_idx) + __field(s8, sw_credit) + __field(s8, sw_credit_up) + ), + + TP_fast_assign( + __entry->txq_idx = txq->idx; + __entry->sw_credit = txq->credits; + __entry->sw_credit_up = cred_up; + ), + + TP_printk("%s txq_credits=%d (%+d)", __print_txq(__entry->txq_idx), + __entry->sw_credit, __entry->sw_credit_up) +) + +#ifdef CONFIG_RWNX_FULLMAC + +DECLARE_EVENT_CLASS( + ps_template, + TP_PROTO(struct rwnx_sta *sta), + TP_ARGS(sta), + TP_STRUCT__entry( + __field(u16, idx) + __field(u16, ready_ps) + __field(u16, sp_ps) + __field(u16, ready_uapsd) + __field(u16, sp_uapsd) + ), + TP_fast_assign( + __entry->idx = sta->sta_idx; + __entry->ready_ps = sta->ps.pkt_ready[LEGACY_PS_ID]; + __entry->sp_ps = sta->ps.sp_cnt[LEGACY_PS_ID]; + __entry->ready_uapsd = sta->ps.pkt_ready[UAPSD_ID]; + __entry->sp_uapsd = sta->ps.sp_cnt[UAPSD_ID]; + ), + + TP_printk("%s [PS] ready=%d sp=%d [UAPSD] ready=%d sp=%d", + __print_sta(__entry->idx), __entry->ready_ps, __entry->sp_ps, + __entry->ready_uapsd, __entry->sp_uapsd) +); + +DEFINE_EVENT(ps_template, ps_queue, + TP_PROTO(struct rwnx_sta *sta), + TP_ARGS(sta)); + +DEFINE_EVENT(ps_template, ps_push, + TP_PROTO(struct rwnx_sta *sta), + TP_ARGS(sta)); + +DEFINE_EVENT(ps_template, ps_enable, + TP_PROTO(struct rwnx_sta *sta), + TP_ARGS(sta)); + +TRACE_EVENT( + ps_traffic_update, + TP_PROTO(u16 sta_idx, u8 traffic, bool uapsd), + + TP_ARGS(sta_idx, traffic, uapsd), + + TP_STRUCT__entry( + __field(u16, sta_idx) + __field(u8, traffic) + __field(bool, uapsd) + ), + + TP_fast_assign( + __entry->sta_idx = sta_idx; + __entry->traffic = traffic; + __entry->uapsd = uapsd; + ), + + TP_printk("%s %s%s traffic available ", __print_sta(__entry->sta_idx), + __entry->traffic ? "" : "no more ", + __entry->uapsd ? 
"U-APSD" : "legacy PS") +); + +TRACE_EVENT( + ps_traffic_req, + TP_PROTO(struct rwnx_sta *sta, u16 pkt_req, u8 ps_id), + TP_ARGS(sta, pkt_req, ps_id), + TP_STRUCT__entry( + __field(u16, idx) + __field(u16, pkt_req) + __field(u8, ps_id) + __field(u16, ready) + __field(u16, sp) + ), + TP_fast_assign( + __entry->idx = sta->sta_idx; + __entry->pkt_req = pkt_req; + __entry->ps_id = ps_id; + __entry->ready = sta->ps.pkt_ready[ps_id]; + __entry->sp = sta->ps.sp_cnt[ps_id]; + ), + + TP_printk("%s %s traffic request %d pkt (ready=%d, sp=%d)", + __print_sta(__entry->idx), + __entry->ps_id == UAPSD_ID ? "U-APSD" : "legacy PS" , + __entry->pkt_req, __entry->ready, __entry->sp) +); + + +#ifdef CONFIG_RWNX_AMSDUS_TX +TRACE_EVENT( + amsdu_subframe, + TP_PROTO(struct rwnx_sw_txhdr *sw_txhdr), + TP_ARGS(sw_txhdr), + TP_STRUCT__entry( + __field(struct sk_buff *, skb) + __field(u16, txq_idx) + __field(u8, nb) + __field(u32, len) + ), + TP_fast_assign( + __entry->skb = sw_txhdr->skb; + __entry->nb = sw_txhdr->amsdu.nb; + __entry->len = sw_txhdr->amsdu.len; + __entry->txq_idx = sw_txhdr->txq->idx; + ), + + TP_printk("%s skb=%p %s nb_subframe=%d, len=%u", + __print_txq(__entry->txq_idx), __entry->skb, + (__entry->nb == 2) ? "Start new AMSDU" : "Add subframe", + __entry->nb, __entry->len) +); +#endif + +#endif /* CONFIG_RWNX_FULLMAC */ + +#ifdef CONFIG_RWNX_MUMIMO_TX +TRACE_EVENT( + mu_group_update, + TP_PROTO(struct rwnx_mu_group *group), + TP_ARGS(group), + TP_STRUCT__entry( + __field(u8, nb_user) + __field(u8, group_id) + __array(u8, users, CONFIG_USER_MAX) + ), + TP_fast_assign( + int i; + __entry->nb_user = group->user_cnt; + for (i = 0; i < CONFIG_USER_MAX ; i++) { + if (group->users[i]) { + __entry->users[i] = group->users[i]->sta_idx; + } else { + __entry->users[i] = 0xff; + } + } + + __entry->group_id = group->group_id; + ), + + TP_printk("Group-id = %d, Users = %s", + __entry->group_id, + __print_mu_group(__entry->nb_user, __entry->users)) +); + +TRACE_EVENT( + mu_group_delete, + TP_PROTO(int group_id), + TP_ARGS(group_id), + TP_STRUCT__entry( + __field(u8, group_id) + ), + TP_fast_assign( + __entry->group_id = group_id; + ), + + TP_printk("Group-id = %d", __entry->group_id) +); + +TRACE_EVENT( + mu_group_selection, + TP_PROTO(struct rwnx_sta *sta, int group_id), + TP_ARGS(sta, group_id), + TP_STRUCT__entry( + __field(u8, sta_idx) + __field(u8, group_id) + ), + TP_fast_assign( + __entry->sta_idx = sta->sta_idx; + __entry->group_id = group_id; + ), + + TP_printk("[Sta %d] Group-id = %d", __entry->sta_idx, __entry->group_id) +); + +TRACE_EVENT( + txq_select_mu_group, + TP_PROTO(struct rwnx_txq *txq, int group_id, int pos), + + TP_ARGS(txq, group_id, pos), + + TP_STRUCT__entry( + __field(u16, txq_idx) + __field(u8, group_id) + __field(u8, pos) + ), + TP_fast_assign( + __entry->txq_idx = txq->idx; + __entry->group_id = group_id; + __entry->pos = pos; + ), + + TP_printk("%s: group=%d pos=%d", __print_txq(__entry->txq_idx), + __entry->group_id, __entry->pos) +); + +#endif /* CONFIG_RWNX_MUMIMO_TX */ +#endif /* ! 
CONFIG_RWNX_FHOST */ + +/***************************************************************************** + * TRACE functions for MESH + ****************************************************************************/ +#ifdef CONFIG_RWNX_FULLMAC +DECLARE_EVENT_CLASS( + mesh_path_template, + TP_PROTO(struct rwnx_mesh_path *mesh_path), + TP_ARGS(mesh_path), + TP_STRUCT__entry( + __field(u8, idx) + __field(u8, next_hop_sta) + __array(u8, tgt_mac, ETH_ALEN) + ), + + TP_fast_assign( + __entry->idx = mesh_path->path_idx; + memcpy(__entry->tgt_mac, &mesh_path->tgt_mac_addr, ETH_ALEN); + if (mesh_path->p_nhop_sta) + __entry->next_hop_sta = mesh_path->p_nhop_sta->sta_idx; + else + __entry->next_hop_sta = 0xff; + ), + + TP_printk("Mpath(%d): target=%pM next_hop=STA-%d", + __entry->idx, __entry->tgt_mac, __entry->next_hop_sta) +); + +DEFINE_EVENT(mesh_path_template, mesh_create_path, + TP_PROTO(struct rwnx_mesh_path *mesh_path), + TP_ARGS(mesh_path)); + +DEFINE_EVENT(mesh_path_template, mesh_delete_path, + TP_PROTO(struct rwnx_mesh_path *mesh_path), + TP_ARGS(mesh_path)); + +DEFINE_EVENT(mesh_path_template, mesh_update_path, + TP_PROTO(struct rwnx_mesh_path *mesh_path), + TP_ARGS(mesh_path)); + +#endif /* CONFIG_RWNX_FULLMAC */ + +/***************************************************************************** + * TRACE functions for RADAR + ****************************************************************************/ +#ifdef CONFIG_RWNX_RADAR +TRACE_EVENT( + radar_pulse, + TP_PROTO(u8 chain, struct radar_pulse *pulse), + TP_ARGS(chain, pulse), + TP_STRUCT__entry( + __field(u8, chain) + __field(s16, freq) + __field(u16, pri) + __field(u8, len) + __field(u8, fom) + ), + TP_fast_assign( + __entry->freq = pulse->freq * 2; + __entry->len = pulse->len * 2; + __entry->fom = pulse->fom * 6; + __entry->pri = pulse->rep; + __entry->chain = chain; + ), + + TP_printk("%s: PRI=%.5d LEN=%.3d FOM=%.2d%% freq=%dMHz ", + __print_symbolic(__entry->chain, + {RWNX_RADAR_RIU, "RIU"}, + {RWNX_RADAR_FCU, "FCU"}), + __entry->pri, __entry->len, __entry->fom, __entry->freq) + ); + +TRACE_EVENT( + radar_detected, + TP_PROTO(u8 chain, u8 region, s16 freq, u8 type, u16 pri), + TP_ARGS(chain, region, freq, type, pri), + TP_STRUCT__entry( + __field(u8, chain) + __field(u8, region) + __field(s16, freq) + __field(u8, type) + __field(u16, pri) + ), + TP_fast_assign( + __entry->chain = chain; + __entry->region = region; + __entry->freq = freq; + __entry->type = type; + __entry->pri = pri; + ), + TP_printk("%s: region=%s type=%d freq=%dMHz (pri=%dus)", + __print_symbolic(__entry->chain, + {RWNX_RADAR_RIU, "RIU"}, + {RWNX_RADAR_FCU, "FCU"}), + __print_symbolic(__entry->region, + {NL80211_DFS_UNSET, "UNSET"}, + {NL80211_DFS_FCC, "FCC"}, + {NL80211_DFS_ETSI, "ETSI"}, + {NL80211_DFS_JP, "JP"}), + __entry->type, __entry->freq, __entry->pri) +); + +TRACE_EVENT( + radar_set_region, + TP_PROTO(u8 region), + TP_ARGS(region), + TP_STRUCT__entry( + __field(u8, region) + ), + TP_fast_assign( + __entry->region = region; + ), + TP_printk("region=%s", + __print_symbolic(__entry->region, + {NL80211_DFS_UNSET, "UNSET"}, + {NL80211_DFS_FCC, "FCC"}, + {NL80211_DFS_ETSI, "ETSI"}, + {NL80211_DFS_JP, "JP"})) +); + +TRACE_EVENT( + radar_enable_detection, + TP_PROTO(u8 region, u8 enable, u8 chain), + TP_ARGS(region, enable, chain), + TP_STRUCT__entry( + __field(u8, region) + __field(u8, chain) + __field(u8, enable) + ), + TP_fast_assign( + __entry->chain = chain; + __entry->enable = enable; + __entry->region = region; + ), + TP_printk("%s: %s radar detection %s", + 
__print_symbolic(__entry->chain, + {RWNX_RADAR_RIU, "RIU"}, + {RWNX_RADAR_FCU, "FCU"}), + __print_symbolic(__entry->enable, + {RWNX_RADAR_DETECT_DISABLE, "Disable"}, + {RWNX_RADAR_DETECT_ENABLE, "Enable (no report)"}, + {RWNX_RADAR_DETECT_REPORT, "Enable"}), + __entry->enable == RWNX_RADAR_DETECT_DISABLE ? "" : + __print_symbolic(__entry->region, + {NL80211_DFS_UNSET, "UNSET"}, + {NL80211_DFS_FCC, "FCC"}, + {NL80211_DFS_ETSI, "ETSI"}, + {NL80211_DFS_JP, "JP"})) +); +#endif /* CONFIG_RWNX_RADAR */ + +/***************************************************************************** + * TRACE functions for IPC message + ****************************************************************************/ +#include "rwnx_strs.h" + +DECLARE_EVENT_CLASS( + ipc_msg_template, + TP_PROTO(u16 id), + TP_ARGS(id), + TP_STRUCT__entry( + __field(u16, id) + ), + TP_fast_assign( + __entry->id = id; + ), + + TP_printk("%s (%d - %d)", RWNX_ID2STR(__entry->id), + MSG_T(__entry->id), MSG_I(__entry->id)) +); + +DEFINE_EVENT(ipc_msg_template, msg_send, + TP_PROTO(u16 id), + TP_ARGS(id)); + +DEFINE_EVENT(ipc_msg_template, msg_recv, + TP_PROTO(u16 id), + TP_ARGS(id)); + + + +#endif /* !defined(_RWNX_EVENTS_H) || defined(TRACE_HEADER_MULTI_READ) */ + +#undef TRACE_INCLUDE_PATH +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_PATH . +#define TRACE_INCLUDE_FILE rwnx_events +#include diff --git a/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_fw_dump.c b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_fw_dump.c new file mode 100644 index 0000000..eb47c02 --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_fw_dump.c @@ -0,0 +1,568 @@ +/** + ****************************************************************************** + * + * @file rwnx_fw_dump.c + * + * @brief Definition of debug fs entries to process fw dump + * + * Copyright (C) RivieraWaves 2012-2019 + * + ****************************************************************************** + */ + + +#include +#include + +#include "rwnx_defs.h" +#include "rwnx_debugfs.h" + +static ssize_t rwnx_dbgfs_rhd_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct rwnx_hw *priv = file->private_data; + struct dbg_debug_dump_tag *dump = priv->dbgdump_elem.buf.addr; + ssize_t read; + + mutex_lock(&priv->dbgdump_elem.mutex); + if (!priv->debugfs.trace_prst) { + mutex_unlock(&priv->dbgdump_elem.mutex); + return 0; + } + + read = simple_read_from_buffer(user_buf, count, ppos, + dump->rhd_mem, + dump->dbg_info.rhd_len); + + mutex_unlock(&priv->dbgdump_elem.mutex); + return read; +} + +DEBUGFS_READ_FILE_OPS(rhd); + +static ssize_t rwnx_dbgfs_rbd_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct rwnx_hw *priv = file->private_data; + struct dbg_debug_dump_tag *dump = priv->dbgdump_elem.buf.addr; + ssize_t read; + + mutex_lock(&priv->dbgdump_elem.mutex); + if (!priv->debugfs.trace_prst) { + mutex_unlock(&priv->dbgdump_elem.mutex); + return 0; + } + + read = simple_read_from_buffer(user_buf, count, ppos, + dump->rbd_mem, + dump->dbg_info.rbd_len); + + mutex_unlock(&priv->dbgdump_elem.mutex); + return read; +} + +DEBUGFS_READ_FILE_OPS(rbd); + +static ssize_t rwnx_dbgfs_thdx_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos, int idx) +{ + struct rwnx_hw *priv = file->private_data; + struct dbg_debug_dump_tag *dump = priv->dbgdump_elem.buf.addr; + ssize_t read; + + mutex_lock(&priv->dbgdump_elem.mutex); + if (!priv->debugfs.trace_prst) { + 
mutex_unlock(&priv->dbgdump_elem.mutex); + return 0; + } + + read = simple_read_from_buffer(user_buf, count, ppos, + &dump->thd_mem[idx], + dump->dbg_info.thd_len[idx]); + + mutex_unlock(&priv->dbgdump_elem.mutex); + return read; +} + +static ssize_t rwnx_dbgfs_thd0_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + return rwnx_dbgfs_thdx_read(file, user_buf, count, ppos, 0); +} +DEBUGFS_READ_FILE_OPS(thd0); + +static ssize_t rwnx_dbgfs_thd1_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + return rwnx_dbgfs_thdx_read(file, user_buf, count, ppos, 1); +} +DEBUGFS_READ_FILE_OPS(thd1); + +static ssize_t rwnx_dbgfs_thd2_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + return rwnx_dbgfs_thdx_read(file, user_buf, count, ppos, 2); +} +DEBUGFS_READ_FILE_OPS(thd2); + +static ssize_t rwnx_dbgfs_thd3_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + return rwnx_dbgfs_thdx_read(file, user_buf, count, ppos, 3); +} +DEBUGFS_READ_FILE_OPS(thd3); + +#if (NX_TXQ_CNT == 5) +static ssize_t rwnx_dbgfs_thd4_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + return rwnx_dbgfs_thdx_read(file, user_buf, count, ppos, 4); +} +DEBUGFS_READ_FILE_OPS(thd4); +#endif + +static ssize_t rwnx_dbgfs_mactrace_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct rwnx_hw *priv = file->private_data; + struct dbg_debug_dump_tag *dump = priv->dbgdump_elem.buf.addr; + ssize_t read; + + mutex_lock(&priv->dbgdump_elem.mutex); + if (!priv->debugfs.trace_prst) { + char msg[64]; + + scnprintf(msg, sizeof(msg), "Force trigger\n"); + rwnx_dbgfs_trigger_fw_dump(priv, msg); + + mutex_unlock(&priv->dbgdump_elem.mutex); + return 0; + } + + read = simple_read_from_buffer(user_buf, count, ppos, + dump->la_mem, + dump->dbg_info.la_conf.trace_len); + + mutex_unlock(&priv->dbgdump_elem.mutex); + + return read; +} +DEBUGFS_READ_FILE_OPS(mactrace); + +static ssize_t rwnx_dbgfs_macdiags_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct rwnx_hw *priv = file->private_data; + struct dbg_debug_dump_tag *dump = priv->dbgdump_elem.buf.addr; + ssize_t read; + + mutex_lock(&priv->dbgdump_elem.mutex); + if (!priv->debugfs.trace_prst) { + mutex_unlock(&priv->dbgdump_elem.mutex); + return 0; + } + + read = simple_read_from_buffer(user_buf, count, ppos, + dump->dbg_info.diags_mac, + DBG_DIAGS_MAC_MAX * 2); + + mutex_unlock(&priv->dbgdump_elem.mutex); + return read; +} + +DEBUGFS_READ_FILE_OPS(macdiags); + +static ssize_t rwnx_dbgfs_phydiags_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct rwnx_hw *priv = file->private_data; + struct dbg_debug_dump_tag *dump = priv->dbgdump_elem.buf.addr; + ssize_t read; + + mutex_lock(&priv->dbgdump_elem.mutex); + if (!priv->debugfs.trace_prst) { + mutex_unlock(&priv->dbgdump_elem.mutex); + return 0; + } + + read = simple_read_from_buffer(user_buf, count, ppos, + dump->dbg_info.diags_phy, + DBG_DIAGS_PHY_MAX * 2); + + mutex_unlock(&priv->dbgdump_elem.mutex); + return read; +} + +DEBUGFS_READ_FILE_OPS(phydiags); + +static ssize_t rwnx_dbgfs_hwdiags_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct rwnx_hw *priv = file->private_data; + struct dbg_debug_dump_tag *dump = priv->dbgdump_elem.buf.addr; + char buf[16]; + int ret; + + mutex_lock(&priv->dbgdump_elem.mutex); + if (!priv->debugfs.trace_prst) { + 
mutex_unlock(&priv->dbgdump_elem.mutex); + return 0; + } + + ret = scnprintf(buf, min_t(size_t, sizeof(buf) - 1, count), + "%08X\n", dump->dbg_info.hw_diag); + + mutex_unlock(&priv->dbgdump_elem.mutex); + return simple_read_from_buffer(user_buf, count, ppos, buf, ret); +} + +DEBUGFS_READ_FILE_OPS(hwdiags); + +static ssize_t rwnx_dbgfs_plfdiags_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct rwnx_hw *priv = file->private_data; + struct dbg_debug_dump_tag *dump = priv->dbgdump_elem.buf.addr; + char buf[16]; + int ret; + + mutex_lock(&priv->dbgdump_elem.mutex); + if (!priv->debugfs.trace_prst) { + mutex_unlock(&priv->dbgdump_elem.mutex); + return 0; + } + + ret = scnprintf(buf, min_t(size_t, sizeof(buf) - 1, count), + "%08X\n", dump->dbg_info.la_conf.diag_conf); + + mutex_unlock(&priv->dbgdump_elem.mutex); + return simple_read_from_buffer(user_buf, count, ppos, buf, ret); +} + +DEBUGFS_READ_FILE_OPS(plfdiags); + +static ssize_t rwnx_dbgfs_swdiags_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct rwnx_hw *priv = file->private_data; + struct dbg_debug_dump_tag *dump = priv->dbgdump_elem.buf.addr; + ssize_t read; + + mutex_lock(&priv->dbgdump_elem.mutex); + if (!priv->debugfs.trace_prst) { + mutex_unlock(&priv->dbgdump_elem.mutex); + return 0; + } + + read = simple_read_from_buffer(user_buf, count, ppos, + &dump->dbg_info.sw_diag, + dump->dbg_info.sw_diag_len); + + mutex_unlock(&priv->dbgdump_elem.mutex); + return read; +} + +DEBUGFS_READ_FILE_OPS(swdiags); + +static ssize_t rwnx_dbgfs_error_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct rwnx_hw *priv = file->private_data; + struct dbg_debug_dump_tag *dump = priv->dbgdump_elem.buf.addr; + ssize_t read; + + mutex_lock(&priv->dbgdump_elem.mutex); + if (!priv->debugfs.trace_prst) { + mutex_unlock(&priv->dbgdump_elem.mutex); + return 0; + } + + read = simple_read_from_buffer(user_buf, count, ppos, + dump->dbg_info.error, + strlen((char *)dump->dbg_info.error)); + + mutex_unlock(&priv->dbgdump_elem.mutex); + return read; +} + +DEBUGFS_READ_FILE_OPS(error); + +static ssize_t rwnx_dbgfs_rxdesc_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct rwnx_hw *priv = file->private_data; + struct dbg_debug_dump_tag *dump = priv->dbgdump_elem.buf.addr; + char buf[32]; + int ret; + ssize_t read; + + mutex_lock(&priv->dbgdump_elem.mutex); + if (!priv->debugfs.trace_prst) { + mutex_unlock(&priv->dbgdump_elem.mutex); + return 0; + } + + ret = scnprintf(buf, min_t(size_t, sizeof(buf) - 1, count), + "%08X\n%08X\n", dump->dbg_info.rhd, + dump->dbg_info.rbd); + read = simple_read_from_buffer(user_buf, count, ppos, buf, ret); + + mutex_unlock(&priv->dbgdump_elem.mutex); + return read; +} + +DEBUGFS_READ_FILE_OPS(rxdesc); + +static ssize_t rwnx_dbgfs_txdesc_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct rwnx_hw *priv = file->private_data; + struct dbg_debug_dump_tag *dump = priv->dbgdump_elem.buf.addr; + char buf[64]; + int len = 0; + int i; + + mutex_lock(&priv->dbgdump_elem.mutex); + if (!priv->debugfs.trace_prst) { + mutex_unlock(&priv->dbgdump_elem.mutex); + return 0; + } + + for (i = 0; i < NX_TXQ_CNT; i++) { + len += scnprintf(&buf[len], min_t(size_t, sizeof(buf) - len - 1, count), + "%08X\n", dump->dbg_info.thd[i]); + } + + mutex_unlock(&priv->dbgdump_elem.mutex); + return simple_read_from_buffer(user_buf, count, ppos, buf, len); +} + 
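+/* Note: as with the other dump entries above, DEBUGFS_READ_FILE_OPS(<name>) is
+ * assumed (it is defined in rwnx_debugfs.h, not shown here) to generate the
+ * read-only struct file_operations wrapping rwnx_dbgfs_<name>_read(); each
+ * entry is then exposed via DEBUGFS_ADD_FILE() in rwnx_dbgfs_register_fw_dump()
+ * further below. */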
+DEBUGFS_READ_FILE_OPS(txdesc); + +static ssize_t rwnx_dbgfs_macrxptr_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct rwnx_hw *priv = file->private_data; + struct dbg_debug_dump_tag *dump = priv->dbgdump_elem.buf.addr; + ssize_t read; + + mutex_lock(&priv->dbgdump_elem.mutex); + if (!priv->debugfs.trace_prst) { + mutex_unlock(&priv->dbgdump_elem.mutex); + return 0; + } + + read = simple_read_from_buffer(user_buf, count, ppos, + &dump->dbg_info.rhd_hw_ptr, + 2 * sizeof(dump->dbg_info.rhd_hw_ptr)); + + mutex_unlock(&priv->dbgdump_elem.mutex); + return read; +} + +DEBUGFS_READ_FILE_OPS(macrxptr); + +static ssize_t rwnx_dbgfs_lamacconf_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct rwnx_hw *priv = file->private_data; + struct dbg_debug_dump_tag *dump = priv->dbgdump_elem.buf.addr; + ssize_t read; + + mutex_lock(&priv->dbgdump_elem.mutex); + if (!priv->debugfs.trace_prst) { + mutex_unlock(&priv->dbgdump_elem.mutex); + return 0; + } + + read = simple_read_from_buffer(user_buf, count, ppos, + dump->dbg_info.la_conf.conf, + LA_CONF_LEN * 4); + + mutex_unlock(&priv->dbgdump_elem.mutex); + return read; +} +DEBUGFS_READ_FILE_OPS(lamacconf); + +static ssize_t rwnx_dbgfs_chaninfo_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct rwnx_hw *priv = file->private_data; + struct dbg_debug_dump_tag *dump = priv->dbgdump_elem.buf.addr; + char buf[4 * 32]; + int ret; + + mutex_lock(&priv->dbgdump_elem.mutex); + if (!priv->debugfs.trace_prst) { + mutex_unlock(&priv->dbgdump_elem.mutex); + return 0; + } + + ret = scnprintf(buf, min_t(size_t, sizeof(buf) - 1, count), + "type: %d\n" + "prim20_freq: %d MHz\n" + "center1_freq: %d MHz\n" + "center2_freq: %d MHz\n", + (dump->dbg_info.chan_info.info1 >> 8) & 0xFF, + (dump->dbg_info.chan_info.info1 >> 16) & 0xFFFF, + (dump->dbg_info.chan_info.info2 >> 0) & 0xFFFF, + (dump->dbg_info.chan_info.info2 >> 16) & 0xFFFF); + + mutex_unlock(&priv->dbgdump_elem.mutex); + return simple_read_from_buffer(user_buf, count, ppos, buf, ret); +} + +DEBUGFS_READ_FILE_OPS(chaninfo); + +static ssize_t rwnx_dbgfs_um_helper_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct rwnx_hw *priv = file->private_data; + char buf[sizeof(priv->debugfs.helper_cmd)]; + int ret; + + ret = scnprintf(buf, min_t(size_t, sizeof(buf) - 1, count), + "%s", priv->debugfs.helper_cmd); + + return simple_read_from_buffer(user_buf, count, ppos, buf, ret); +} + +static ssize_t rwnx_dbgfs_um_helper_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct rwnx_hw *priv = file->private_data; + int eobuf = min_t(size_t, sizeof(priv->debugfs.helper_cmd) - 1, count); + + priv->debugfs.helper_cmd[eobuf] = '\0'; + if (copy_from_user(priv->debugfs.helper_cmd, user_buf, eobuf)) + return -EFAULT; + + return count; +} + +DEBUGFS_READ_WRITE_FILE_OPS(um_helper); + +/* + * Calls a userspace pgm + */ +int rwnx_um_helper(struct rwnx_debugfs *rwnx_debugfs, const char *cmd) +{ + char *envp[] = { "PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL }; + char **argv; + int argc, ret; + + if (!rwnx_debugfs->dir || + !strlen((cmd = cmd ? cmd : rwnx_debugfs->helper_cmd))) + return 0; + argv = argv_split(in_interrupt() ? 
GFP_ATOMIC : GFP_KERNEL, cmd, &argc); + if (!argc) + return PTR_ERR(argv); + + if ((ret = call_usermodehelper(argv[0], argv, envp, + UMH_WAIT_PROC | UMH_KILLABLE))) + printk(KERN_CRIT "Failed to call %s (%s returned %d)\n", + argv[0], cmd, ret); + argv_free(argv); + + return ret; +} + +static void rwnx_um_helper_work(struct work_struct *ws) +{ + struct rwnx_debugfs *rwnx_debugfs = container_of(ws, struct rwnx_debugfs, + helper_work); + struct rwnx_hw *rwnx_hw = container_of(rwnx_debugfs, struct rwnx_hw, + debugfs); + rwnx_um_helper(rwnx_debugfs, NULL); + if (!rwnx_debugfs->unregistering) + rwnx_umh_done(rwnx_hw); + rwnx_debugfs->helper_scheduled = false; +} + +int rwnx_trigger_um_helper(struct rwnx_debugfs *rwnx_debugfs) +{ + struct rwnx_hw *rwnx_hw = container_of(rwnx_debugfs, struct rwnx_hw, + debugfs); + + if (rwnx_debugfs->helper_scheduled == true) { + dev_err(rwnx_hw->dev, "%s: Already scheduled\n", __func__); + return -EBUSY; + } + + spin_lock_bh(&rwnx_debugfs->umh_lock); + if (rwnx_debugfs->unregistering) { + spin_unlock_bh(&rwnx_debugfs->umh_lock); + dev_err(rwnx_hw->dev, "%s: unregistering\n", __func__); + return -ENOENT; + } + rwnx_debugfs->helper_scheduled = true; + schedule_work(&rwnx_debugfs->helper_work); + spin_unlock_bh(&rwnx_debugfs->umh_lock); + + return 0; +} + +int rwnx_dbgfs_register_fw_dump(struct rwnx_hw *rwnx_hw, + struct dentry *dir_drv, + struct dentry *dir_diags) +{ + + struct rwnx_debugfs *rwnx_debugfs = &rwnx_hw->debugfs; + + BUILD_BUG_ON(sizeof(CONFIG_RWNX_UM_HELPER_DFLT) >= + sizeof(rwnx_debugfs->helper_cmd)); + strncpy(rwnx_debugfs->helper_cmd, + CONFIG_RWNX_UM_HELPER_DFLT, sizeof(rwnx_debugfs->helper_cmd)); + INIT_WORK(&rwnx_debugfs->helper_work, rwnx_um_helper_work); + DEBUGFS_ADD_FILE(um_helper, dir_drv, S_IWUSR | S_IRUSR); + + rwnx_debugfs->trace_prst = rwnx_debugfs->helper_scheduled = false; + spin_lock_init(&rwnx_debugfs->umh_lock); + DEBUGFS_ADD_FILE(rhd, dir_diags, S_IRUSR); + DEBUGFS_ADD_FILE(rbd, dir_diags, S_IRUSR); + DEBUGFS_ADD_FILE(thd0, dir_diags, S_IRUSR); + DEBUGFS_ADD_FILE(thd1, dir_diags, S_IRUSR); + DEBUGFS_ADD_FILE(thd2, dir_diags, S_IRUSR); + DEBUGFS_ADD_FILE(thd3, dir_diags, S_IRUSR); +#if (NX_TXQ_CNT == 5) + DEBUGFS_ADD_FILE(thd4, dir_diags, S_IRUSR); +#endif + DEBUGFS_ADD_FILE(mactrace, dir_diags, S_IRUSR); + DEBUGFS_ADD_FILE(macdiags, dir_diags, S_IRUSR); + DEBUGFS_ADD_FILE(phydiags, dir_diags, S_IRUSR); + DEBUGFS_ADD_FILE(plfdiags, dir_diags, S_IRUSR); + DEBUGFS_ADD_FILE(hwdiags, dir_diags, S_IRUSR); + DEBUGFS_ADD_FILE(swdiags, dir_diags, S_IRUSR); + DEBUGFS_ADD_FILE(error, dir_diags, S_IRUSR); + DEBUGFS_ADD_FILE(rxdesc, dir_diags, S_IRUSR); + DEBUGFS_ADD_FILE(txdesc, dir_diags, S_IRUSR); + DEBUGFS_ADD_FILE(macrxptr, dir_diags, S_IRUSR); + DEBUGFS_ADD_FILE(lamacconf, dir_diags, S_IRUSR); + DEBUGFS_ADD_FILE(chaninfo, dir_diags, S_IRUSR); + + return 0; + + err: + return -1; +} diff --git a/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_fw_trace.c b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_fw_trace.c new file mode 100644 index 0000000..fa96583 --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_fw_trace.c @@ -0,0 +1,47 @@ +/** + ****************************************************************************** + * + * @file rwnx_fw_trace.c + * + * Copyright (C) RivieraWaves 2017-2019 + * + ****************************************************************************** + */ +#include +#include +#include +#include +#include +#include +#include +#include "rwnx_fw_trace.h" + +int rwnx_fw_log_init(struct 
rwnx_fw_log *fw_log) +{ + u8 *buf = kmalloc(FW_LOG_SIZE, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + fw_log->buf.data = buf; + fw_log->buf.start = fw_log->buf.data; + fw_log->buf.size = 0; + fw_log->buf.end = fw_log->buf.data; + fw_log->buf.dataend = fw_log->buf.data + FW_LOG_SIZE; + spin_lock_init(&fw_log->lock); + + printk("fw_log_init: %lx, %lx\n", (unsigned long)fw_log->buf.start, (unsigned long)(fw_log->buf.dataend)); + return 0; +} + +void rwnx_fw_log_deinit(struct rwnx_fw_log *fw_log) +{ + if (!fw_log) + return; + + if (fw_log->buf.data) + kfree(fw_log->buf.data); + fw_log->buf.start = NULL; + fw_log->buf.end = NULL; + fw_log->buf.size = 0; +} + diff --git a/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_fw_trace.h b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_fw_trace.h new file mode 100644 index 0000000..7956790 --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_fw_trace.h @@ -0,0 +1,35 @@ +/** + ****************************************************************************** + * + * rwnx_fw_trace.h + * + * Copyright (C) RivieraWaves 2017-2019 + * + ****************************************************************************** + */ + +#ifndef _RWNX_FW_TRACE_H_ +#define _RWNX_FW_TRACE_H_ + +#include +#include +#include + +#define FW_LOG_SIZE (10240) + +struct rwnx_fw_log_buf { + uint8_t *data; + uint8_t *start; + uint8_t *end; + uint8_t *dataend; + uint32_t size; +}; + +struct rwnx_fw_log { + struct rwnx_fw_log_buf buf; + spinlock_t lock; +}; + +int rwnx_fw_log_init(struct rwnx_fw_log *fw_log); +void rwnx_fw_log_deinit(struct rwnx_fw_log *fw_log); +#endif /* _RWNX_FW_TRACE_H_ */ diff --git a/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_gki.c b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_gki.c new file mode 100644 index 0000000..a6bf565 --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_gki.c @@ -0,0 +1,18 @@ +#include +#include + + +void rwnx_gki_skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list) +{ + unsigned long flags; + struct sk_buff *prev = old; + struct sk_buff *next = prev->next; + spin_lock_irqsave(&list->lock, flags); + WRITE_ONCE(newsk->next, next); + WRITE_ONCE(newsk->prev, prev); + WRITE_ONCE(next->prev, newsk); + WRITE_ONCE(prev->next, newsk); + list->qlen++; + spin_unlock_irqrestore(&list->lock, flags); +} + diff --git a/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_gki.h b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_gki.h new file mode 100644 index 0000000..5ec1291 --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_gki.h @@ -0,0 +1,11 @@ +#ifndef __RWNX_GKI_H +#define __RWNX_GKI_H + +#ifdef CONFIG_GKI +void rwnx_gki_skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list); +#define rwnx_skb_append rwnx_gki_skb_append +#else +#define rwnx_skb_append skb_append +#endif//CONFIG_GKI + +#endif diff --git a/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_irqs.c b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_irqs.c new file mode 100644 index 0000000..65e2493 --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_irqs.c @@ -0,0 +1,67 @@ +/** + ****************************************************************************** + * + * @file rwnx_irqs.c + * + * Copyright (C) RivieraWaves 2012-2019 + * + ****************************************************************************** + */ +#include + +#include "rwnx_defs.h" +#include "ipc_host.h" +#include "rwnx_prof.h" + +/** + * rwnx_irq_hdlr - IRQ handler + * + * 
Handler registerd by the platform driver + */ +irqreturn_t rwnx_irq_hdlr(int irq, void *dev_id) +{ + struct rwnx_hw *rwnx_hw = (struct rwnx_hw *)dev_id; + disable_irq_nosync(irq); + tasklet_schedule(&rwnx_hw->task); + return IRQ_HANDLED; +} + +/** + * rwnx_task - Bottom half for IRQ handler + * + * Read irq status and process accordingly + */ +void rwnx_task(unsigned long data) +{ + struct rwnx_hw *rwnx_hw = (struct rwnx_hw *)data; + REG_SW_SET_PROFILING(rwnx_hw, SW_PROF_RWNX_IPC_IRQ_HDLR); + +#if 0 + struct rwnx_plat *rwnx_plat = rwnx_hw->plat; + u32 status, statuses = 0; + + /* Ack unconditionnally in case ipc_host_get_status does not see the irq */ + rwnx_plat->ack_irq(rwnx_plat); + + while ((status = ipc_host_get_status(rwnx_hw->ipc_env))) { + statuses |= status; + /* All kinds of IRQs will be handled in one shot (RX, MSG, DBG, ...) + * this will ack IPC irqs not the cfpga irqs */ + ipc_host_irq(rwnx_hw->ipc_env, status); + + rwnx_plat->ack_irq(rwnx_plat); + } +#endif + //if (statuses & IPC_IRQ_E2A_RXDESC) + // rwnx_hw->stats.last_rx = now; + //if (statuses & IPC_IRQ_E2A_TXCFM) + // rwnx_hw->stats.last_tx = now; + AICWFDBG(LOGTRACE, "rwnx_task\n"); + spin_lock_bh(&rwnx_hw->tx_lock); + rwnx_hwq_process_all(rwnx_hw); + spin_unlock_bh(&rwnx_hw->tx_lock); +#if 0 + enable_irq(rwnx_platform_get_irq(rwnx_plat)); +#endif + REG_SW_CLEAR_PROFILING(rwnx_hw, SW_PROF_RWNX_IPC_IRQ_HDLR); +} diff --git a/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_irqs.h b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_irqs.h new file mode 100644 index 0000000..d3fb451 --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_irqs.h @@ -0,0 +1,20 @@ +/** + ****************************************************************************** + * + * @file rwnx_irqs.h + * + * Copyright (C) RivieraWaves 2012-2019 + * + ****************************************************************************** + */ +#ifndef _RWNX_IRQS_H_ +#define _RWNX_IRQS_H_ + +#include + +/* IRQ handler to be registered by platform driver */ +irqreturn_t rwnx_irq_hdlr(int irq, void *dev_id); + +void rwnx_task(unsigned long data); + +#endif /* _RWNX_IRQS_H_ */ diff --git a/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_main.c b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_main.c new file mode 100755 index 0000000..fe3d4eb --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_main.c @@ -0,0 +1,9150 @@ +/** + ****************************************************************************** + * + * @file rwnx_main.c + * + * @brief Entry point of the RWNX driver + * + * Copyright (C) RivieraWaves 2012-2019 + * + ****************************************************************************** + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "rwnx_defs.h" +#include "rwnx_dini.h" +#include "rwnx_msg_tx.h" +#include "reg_access.h" +#include "hal_desc.h" +#include "rwnx_debugfs.h" +#include "rwnx_cfgfile.h" +#include "rwnx_irqs.h" +#include "rwnx_radar.h" +#include "rwnx_version.h" +#ifdef CONFIG_RWNX_BFMER +#include "rwnx_bfmer.h" +#endif //(CONFIG_RWNX_BFMER) +#include "rwnx_tdls.h" +#include "rwnx_events.h" +#include "rwnx_compat.h" +#include "rwnx_version.h" +#include "rwnx_main.h" +#include "aicwf_txrxif.h" +#include "aicwf_compat_8800dc.h" +#include "aicwf_compat_8800d80.h" +#include "aicwf_compat_8800d80x2.h" +#include "aic_priv_cmd.h" +#include "rwnx_wakelock.h" +#include "rwnx_msg_tx.h" +#ifdef CONFIG_BAND_STEERING +#include 
"aicwf_manager.h" +#endif + +#ifdef CONFIG_USE_WIRELESS_EXT +#include "aicwf_wext_linux.h" +#endif + +#ifdef AICWF_SDIO_SUPPORT +#include "aicwf_sdio.h" +#endif +#ifdef AICWF_USB_SUPPORT +#include "aicwf_usb.h" +#endif +#include + +#define RW_DRV_DESCRIPTION "RivieraWaves 11nac driver for Linux cfg80211" +#define RW_DRV_COPYRIGHT "Copyright(c) 2015-2017 RivieraWaves" +#define RW_DRV_AUTHOR "RivieraWaves S.A.S" + +#ifdef CONFIG_WOWLAN + //ff ff ff ff ff ff is magic package + u8_l wowlan_param1[] = {0x01,0x2a,0x06,0xff,0xff,0xff,0xff,0xff,0xff, + 0xff,0xff,0xff,0xff,0xff,0xff}; +#endif + +static char *wifi_mac_addr = 0; + +extern char country_code[]; + +#define RWNX_PRINT_CFM_ERR(req) \ + printk(KERN_CRIT "%s: Status Error(%d)\n", #req, (&req##_cfm)->status) + +#define RWNX_HT_CAPABILITIES \ +{ \ + .ht_supported = true, \ + .cap = 0, \ + .ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K, \ + .ampdu_density = IEEE80211_HT_MPDU_DENSITY_16, \ + .mcs = { \ + .rx_mask = { 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, }, \ + .rx_highest = cpu_to_le16(65), \ + .tx_params = IEEE80211_HT_MCS_TX_DEFINED, \ + }, \ +} + +#define RWNX_VHT_CAPABILITIES \ +{ \ + .vht_supported = false, \ + .cap = \ + (7 << IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT),\ + .vht_mcs = { \ + .rx_mcs_map = cpu_to_le16( \ + IEEE80211_VHT_MCS_SUPPORT_0_9 << 0 | \ + IEEE80211_VHT_MCS_NOT_SUPPORTED << 2 | \ + IEEE80211_VHT_MCS_NOT_SUPPORTED << 4 | \ + IEEE80211_VHT_MCS_NOT_SUPPORTED << 6 | \ + IEEE80211_VHT_MCS_NOT_SUPPORTED << 8 | \ + IEEE80211_VHT_MCS_NOT_SUPPORTED << 10 | \ + IEEE80211_VHT_MCS_NOT_SUPPORTED << 12 | \ + IEEE80211_VHT_MCS_NOT_SUPPORTED << 14), \ + .tx_mcs_map = cpu_to_le16( \ + IEEE80211_VHT_MCS_SUPPORT_0_9 << 0 | \ + IEEE80211_VHT_MCS_NOT_SUPPORTED << 2 | \ + IEEE80211_VHT_MCS_NOT_SUPPORTED << 4 | \ + IEEE80211_VHT_MCS_NOT_SUPPORTED << 6 | \ + IEEE80211_VHT_MCS_NOT_SUPPORTED << 8 | \ + IEEE80211_VHT_MCS_NOT_SUPPORTED << 10 | \ + IEEE80211_VHT_MCS_NOT_SUPPORTED << 12 | \ + IEEE80211_VHT_MCS_NOT_SUPPORTED << 14), \ + } \ +} + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) || defined(CONFIG_HE_FOR_OLD_KERNEL) +#define RWNX_HE_CAPABILITIES \ +{ \ + .has_he = false, \ + .he_cap_elem = { \ + .mac_cap_info[0] = 0, \ + .mac_cap_info[1] = 0, \ + .mac_cap_info[2] = 0, \ + .mac_cap_info[3] = 0, \ + .mac_cap_info[4] = 0, \ + .mac_cap_info[5] = 0, \ + .phy_cap_info[0] = 0, \ + .phy_cap_info[1] = 0, \ + .phy_cap_info[2] = 0, \ + .phy_cap_info[3] = 0, \ + .phy_cap_info[4] = 0, \ + .phy_cap_info[5] = 0, \ + .phy_cap_info[6] = 0, \ + .phy_cap_info[7] = 0, \ + .phy_cap_info[8] = 0, \ + .phy_cap_info[9] = 0, \ + .phy_cap_info[10] = 0, \ + }, \ + .he_mcs_nss_supp = { \ + .rx_mcs_80 = cpu_to_le16(0xfffa), \ + .tx_mcs_80 = cpu_to_le16(0xfffa), \ + .rx_mcs_160 = cpu_to_le16(0xffff), \ + .tx_mcs_160 = cpu_to_le16(0xffff), \ + .rx_mcs_80p80 = cpu_to_le16(0xffff), \ + .tx_mcs_80p80 = cpu_to_le16(0xffff), \ + }, \ + .ppe_thres = {0x08, 0x1c, 0x07}, \ +} +#else +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0) +#define RWNX_HE_CAPABILITIES \ +{ \ + .has_he = false, \ + .he_cap_elem = { \ + .mac_cap_info[0] = 0, \ + .mac_cap_info[1] = 0, \ + .mac_cap_info[2] = 0, \ + .mac_cap_info[3] = 0, \ + .mac_cap_info[4] = 0, \ + .phy_cap_info[0] = 0, \ + .phy_cap_info[1] = 0, \ + .phy_cap_info[2] = 0, \ + .phy_cap_info[3] = 0, \ + .phy_cap_info[4] = 0, \ + .phy_cap_info[5] = 0, \ + .phy_cap_info[6] = 0, \ + .phy_cap_info[7] = 0, \ + .phy_cap_info[8] = 0, \ + }, \ + .he_mcs_nss_supp = { \ + .rx_mcs_80 = cpu_to_le16(0xfffa), \ + .tx_mcs_80 = 
cpu_to_le16(0xfffa), \ + .rx_mcs_160 = cpu_to_le16(0xffff), \ + .tx_mcs_160 = cpu_to_le16(0xffff), \ + .rx_mcs_80p80 = cpu_to_le16(0xffff), \ + .tx_mcs_80p80 = cpu_to_le16(0xffff), \ + }, \ + .ppe_thres = {0x08, 0x1c, 0x07}, \ +} +#endif +#endif + +#define RATE(_bitrate, _hw_rate, _flags) { \ + .bitrate = (_bitrate), \ + .flags = (_flags), \ + .hw_value = (_hw_rate), \ +} + +#define CHAN(_freq) { \ + .center_freq = (_freq), \ + .max_power = 30, /* FIXME */ \ +} + +static struct ieee80211_rate rwnx_ratetable[] = { + RATE(10, 0x00, 0), + RATE(20, 0x01, IEEE80211_RATE_SHORT_PREAMBLE), + RATE(55, 0x02, IEEE80211_RATE_SHORT_PREAMBLE), + RATE(110, 0x03, IEEE80211_RATE_SHORT_PREAMBLE), + RATE(60, 0x04, 0), + RATE(90, 0x05, 0), + RATE(120, 0x06, 0), + RATE(180, 0x07, 0), + RATE(240, 0x08, 0), + RATE(360, 0x09, 0), + RATE(480, 0x0A, 0), + RATE(540, 0x0B, 0), +}; + +/* The channels indexes here are not used anymore */ +static struct ieee80211_channel rwnx_2ghz_channels[] = { + CHAN(2412), + CHAN(2417), + CHAN(2422), + CHAN(2427), + CHAN(2432), + CHAN(2437), + CHAN(2442), + CHAN(2447), + CHAN(2452), + CHAN(2457), + CHAN(2462), + CHAN(2467), + CHAN(2472), + CHAN(2484), + // Extra channels defined only to be used for PHY measures. + // Enabled only if custregd and custchan parameters are set + CHAN(2390), + CHAN(2400), + CHAN(2410), + CHAN(2420), + CHAN(2430), + CHAN(2440), + CHAN(2450), + CHAN(2460), + CHAN(2470), + CHAN(2480), + CHAN(2490), + CHAN(2500), + CHAN(2510), +}; + +//#ifdef USE_5G +static struct ieee80211_channel rwnx_5ghz_channels[] = { + CHAN(5180), // 36 - 20MHz + CHAN(5200), // 40 - 20MHz + CHAN(5220), // 44 - 20MHz + CHAN(5240), // 48 - 20MHz + CHAN(5260), // 52 - 20MHz + CHAN(5280), // 56 - 20MHz + CHAN(5300), // 60 - 20MHz + CHAN(5320), // 64 - 20MHz + CHAN(5500), // 100 - 20MHz + CHAN(5520), // 104 - 20MHz + CHAN(5540), // 108 - 20MHz + CHAN(5560), // 112 - 20MHz + CHAN(5580), // 116 - 20MHz + CHAN(5600), // 120 - 20MHz + CHAN(5620), // 124 - 20MHz + CHAN(5640), // 128 - 20MHz + CHAN(5660), // 132 - 20MHz + CHAN(5680), // 136 - 20MHz + CHAN(5700), // 140 - 20MHz + CHAN(5720), // 144 - 20MHz + CHAN(5745), // 149 - 20MHz + CHAN(5765), // 153 - 20MHz + CHAN(5785), // 157 - 20MHz + CHAN(5805), // 161 - 20MHz + CHAN(5825), // 165 - 20MHz + // Extra channels defined only to be used for PHY measures. 
+ // Enabled only if custregd and custchan parameters are set + CHAN(5190), + CHAN(5210), + CHAN(5230), + CHAN(5250), + CHAN(5270), + CHAN(5290), + CHAN(5310), + CHAN(5330), + CHAN(5340), + CHAN(5350), + CHAN(5360), + CHAN(5370), + CHAN(5380), + CHAN(5390), + CHAN(5400), + CHAN(5410), + CHAN(5420), + CHAN(5430), + CHAN(5440), + CHAN(5450), + CHAN(5460), + CHAN(5470), + CHAN(5480), + CHAN(5490), + CHAN(5510), + CHAN(5530), + CHAN(5550), + CHAN(5570), + CHAN(5590), + CHAN(5610), + CHAN(5630), + CHAN(5650), + CHAN(5670), + CHAN(5690), + CHAN(5710), + CHAN(5730), + CHAN(5750), + CHAN(5760), + CHAN(5770), + CHAN(5780), + CHAN(5790), + CHAN(5800), + CHAN(5810), + CHAN(5820), + CHAN(5830), + CHAN(5840), + CHAN(5850), + CHAN(5860), + CHAN(5870), + CHAN(5880), + CHAN(5890), + CHAN(5900), + CHAN(5910), + CHAN(5920), + CHAN(5930), + CHAN(5940), + CHAN(5950), + CHAN(5960), + CHAN(5970), +}; +//#endif + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0)) || defined(CONFIG_HE_FOR_OLD_KERNEL) +struct ieee80211_sband_iftype_data rwnx_he_capa = { + .types_mask = BIT(NL80211_IFTYPE_STATION)|BIT(NL80211_IFTYPE_AP), + .he_cap = RWNX_HE_CAPABILITIES, +}; +#endif + +static struct ieee80211_supported_band rwnx_band_2GHz = { + .channels = rwnx_2ghz_channels, + .n_channels = ARRAY_SIZE(rwnx_2ghz_channels) - 13, // -13 to exclude extra channels + .bitrates = rwnx_ratetable, + .n_bitrates = ARRAY_SIZE(rwnx_ratetable), + .ht_cap = RWNX_HT_CAPABILITIES, +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0) + .vht_cap = RWNX_VHT_CAPABILITIES, +#endif +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0) + .iftype_data = &rwnx_he_capa, + .n_iftype_data = 1, +#endif +}; + +//#ifdef USE_5G +static struct ieee80211_supported_band rwnx_band_5GHz = { + .channels = rwnx_5ghz_channels, + .n_channels = ARRAY_SIZE(rwnx_5ghz_channels) - 59, // -59 to exclude extra channels + .bitrates = &rwnx_ratetable[4], + .n_bitrates = ARRAY_SIZE(rwnx_ratetable) - 4, + .ht_cap = RWNX_HT_CAPABILITIES, +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0) + .vht_cap = RWNX_VHT_CAPABILITIES, +#endif +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0) + .iftype_data = &rwnx_he_capa, + .n_iftype_data = 1, +#endif +}; +//#endif + +static struct ieee80211_iface_limit rwnx_limits[] = { + { .max = 1, + .types = BIT(NL80211_IFTYPE_STATION)}, + { .max = 1, + .types = BIT(NL80211_IFTYPE_AP)}, +#ifdef CONFIG_USE_P2P0 + { .max = 2, +#else + { .max = 1, +#endif + .types = BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO)}, +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0) +#ifndef CONFIG_USE_P2P0 + { .max = 1, + .types = BIT(NL80211_IFTYPE_P2P_DEVICE), + } +#endif +#endif +}; + +static struct ieee80211_iface_limit rwnx_limits_dfs[] = { + { .max = NX_VIRT_DEV_MAX, .types = BIT(NL80211_IFTYPE_AP)} +}; + +static const struct ieee80211_iface_combination rwnx_combinations[] = { + { + .limits = rwnx_limits, + .n_limits = ARRAY_SIZE(rwnx_limits), +#ifdef CONFIG_MCC + .num_different_channels = NX_CHAN_CTXT_CNT, +#else + .num_different_channels = 1, +#endif + .max_interfaces = NX_VIRT_DEV_MAX, + }, + /* Keep this combination as the last one */ + { + .limits = rwnx_limits_dfs, + .n_limits = ARRAY_SIZE(rwnx_limits_dfs), + .num_different_channels = 1, + .max_interfaces = NX_VIRT_DEV_MAX, + .radar_detect_widths = (BIT(NL80211_CHAN_WIDTH_20_NOHT) | + BIT(NL80211_CHAN_WIDTH_20) | + BIT(NL80211_CHAN_WIDTH_40) | + BIT(NL80211_CHAN_WIDTH_80)), + } +}; + +/* There isn't a lot of sense in it, but you can transmit anything you like */ +static struct ieee80211_txrx_stypes 
+rwnx_default_mgmt_stypes[NUM_NL80211_IFTYPES] = { + [NL80211_IFTYPE_STATION] = { + .tx = 0xffff, + .rx = (BIT(IEEE80211_STYPE_ACTION >> 4) | + BIT(IEEE80211_STYPE_PROBE_REQ >> 4) | + BIT(IEEE80211_STYPE_AUTH >> 4)), + }, + [NL80211_IFTYPE_AP] = { + .tx = 0xffff, + .rx = (BIT(IEEE80211_STYPE_ASSOC_REQ >> 4) | + BIT(IEEE80211_STYPE_REASSOC_REQ >> 4) | + BIT(IEEE80211_STYPE_PROBE_REQ >> 4) | + BIT(IEEE80211_STYPE_DISASSOC >> 4) | + BIT(IEEE80211_STYPE_AUTH >> 4) | + BIT(IEEE80211_STYPE_DEAUTH >> 4) | + BIT(IEEE80211_STYPE_ACTION >> 4)), + }, + [NL80211_IFTYPE_AP_VLAN] = { + /* copy AP */ + .tx = 0xffff, + .rx = (BIT(IEEE80211_STYPE_ASSOC_REQ >> 4) | + BIT(IEEE80211_STYPE_REASSOC_REQ >> 4) | + BIT(IEEE80211_STYPE_PROBE_REQ >> 4) | + BIT(IEEE80211_STYPE_DISASSOC >> 4) | + BIT(IEEE80211_STYPE_AUTH >> 4) | + BIT(IEEE80211_STYPE_DEAUTH >> 4) | + BIT(IEEE80211_STYPE_ACTION >> 4)), + }, + [NL80211_IFTYPE_P2P_CLIENT] = { + .tx = 0xffff, + .rx = (BIT(IEEE80211_STYPE_ACTION >> 4) | + BIT(IEEE80211_STYPE_PROBE_REQ >> 4)), + }, + [NL80211_IFTYPE_P2P_GO] = { + .tx = 0xffff, + .rx = (BIT(IEEE80211_STYPE_ASSOC_REQ >> 4) | + BIT(IEEE80211_STYPE_REASSOC_REQ >> 4) | + BIT(IEEE80211_STYPE_PROBE_REQ >> 4) | + BIT(IEEE80211_STYPE_DISASSOC >> 4) | + BIT(IEEE80211_STYPE_AUTH >> 4) | + BIT(IEEE80211_STYPE_DEAUTH >> 4) | + BIT(IEEE80211_STYPE_ACTION >> 4)), + }, +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0) + [NL80211_IFTYPE_P2P_DEVICE] = { + .tx = 0xffff, + .rx = (BIT(IEEE80211_STYPE_ACTION >> 4) | + BIT(IEEE80211_STYPE_PROBE_REQ >> 4)), + }, +#endif + [NL80211_IFTYPE_MESH_POINT] = { + .tx = 0xffff, + .rx = (BIT(IEEE80211_STYPE_ACTION >> 4) | + BIT(IEEE80211_STYPE_AUTH >> 4) | + BIT(IEEE80211_STYPE_DEAUTH >> 4)), + }, +}; + + +static u32 cipher_suites[] = { + WLAN_CIPHER_SUITE_WEP40, + WLAN_CIPHER_SUITE_WEP104, + WLAN_CIPHER_SUITE_TKIP, + WLAN_CIPHER_SUITE_CCMP, + WLAN_CIPHER_SUITE_AES_CMAC, // reserved entries to enable AES-CMAC and/or SMS4 + WLAN_CIPHER_SUITE_SMS4, + 0, +}; +#define NB_RESERVED_CIPHER 1; + +static const int rwnx_ac2hwq[1][NL80211_NUM_ACS] = { + { + [NL80211_TXQ_Q_VO] = RWNX_HWQ_VO, + [NL80211_TXQ_Q_VI] = RWNX_HWQ_VI, + [NL80211_TXQ_Q_BE] = RWNX_HWQ_BE, + [NL80211_TXQ_Q_BK] = RWNX_HWQ_BK + } +}; + +const int rwnx_tid2hwq[IEEE80211_NUM_TIDS] = { + RWNX_HWQ_BE, + RWNX_HWQ_BK, + RWNX_HWQ_BK, + RWNX_HWQ_BE, + RWNX_HWQ_VI, + RWNX_HWQ_VI, + RWNX_HWQ_VO, + RWNX_HWQ_VO, + /* TID_8 is used for management frames */ + RWNX_HWQ_VO, + /* At the moment, all others TID are mapped to BE */ + RWNX_HWQ_BE, + RWNX_HWQ_BE, + RWNX_HWQ_BE, + RWNX_HWQ_BE, + RWNX_HWQ_BE, + RWNX_HWQ_BE, + RWNX_HWQ_BE, +}; + +static const int rwnx_hwq2uapsd[NL80211_NUM_ACS] = { + [RWNX_HWQ_VO] = IEEE80211_WMM_IE_STA_QOSINFO_AC_VO, + [RWNX_HWQ_VI] = IEEE80211_WMM_IE_STA_QOSINFO_AC_VI, + [RWNX_HWQ_BE] = IEEE80211_WMM_IE_STA_QOSINFO_AC_BE, + [RWNX_HWQ_BK] = IEEE80211_WMM_IE_STA_QOSINFO_AC_BK, +}; + +#define P2P_ALIVE_TIME_MS (1*1000) +#define P2P_ALIVE_TIME_COUNT 200 + +struct semaphore aicwf_deinit_sem; +atomic_t aicwf_deinit_atomic; + +int aicwf_dbg_level = LOGERROR|LOGINFO; +module_param(aicwf_dbg_level, int, 0660); +#ifdef CONFIG_DYNAMIC_PWR +int dynamic_pwr = 1; +module_param(dynamic_pwr, int, 0660); +#endif + +int testmode = 0; +char aic_fw_path[200]; + +void rwnx_skb_align_8bytes(struct sk_buff *skb){ +#ifdef CONFIG_ALIGN_8BYTES + int align __maybe_unused; + u8 *data; + size_t len = 0; + + align = ((unsigned long)(skb->data + 14)) & 7; + if (align) { + if (WARN_ON(skb_headroom(skb) < 7)) { + dev_kfree_skb(skb); + skb = NULL; + } 
else { + //printk("AIDEN align1:%d rx_skb->data + 14:%p\r\n", align, skb->data + 14); + data = skb->data; + len = skb_headlen(skb); + skb->data -= align; + memmove(skb->data, data, len); + skb_set_tail_pointer(skb, len); + //printk("AIDEN align2:%d rx_skb->data + 14:%p\r\n", align, skb->data + 14); + } + } +#endif +} + +int rwnx_init_cmd_array(void); +void rwnx_free_cmd_array(void); + +void rwnx_data_dump(char* tag, void* data, unsigned long len){ + unsigned long i = 0; + char* data_ = (char* )data; + + AICWFDBG(LOGDATA, "%s %s len:(%lu)\r\n", __func__, tag, len); + + for (i = 0; i < len; i += 16){ + AICWFDBG(LOGDATA, "%02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X\r\n", + data_[0 + i], + data_[1 + i], + data_[2 + i], + data_[3 + i], + data_[4 + i], + data_[5 + i], + data_[6 + i], + data_[7 + i], + data_[8 + i], + data_[9 + i], + data_[10 + i], + data_[11 + i], + data_[12 + i], + data_[13 + i], + data_[14 + i], + data_[15 + i]); + } + +} + +#define ASSOC_REQ 0x00 +#define ASSOC_RSP 0x10 +#define PROBE_REQ 0x40 +#define PROBE_RSP 0x50 +#define ACTION 0xD0 +#define AUTH 0xB0 +#define DEAUTH 0xC0 +#define QOS 0x88 + +#define ACTION_MAC_HDR_LEN 24 +#define QOS_MAC_HDR_LEN 26 + +void rwnx_frame_parser(char* tag, char* data, unsigned long len){ + char print_data[100]; + int print_index = 0; + + memset(print_data, 0, 100); + + if(data[0] == ASSOC_REQ){ + sprintf(&print_data[print_index], "%s", "ASSOC_REQ \r\n"); + print_index = strlen(print_data); + }else if(data[0] == ASSOC_RSP){ + sprintf(&print_data[print_index], "%s", "ASSOC_RSP \r\n"); + print_index = strlen(print_data); + }else if(data[0] == PROBE_REQ){ + sprintf(&print_data[print_index], "%s", "PROBE_REQ \r\n"); + print_index = strlen(print_data); + }else if(data[0] == PROBE_RSP){ + sprintf(&print_data[print_index], "%s", "PROBE_RSP \r\n"); + print_index = strlen(print_data); + }else if(data[0] == ACTION){ + sprintf(&print_data[print_index], "%s", "ACTION "); + print_index = strlen(print_data); + if(data[ACTION_MAC_HDR_LEN] == 0x04 && data[ACTION_MAC_HDR_LEN + 6] == 0x00){ + sprintf(&print_data[print_index], "%s", "GO_NEG_REQ \r\n"); + }else if(data[ACTION_MAC_HDR_LEN] == 0x04 && data[ACTION_MAC_HDR_LEN + 6] == 0x01){ + sprintf(&print_data[print_index], "%s", "GO_NEG_RSP \r\n"); + }else if(data[ACTION_MAC_HDR_LEN] == 0x04 && data[ACTION_MAC_HDR_LEN + 6] == 0x02){ + sprintf(&print_data[print_index], "%s", "GO_NEG_CFM \r\n"); + }else if(data[ACTION_MAC_HDR_LEN] == 0x04 && data[ACTION_MAC_HDR_LEN + 6] == 0x03){ + sprintf(&print_data[print_index], "%s", "P2P_INV_REQ \r\n"); + }else if(data[ACTION_MAC_HDR_LEN] == 0x04 && data[ACTION_MAC_HDR_LEN + 6] == 0x04){ + sprintf(&print_data[print_index], "%s", "P2P_INV_RSP \r\n"); + }else if(data[ACTION_MAC_HDR_LEN] == 0x04 && data[ACTION_MAC_HDR_LEN + 6] == 0x05){ + sprintf(&print_data[print_index], "%s", "DD_REQ \r\n"); + }else if(data[ACTION_MAC_HDR_LEN] == 0x04 && data[ACTION_MAC_HDR_LEN + 6] == 0x06){ + sprintf(&print_data[print_index], "%s", "DD_RSP \r\n"); + }else if(data[ACTION_MAC_HDR_LEN] == 0x04 && data[ACTION_MAC_HDR_LEN + 6] == 0x07){ + sprintf(&print_data[print_index], "%s", "PD_REQ \r\n"); + }else if(data[ACTION_MAC_HDR_LEN] == 0x04 && data[ACTION_MAC_HDR_LEN + 6] == 0x08){ + sprintf(&print_data[print_index], "%s", "PD_RSP \r\n"); + }else{ + sprintf(&print_data[print_index], "%s(0x%x 0x%x) \r\n", "UNKNOW", + data[ACTION_MAC_HDR_LEN], data[ACTION_MAC_HDR_LEN + 6]); + } + print_index = strlen(print_data); + }else if(data[0] == AUTH){ + 
sprintf(&print_data[print_index], "%s", "AUTH \r\n"); + print_index = strlen(print_data); + }else if(data[0] == DEAUTH){ + sprintf(&print_data[print_index], "%s", "DEAUTH \r\n"); + print_index = strlen(print_data); + }else if(data[0] == QOS){ + if(data[QOS_MAC_HDR_LEN + 6] == 0x88 && data[QOS_MAC_HDR_LEN + 7] == 0x8E){ + sprintf(&print_data[print_index], "%s", "QOS_802.1X "); + if(data[QOS_MAC_HDR_LEN + 9] == 0x03){ + sprintf(&print_data[print_index], "%s", "EAPOL \r\n"); + }else if(data[QOS_MAC_HDR_LEN + 9] == 0x00){ + sprintf(&print_data[print_index], "%s", "EAP_PACKAGE "); + print_index = strlen(print_data); + if(data[QOS_MAC_HDR_LEN + 12] == 0x01){ + sprintf(&print_data[print_index], "%s", "EAP_REQ \r\n"); + }else if(data[QOS_MAC_HDR_LEN + 12] == 0x02){ + sprintf(&print_data[print_index], "%s", "EAP_RSP \r\n"); + }else if(data[QOS_MAC_HDR_LEN + 12] == 0x04){ + sprintf(&print_data[print_index], "%s", "EAP_FAIL \r\n"); + }else{ + sprintf(&print_data[print_index], "%s", "UNKNOW \r\n"); + } + }else if(data[QOS_MAC_HDR_LEN + 9] == 0x01){ + sprintf(&print_data[print_index], "%s","EAP_START \r\n"); + } + print_index = strlen(print_data); + } + } + + if(print_index > 0){ + AICWFDBG(LOGDATA, "%s %s", tag, print_data); + } + +#if 0 + if(data[0] == ASSOC_REQ){ + printk("%s %s ASSOC_REQ \r\n", __func__, tag); + }else if(data[0] == ASSOC_RSP){ + printk("%s %s ASSOC_RSP \r\n", __func__, tag); + }else if(data[0] == PROBE_REQ){ + printk("%s %s PROBE_REQ \r\n", __func__, tag); + }else if(data[0] == PROBE_RSP){ + printk("%s %s PROBE_RSP \r\n", __func__, tag); + }else if(data[0] == ACTION){ + printk("%s %s ACTION ", __func__, tag); + if(data[ACTION_MAC_HDR_LEN] == 0x04 && data[ACTION_MAC_HDR_LEN + 6] == 0x00){ + printk("GO NEG REQ \r\n"); + }else if(data[ACTION_MAC_HDR_LEN] == 0x04 && data[ACTION_MAC_HDR_LEN + 6] == 0x01){ + printk("GO NEG RSP \r\n"); + }else if(data[ACTION_MAC_HDR_LEN] == 0x04 && data[ACTION_MAC_HDR_LEN + 6] == 0x02){ + printk("GO NEG CFM \r\n"); + }else if(data[ACTION_MAC_HDR_LEN] == 0x04 && data[ACTION_MAC_HDR_LEN + 6] == 0x03){ + printk("P2P INV REQ \r\n"); + }else if(data[ACTION_MAC_HDR_LEN] == 0x04 && data[ACTION_MAC_HDR_LEN + 6] == 0x04){ + printk("P2P INV RSP \r\n"); + }else if(data[ACTION_MAC_HDR_LEN] == 0x04 && data[ACTION_MAC_HDR_LEN + 6] == 0x05){ + printk("DD REQ \r\n"); + }else if(data[ACTION_MAC_HDR_LEN] == 0x04 && data[ACTION_MAC_HDR_LEN + 6] == 0x06){ + printk("DD RSP \r\n"); + }else if(data[ACTION_MAC_HDR_LEN] == 0x04 && data[ACTION_MAC_HDR_LEN + 6] == 0x07){ + printk("PD REQ \r\n"); + }else if(data[ACTION_MAC_HDR_LEN] == 0x04 && data[ACTION_MAC_HDR_LEN + 6] == 0x08){ + printk("PD RSP \r\n"); + }else{ + printk("\r\n"); + } + + }else if(data[0] == AUTH){ + printk("%s %s AUTH \r\n", __func__, tag); + }else if(data[0] == DEAUTH){ + printk("%s %s DEAUTH \r\n", __func__, tag); + }else if(data[0] == QOS){ + if(data[QOS_MAC_HDR_LEN + 6] == 0x88 && data[QOS_MAC_HDR_LEN + 7] == 0x8E){ + printk("%s %s QOS 802.1X ", __func__, tag); + if(data[QOS_MAC_HDR_LEN + 9] == 0x03){ + printk("EAPOL"); + }else if(data[QOS_MAC_HDR_LEN + 9] == 0x00){ + printk("EAP PACKAGE "); + if(data[QOS_MAC_HDR_LEN + 12] == 0x01){ + printk("EAP REQ\r\n"); + }else if(data[QOS_MAC_HDR_LEN + 12] == 0x02){ + printk("EAP RSP\r\n"); + }else if(data[QOS_MAC_HDR_LEN + 12] == 0x04){ + printk("EAP FAIL\r\n"); + }else{ + printk("\r\n"); + } + }else if(data[QOS_MAC_HDR_LEN + 9] == 0x01){ + printk("EAP START \r\n"); + + } + } + } +#endif +} + +/********************************************************************* + * 
helper + *********************************************************************/ +struct rwnx_sta *rwnx_get_sta(struct rwnx_hw *rwnx_hw, const u8 *mac_addr) +{ + int i; + + for (i = 0; i < NX_REMOTE_STA_MAX; i++) { + struct rwnx_sta *sta = &rwnx_hw->sta_table[i]; + if (sta->valid && (memcmp(mac_addr, &sta->mac_addr, 6) == 0)) + return sta; + } + + return NULL; +} + +void rwnx_enable_wapi(struct rwnx_hw *rwnx_hw) +{ + //cipher_suites[rwnx_hw->wiphy->n_cipher_suites] = WLAN_CIPHER_SUITE_SMS4; + //rwnx_hw->wiphy->n_cipher_suites ++; + rwnx_hw->wiphy->flags |= WIPHY_FLAG_CONTROL_PORT_PROTOCOL; +} + +void rwnx_enable_mfp(struct rwnx_hw *rwnx_hw) +{ + cipher_suites[rwnx_hw->wiphy->n_cipher_suites] = WLAN_CIPHER_SUITE_AES_CMAC; + rwnx_hw->wiphy->n_cipher_suites ++; +} + +#ifdef CONFIG_SET_VENDOR_EXTENSION_IE +extern u8_l vendor_extension_data[256]; +extern u8_l vendor_extension_len; + +void rwnx_insert_vendor_extension_in_bcn(struct rwnx_bcn *bcn){ + u8_l temp_ie[256]; + u8_l vendor_extension_subelement[3] = {0x00,0x37,0x2A}; + u8_l vendor_extension_id[2] = {0x10,0x49}; + u8_l wps_ie[1] = {0xDD}; + u8_l wps_len_index = 0; + int index = 0; + int vendor_extension_subelement_len = 0; + int find_wps_ie = 0; + + memset(temp_ie, 0, 256); + + //find wps_ie + for(index = 0; index < bcn->tail_len; index++){ + if(bcn->tail[index] == wps_ie[0]){ + find_wps_ie = 1; + wps_len_index = index + 1; + } + + if(find_wps_ie && bcn->tail[index] == vendor_extension_id[0]){ + if(bcn->tail[index + 1] == vendor_extension_id[1]){ + break; + } + } + } + + + //find vendor_extension_subelement + for(index = 0; index < bcn->tail_len; index++){ + if(bcn->tail[index] == vendor_extension_id[0]){ + index++; + if(index == bcn->tail_len){ + return; + } + if(bcn->tail[index] == vendor_extension_id[1] && + bcn->tail[index + 3] == vendor_extension_subelement[0]&& + bcn->tail[index + 4] == vendor_extension_subelement[1]&& + bcn->tail[index + 5] == vendor_extension_subelement[2]){ + index = index + 2; + vendor_extension_subelement_len = bcn->tail[index]; + printk("%s find vendor_extension_subelement,index:%d len:%d\r\n", __func__, index, bcn->tail[index]); + break; + } + } + } + index = index + vendor_extension_subelement_len; + + //insert vendor extension + memcpy(&temp_ie[0], bcn->tail, index + 1); + memcpy(&temp_ie[index + 1], vendor_extension_data, vendor_extension_len/*sizeof(vendor_extension_data)*/);//insert vendor extension data + memcpy(&temp_ie[index + 1 + vendor_extension_len/*sizeof(vendor_extension_data)*/], &bcn->tail[index + 1], bcn->tail_len - index); + + memcpy(bcn->tail, temp_ie, bcn->tail_len + vendor_extension_len/*sizeof(vendor_extension_data)*/); + bcn->tail_len = bcn->tail_len + vendor_extension_len/*sizeof(vendor_extension_data)*/; + + bcn->tail[wps_len_index] = bcn->tail[wps_len_index] + vendor_extension_len; + //rwnx_data_dump((char*)__func__, (void*)ie_req->ie, ie_req->add_ie_len); +} +#endif + +u8 *rwnx_build_bcn(struct rwnx_bcn *bcn, struct cfg80211_beacon_data *new) +{ + u8 *buf, *pos; + + if (new->head) { + u8 *head = kmalloc(new->head_len, GFP_KERNEL); + + if (!head) + return NULL; + + if (bcn->head) + kfree(bcn->head); + + bcn->head = head; + bcn->head_len = new->head_len; + memcpy(bcn->head, new->head, new->head_len); + } + if (new->tail) { + u8 *tail = kmalloc(new->tail_len, GFP_KERNEL); + + if (!tail) + return NULL; + + if (bcn->tail) + kfree(bcn->tail); + + bcn->tail = tail; + bcn->tail_len = new->tail_len; + memcpy(bcn->tail, new->tail, new->tail_len); + } + + if (!bcn->head) + return NULL; + + 
bcn->tim_len = 6; + bcn->len = bcn->head_len + bcn->tail_len + bcn->ies_len + bcn->tim_len; +#ifdef CONFIG_SET_VENDOR_EXTENSION_IE + buf = kmalloc(bcn->len + vendor_extension_len, GFP_KERNEL); +#else + buf = kmalloc(bcn->len, GFP_KERNEL); +#endif + if (!buf) + return NULL; + + // Build the beacon buffer + pos = buf; + memcpy(pos, bcn->head, bcn->head_len); + pos += bcn->head_len; + *pos++ = WLAN_EID_TIM; + *pos++ = 4; + *pos++ = 0; + *pos++ = bcn->dtim; + *pos++ = 0; + *pos++ = 0; + if (bcn->tail) { +#ifdef CONFIG_SET_VENDOR_EXTENSION_IE + rwnx_insert_vendor_extension_in_bcn(bcn); +#endif + memcpy(pos, bcn->tail, bcn->tail_len); + pos += bcn->tail_len; + } + if (bcn->ies) { + memcpy(pos, bcn->ies, bcn->ies_len); + } + + return buf; +} + + +static void rwnx_del_bcn(struct rwnx_bcn *bcn) +{ + if (bcn->head) { + kfree(bcn->head); + bcn->head = NULL; + } + bcn->head_len = 0; + + if (bcn->tail) { + kfree(bcn->tail); + bcn->tail = NULL; + } + bcn->tail_len = 0; + + if (bcn->ies) { + kfree(bcn->ies); + bcn->ies = NULL; + } + bcn->ies_len = 0; + bcn->tim_len = 0; + bcn->dtim = 0; + bcn->len = 0; +} + +/** + * Link channel ctxt to a vif and thus increments count for this context. + */ +void rwnx_chanctx_link(struct rwnx_vif *vif, u8 ch_idx, + struct cfg80211_chan_def *chandef) +{ + struct rwnx_chanctx *ctxt; + + if (ch_idx >= NX_CHAN_CTXT_CNT) { + WARN(1, "Invalid channel ctxt id %d", ch_idx); + return; + } + + vif->ch_index = ch_idx; + ctxt = &vif->rwnx_hw->chanctx_table[ch_idx]; + ctxt->count++; + + // For now chandef is NULL for STATION interface + if (chandef) { + if (!ctxt->chan_def.chan) + ctxt->chan_def = *chandef; + else { + // TODO. check that chandef is the same as the one already + // set for this ctxt + } + } +} + +/** + * Unlink channel ctxt from a vif and thus decrements count for this context + */ +void rwnx_chanctx_unlink(struct rwnx_vif *vif) +{ + struct rwnx_chanctx *ctxt; + + if (vif->ch_index == RWNX_CH_NOT_SET) + return; + + ctxt = &vif->rwnx_hw->chanctx_table[vif->ch_index]; + + if (ctxt->count == 0) { + WARN(1, "Chan ctxt ref count is already 0"); + } else { + ctxt->count--; + } + + if (ctxt->count == 0) { + if (vif->ch_index == vif->rwnx_hw->cur_chanctx) { + /* If current chan ctxt is no longer linked to a vif + disable radar detection (no need to check if it was activated) */ + rwnx_radar_detection_enable(&vif->rwnx_hw->radar, + RWNX_RADAR_DETECT_DISABLE, + RWNX_RADAR_RIU); + } + /* set chan to null, so that if this ctxt is relinked to a vif that + don't have channel information, don't use wrong information */ + ctxt->chan_def.chan = NULL; + } + vif->ch_index = RWNX_CH_NOT_SET; +} + +int rwnx_chanctx_valid(struct rwnx_hw *rwnx_hw, u8 ch_idx) +{ + if (ch_idx >= NX_CHAN_CTXT_CNT || + rwnx_hw->chanctx_table[ch_idx].chan_def.chan == NULL) { + return 0; + } + + return 1; +} + +static void rwnx_del_csa(struct rwnx_vif *vif) +{ + //struct rwnx_hw *rwnx_hw = vif->rwnx_hw; + struct rwnx_csa *csa = vif->ap.csa; + + if (!csa) + return; + + rwnx_del_bcn(&csa->bcn); + kfree(csa); + vif->ap.csa = NULL; +} +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0) +static void rwnx_csa_finish(struct work_struct *ws) +{ + + struct rwnx_csa *csa = container_of(ws, struct rwnx_csa, work); + struct rwnx_vif *vif = csa->vif; + struct rwnx_hw *rwnx_hw = vif->rwnx_hw; + int error = csa->status; + u8 *buf, *pos; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + buf = kmalloc(csa->bcn.len, GFP_KERNEL); + if (!buf) { + printk ("%s buf fail\n", __func__); + return; + } + pos = buf; + + memcpy(pos, csa->bcn.head, 
csa->bcn.head_len); + pos += csa->bcn.head_len; + *pos++ = WLAN_EID_TIM; + *pos++ = 4; + *pos++ = 0; + *pos++ = csa->bcn.dtim; + *pos++ = 0; + *pos++ = 0; + if (csa->bcn.tail) { + memcpy(pos, csa->bcn.tail, csa->bcn.tail_len); + pos += csa->bcn.tail_len; + } + if (csa->bcn.ies) { + memcpy(pos, csa->bcn.ies, csa->bcn.ies_len); + } + + if (!error) { + error = rwnx_send_bcn(rwnx_hw, buf, vif->vif_index, csa->bcn.len); + if (error) + return; + error = rwnx_send_bcn_change(rwnx_hw, vif->vif_index, csa->elem.dma_addr, + csa->bcn.len, csa->bcn.head_len, + csa->bcn.tim_len, NULL); + } + + if (error) { + #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)) + cfg80211_stop_iface(rwnx_hw->wiphy, &vif->wdev, GFP_KERNEL); + #else + cfg80211_disconnected(vif->ndev, 0, NULL, 0, 0, GFP_KERNEL); + #endif + } else { +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 12, 0)) + wiphy_lock(rwnx_hw->wiphy); +#else + mutex_lock(&vif->wdev.mtx); + __acquire(&vif->wdev.mtx); +#endif + spin_lock_bh(&rwnx_hw->cb_lock); + rwnx_chanctx_unlink(vif); + rwnx_chanctx_link(vif, csa->ch_idx, &csa->chandef); + if (rwnx_hw->cur_chanctx == csa->ch_idx) { + rwnx_radar_detection_enable_on_cur_channel(rwnx_hw); + rwnx_txq_vif_start(vif, RWNX_TXQ_STOP_CHAN, rwnx_hw); + } else + rwnx_txq_vif_stop(vif, RWNX_TXQ_STOP_CHAN, rwnx_hw); + spin_unlock_bh(&rwnx_hw->cb_lock); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 9, 0)) + cfg80211_ch_switch_notify(vif->ndev, &csa->chandef, 0); +#elif (LINUX_VERSION_CODE >= HIGH_KERNEL_VERSION3) + cfg80211_ch_switch_notify(vif->ndev, &csa->chandef, 0, 0); +#elif (LINUX_VERSION_CODE >= HIGH_KERNEL_VERSION) + cfg80211_ch_switch_notify(vif->ndev, &csa->chandef, 0); +#else + cfg80211_ch_switch_notify(vif->ndev, &csa->chandef); +#endif +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 12, 0)) + wiphy_unlock(rwnx_hw->wiphy); +#else + mutex_unlock(&vif->wdev.mtx); + __release(&vif->wdev.mtx); +#endif + } + rwnx_del_csa(vif); +} +#endif + +/** + * rwnx_external_auth_enable - Enable external authentication on a vif + * + * @vif: VIF on which external authentication must be enabled + * + * External authentication requires starting the TXQ for unknown STA in + * order to send auth frames pushed by user space. + * Note: It is assumed that fw is on the correct channel. + */ +void rwnx_external_auth_enable(struct rwnx_vif *vif) +{ + vif->sta.external_auth = true; + rwnx_txq_unk_vif_init(vif); + rwnx_txq_start(rwnx_txq_vif_get(vif, NX_UNK_TXQ_TYPE), 0); +} + +/** + * rwnx_external_auth_disable - Disable external authentication on a vif + * + * @vif: VIF on which external authentication must be disabled + */ +void rwnx_external_auth_disable(struct rwnx_vif *vif) +{ + if (!vif->sta.external_auth) + return; + + vif->sta.external_auth = false; + rwnx_txq_unk_vif_deinit(vif); +} + +/** + * rwnx_update_mesh_power_mode - + * + * @vif: mesh VIF for which power mode is updated + * + * Does nothing if vif is not a mesh point interface. + * Since firmware doesn't support one power save mode per link, select the + * most "active" power mode among all mesh links. + * Indeed as soon as we have to be active on one link we might as well be + * active on all links. 
+ * + * If there is no link then the power mode for next peer is used; + */ +void rwnx_update_mesh_power_mode(struct rwnx_vif *vif) +{ + enum nl80211_mesh_power_mode mesh_pm; + struct rwnx_sta *sta; + struct mesh_config mesh_conf; + struct mesh_update_cfm cfm; + u32 mask; + + if (RWNX_VIF_TYPE(vif) != NL80211_IFTYPE_MESH_POINT) + return; + + if (list_empty(&vif->ap.sta_list)) { + mesh_pm = vif->ap.next_mesh_pm; + } else { + mesh_pm = NL80211_MESH_POWER_DEEP_SLEEP; + list_for_each_entry(sta, &vif->ap.sta_list, list) { + if (sta->valid && (sta->mesh_pm < mesh_pm)) { + mesh_pm = sta->mesh_pm; + } + } + } + + if (mesh_pm == vif->ap.mesh_pm) + return; + + mask = BIT(NL80211_MESHCONF_POWER_MODE - 1); + mesh_conf.power_mode = mesh_pm; + if (rwnx_send_mesh_update_req(vif->rwnx_hw, vif, mask, &mesh_conf, &cfm) || + cfm.status) + return; + + vif->ap.mesh_pm = mesh_pm; +} + +#ifdef CONFIG_BAND_STEERING +void aicwf_steering_work(struct work_struct *work) +{ + struct rwnx_vif *rwnx_vif = container_of(work, struct rwnx_vif, steer_work); + struct rwnx_sta *sta; + + if (rwnx_vif->up == false) { + AICWFDBG(LOGERROR, "%s vif is down\n", __func__); + return; + } + + //printk("%s\n", __func__); + aicwf_nl_send_intf_rpt_msg(rwnx_vif); + aicwf_nl_send_time_tick_msg(rwnx_vif); + aicwf_band_steering_expire(rwnx_vif); + + spin_lock_bh(&rwnx_vif->rwnx_hw->cb_lock); + if(list_empty(&rwnx_vif->ap.sta_list)) { + spin_unlock_bh(&rwnx_vif->rwnx_hw->cb_lock); + goto finish; + } + list_for_each_entry(sta, &rwnx_vif->ap.sta_list, list) { + if (sta->valid) { + sta->link_time +=2; + aicwf_nl_send_sta_rpt_msg(rwnx_vif, sta); + } + } + spin_unlock_bh(&rwnx_vif->rwnx_hw->cb_lock); + +finish: + mod_timer(&rwnx_vif->steer_timer, jiffies + msecs_to_jiffies(STEER_UPFATE_TIME)); +} + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0) +void aicwf_steering_timeout(ulong data) +#else +void aicwf_steering_timeout(struct timer_list *t) +#endif +{ +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0) + struct rwnx_vif *rwnx_vif = (struct rwnx_vif *)data; +#else + struct rwnx_vif *rwnx_vif = container_of(t, struct rwnx_vif, steer_timer); +#endif + + if (rwnx_vif->up == false) { + AICWFDBG(LOGERROR, "%s vif is down\n", __func__); + return; + } + + if (!work_pending(&rwnx_vif->steer_work)) + schedule_work(&rwnx_vif->steer_work); + + return; +} +#endif + + +#ifdef CONFIG_BR_SUPPORT +void netdev_br_init(struct net_device *netdev) +{ + struct rwnx_vif *rwnx_vif = netdev_priv(netdev); + +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 35)) + rcu_read_lock(); +#endif + + /* if(check_fwstate(pmlmepriv, WIFI_STATION_STATE|WIFI_ADHOC_STATE) == _TRUE) */ + { + /* struct net_bridge *br = netdev->br_port->br; */ /* ->dev->dev_addr; */ + #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 35)) + if (netdev->br_port) + #else + if (rcu_dereference(rwnx_vif->ndev->rx_handler_data)) + #endif + { + struct net_device *br_netdev; + + br_netdev = dev_get_by_name(&init_net, CONFIG_BR_SUPPORT_BRNAME); + if (br_netdev) { + memcpy(rwnx_vif->br_mac, br_netdev->dev_addr, ETH_ALEN); + dev_put(br_netdev); + AICWFDBG(LOGINFO, FUNC_NDEV_FMT" bind bridge dev "NDEV_FMT"("MAC_FMT")\n" + , FUNC_NDEV_ARG(netdev), NDEV_ARG(br_netdev), MAC_ARG(br_netdev->dev_addr)); + } else { + AICWFDBG(LOGINFO, FUNC_NDEV_FMT" can't get bridge dev by name \"%s\"\n" + , FUNC_NDEV_ARG(netdev), CONFIG_BR_SUPPORT_BRNAME); + } + } + + rwnx_vif->ethBrExtInfo.addPPPoETag = 1; + } + +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 35)) + rcu_read_unlock(); +#endif +} +#endif /* CONFIG_BR_SUPPORT */ + +void 
rwnx_set_conn_state(atomic_t *drv_conn_state, int state){ + + if((int)atomic_read(drv_conn_state) != state){ + AICWFDBG(LOGDEBUG, "%s drv_conn_state:%p %s --> %s \r\n", __func__, + drv_conn_state, + s_conn_state[(int)atomic_read(drv_conn_state)], + s_conn_state[state]); + + atomic_set(drv_conn_state, state); + } +} + + +/********************************************************************* + * netdev callbacks + ********************************************************************/ +/** + * int (*ndo_open)(struct net_device *dev); + * This function is called when network device transistions to the up + * state. + * + * - Start FW if this is the first interface opened + * - Add interface at fw level + */ +static int rwnx_open(struct net_device *dev) +{ + struct rwnx_vif *rwnx_vif = netdev_priv(dev); + struct rwnx_hw *rwnx_hw = rwnx_vif->rwnx_hw; + struct mm_add_if_cfm add_if_cfm; + int error = 0; + int err = 0; + u8 rwnx_rx_gain = 0x0E; + int waiting_counter = 10; + + RWNX_DBG(RWNX_FN_ENTRY_STR); +#ifdef CONFIG_WOWLAN + rwnx_send_set_pkt_filter_req(rwnx_hw, wowlan_param1); +#endif + while(test_bit(RWNX_DEV_STARTED, &rwnx_vif->drv_flags)){ + msleep(100); + AICWFDBG(LOGDEBUG, "%s waiting for rwnx_close \r\n", __func__); + waiting_counter--; + if(waiting_counter == 0){ + AICWFDBG(LOGERROR, "%s error waiting for close time out \r\n", __func__); + break; + } + } + // Check if it is the first opened VIF + if (rwnx_hw->vif_started == 0) + { + // Start the FW + if ((error = rwnx_send_start(rwnx_hw))) + return error; + + if (rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800DC || rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800DW) { + error = rwnx_send_dbg_mem_mask_write_req(rwnx_hw, 0x4033b300, 0xFF, rwnx_rx_gain); + if(error){ + return error; + } + } +#ifdef CONFIG_COEX + if (testmode == 0) { + rwnx_send_coex_req(rwnx_hw, 0, 1); + } +#endif + + /* Device is now started */ + } + + set_bit(RWNX_DEV_STARTED, &rwnx_vif->drv_flags); + rwnx_set_conn_state(&rwnx_vif->drv_conn_state, RWNX_DRV_STATUS_DISCONNECTED); + AICWFDBG(LOGDEBUG, "%s rwnx_vif->drv_flags:%d\r\n", __func__, (int)rwnx_vif->drv_flags); + + + if (RWNX_VIF_TYPE(rwnx_vif) == NL80211_IFTYPE_P2P_CLIENT || RWNX_VIF_TYPE(rwnx_vif) == NL80211_IFTYPE_P2P_GO) { + if (!rwnx_hw->is_p2p_alive) { + if (rwnx_hw->p2p_dev_vif && !rwnx_hw->p2p_dev_vif->up) { + err = rwnx_send_add_if (rwnx_hw, rwnx_hw->p2p_dev_vif->wdev.address, + RWNX_VIF_TYPE(rwnx_hw->p2p_dev_vif), false, &add_if_cfm); + if (err) { + return -EIO; + } + + if (add_if_cfm.status != 0) { + return -EIO; + } + + /* Save the index retrieved from LMAC */ + spin_lock_bh(&rwnx_hw->cb_lock); + rwnx_hw->p2p_dev_vif->vif_index = add_if_cfm.inst_nbr; + rwnx_hw->p2p_dev_vif->up = true; + rwnx_hw->vif_started++; + rwnx_hw->vif_table[add_if_cfm.inst_nbr] = rwnx_hw->p2p_dev_vif; + spin_unlock_bh(&rwnx_hw->cb_lock); + } + rwnx_hw->is_p2p_alive = 1; +#ifndef CONFIG_USE_P2P0 + mod_timer(&rwnx_hw->p2p_alive_timer, jiffies + msecs_to_jiffies(1000)); + atomic_set(&rwnx_hw->p2p_alive_timer_count, 0); +#endif + } + } + + if (RWNX_VIF_TYPE(rwnx_vif) == NL80211_IFTYPE_AP_VLAN) { + /* For AP_vlan use same fw and drv indexes. 
We ensure that this index + will not be used by fw for another vif by taking index >= NX_VIRT_DEV_MAX */ + add_if_cfm.inst_nbr = rwnx_vif->drv_vif_index; + netif_tx_stop_all_queues(dev); + + /* Save the index retrieved from LMAC */ + spin_lock_bh(&rwnx_hw->cb_lock); + rwnx_vif->vif_index = add_if_cfm.inst_nbr; + rwnx_vif->up = true; + rwnx_hw->vif_started++; + rwnx_hw->vif_table[add_if_cfm.inst_nbr] = rwnx_vif; + AICWFDBG(LOGDEBUG, "%s ap create vif in rwnx_hw->vif_table[%d] \r\n", + __func__, rwnx_vif->vif_index); + spin_unlock_bh(&rwnx_hw->cb_lock); + } else { + /* Forward the information to the LMAC, + * p2p value not used in FMAC configuration, iftype is sufficient */ + if ((error = rwnx_send_add_if(rwnx_hw, rwnx_vif->wdev.address, + RWNX_VIF_TYPE(rwnx_vif), false, &add_if_cfm))) { + AICWFDBG(LOGERROR, "add if fail\n"); + return error; + } + + if (add_if_cfm.status != 0) { + RWNX_PRINT_CFM_ERR(add_if); + return -EIO; + } + + /* Save the index retrieved from LMAC */ + spin_lock_bh(&rwnx_hw->cb_lock); + rwnx_vif->vif_index = add_if_cfm.inst_nbr; + rwnx_vif->up = true; + rwnx_hw->vif_started++; + rwnx_hw->vif_table[add_if_cfm.inst_nbr] = rwnx_vif; + AICWFDBG(LOGDEBUG, "%s sta create vif in rwnx_hw->vif_table[%d] \r\n", + __func__, rwnx_vif->vif_index); + spin_unlock_bh(&rwnx_hw->cb_lock); +#ifdef CONFIG_USE_P2P0 + if(rwnx_vif->is_p2p_vif){ + rwnx_hw->p2p_dev_vif = rwnx_vif; + rwnx_hw->is_p2p_alive = 1; + } +#endif + } + + if (RWNX_VIF_TYPE(rwnx_vif) == NL80211_IFTYPE_MONITOR){ + rwnx_hw->monitor_vif = rwnx_vif->vif_index; + if (rwnx_vif->ch_index != RWNX_CH_NOT_SET){ + //Configure the monitor channel + error = rwnx_send_config_monitor_req(rwnx_hw, &rwnx_hw->chanctx_table[rwnx_vif->ch_index].chan_def, NULL); + } + #if defined(CONFIG_RWNX_MON_XMIT) + rwnx_txq_unk_vif_init(rwnx_vif); + #endif + #if defined(CONFIG_RWNX_MON_RXFILTER) + rwnx_send_set_filter(rwnx_hw, (FIF_BCN_PRBRESP_PROMISC | FIF_OTHER_BSS | FIF_PSPOLL | FIF_PROBE_REQ)); + #endif + } + + //netif_carrier_off(dev); + #if defined(CONFIG_RWNX_MON_XMIT) + netif_carrier_on(dev); + AICWFDBG(LOGINFO, "monitor xmit: netif_carrier_on\n"); + #endif + +#ifdef CONFIG_BR_SUPPORT + netdev_br_init(dev); +#endif /* CONFIG_BR_SUPPORT */ + + netif_carrier_off(dev); + netif_start_queue(dev); + + return error; +} + +/** + * int (*ndo_stop)(struct net_device *dev); + * This function is called when network device transistions to the down + * state. 
+ * + * - Remove interface at fw level + * - Reset FW if this is the last interface opened + */ +static int rwnx_close(struct net_device *dev) +{ + struct rwnx_vif *rwnx_vif = netdev_priv(dev); + struct rwnx_hw *rwnx_hw = rwnx_vif->rwnx_hw; + struct aicwf_bus *bus_if = NULL; + int ret = 0; + int waiting_counter = 20; + int test_counter = 0; +#if defined(AICWF_USB_SUPPORT) + struct aic_usb_dev *usbdev = NULL; + bus_if = dev_get_drvdata(rwnx_hw->dev); + if (bus_if) + usbdev = bus_if->bus_priv.usb; +#endif +#if defined(AICWF_SDIO_SUPPORT) + struct aic_sdio_dev *sdiodev = NULL; + bus_if = dev_get_drvdata(rwnx_hw->dev); + if (bus_if) + sdiodev = bus_if->bus_priv.sdio; +#endif + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + test_counter = waiting_counter; + while(atomic_read(&rwnx_vif->drv_conn_state) == (int)RWNX_DRV_STATUS_DISCONNECTING|| + atomic_read(&rwnx_vif->drv_conn_state) == (int)RWNX_DRV_STATUS_CONNECTING){ + AICWFDBG(LOGDEBUG, "%s wifi is connecting or disconnecting, waiting 200ms for state to stable\r\n", __func__); + msleep(200); + test_counter--; + if(test_counter == 0){ + AICWFDBG(LOGERROR, "%s connecting or disconnecting, not finish\r\n", __func__); + WARN_ON(1); + break; + } + } + +#if defined(AICWF_USB_SUPPORT) || defined(AICWF_SDIO_SUPPORT) + if (rwnx_hw->scanning){ + rwnx_hw->scanning = false; + } + + if(rwnx_hw->p2p_working){ + rwnx_hw->p2p_working = false; + } +#endif +#if 0 + netdev_info(dev, "CLOSE"); +#endif + AICWFDBG(LOGINFO, "%s %s Enter\n", __func__, dev->name); + +#if 0 +#ifdef CONFIG_USE_P2P0 + if(rwnx_hw->p2p_dev_vif){ + atomic_set(&rwnx_hw->p2p_alive_timer_count, P2P_ALIVE_TIME_MS); + rwnx_hw->is_p2p_alive = 0; + } +#endif +#endif + + rwnx_radar_cancel_cac(&rwnx_hw->radar); + + /* Abort scan request on the vif */ + if (rwnx_hw->scan_request && + rwnx_hw->scan_request->wdev == &rwnx_vif->wdev) { +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0) + struct cfg80211_scan_info info = { + .aborted = true, + }; + + cfg80211_scan_done(rwnx_hw->scan_request, &info); +#else + cfg80211_scan_done(rwnx_hw->scan_request, true); +#endif + rwnx_hw->scan_request = NULL; + ret = rwnx_send_scanu_cancel_req(rwnx_hw, NULL); + mdelay(35);//make sure firmware take affect + if (ret) { + AICWFDBG(LOGERROR, "scanu_cancel fail\n"); + return ret; + } + } + +#if defined(AICWF_USB_SUPPORT) + if (usbdev != NULL) { + if (usbdev->bus_if->state != BUS_DOWN_ST && usbdev->state != USB_DOWN_ST) + AICWFDBG(LOGINFO, "state: %d %d \r\n", (int)usbdev->bus_if->state, (int)usbdev->state); + + if(RWNX_VIF_TYPE(rwnx_vif) == NL80211_IFTYPE_STATION || + RWNX_VIF_TYPE(rwnx_vif) == NL80211_IFTYPE_P2P_CLIENT){ + test_counter = waiting_counter; + if(atomic_read(&rwnx_vif->drv_conn_state) == (int)RWNX_DRV_STATUS_CONNECTED){ + rwnx_set_conn_state(&rwnx_vif->drv_conn_state, RWNX_DRV_STATUS_DISCONNECTING); + rwnx_send_sm_disconnect_req(rwnx_hw, rwnx_vif, 3); + while (atomic_read(&rwnx_vif->drv_conn_state) == (int)RWNX_DRV_STATUS_DISCONNECTING) { + AICWFDBG(LOGDEBUG, "%s wifi is disconnecting, waiting 100ms for state to stable\r\n", __func__); + msleep(100); + test_counter--; + if (test_counter ==0) + break; + } + } + } +#ifdef CONFIG_USE_P2P0 + if(!rwnx_vif->is_p2p_vif || ( rwnx_vif->is_p2p_vif && rwnx_hw->is_p2p_alive)){ + if (rwnx_vif->is_p2p_vif) + rwnx_hw->is_p2p_alive = 0; +#endif + rwnx_send_remove_if(rwnx_hw, rwnx_vif->vif_index, false); +#ifdef CONFIG_USE_P2P0 + } +#endif + + } +#endif +#if defined(AICWF_SDIO_SUPPORT) + if (sdiodev != NULL ) { + if (sdiodev->bus_if->state != BUS_DOWN_ST) + rwnx_send_remove_if(rwnx_hw, 
rwnx_vif->vif_index, false); + } +#endif + + if (rwnx_hw->roc_elem && (rwnx_hw->roc_elem->wdev == &rwnx_vif->wdev)) { + AICWFDBG(LOGINFO, "%s clear roc\n", __func__); + /* Initialize RoC element pointer to NULL, indicate that RoC can be started */ + rwnx_hw->roc_elem = NULL; + } + + /* Ensure that we won't process disconnect ind */ + spin_lock_bh(&rwnx_hw->cb_lock); + + rwnx_vif->up = false; + AICWFDBG(LOGDEBUG, "%s rwnx_vif[%d] down \r\n", __func__, rwnx_vif->vif_index); + + if (netif_carrier_ok(dev)) { + if (RWNX_VIF_TYPE(rwnx_vif) == NL80211_IFTYPE_STATION || + RWNX_VIF_TYPE(rwnx_vif) == NL80211_IFTYPE_P2P_CLIENT) { + cfg80211_disconnected(dev, WLAN_REASON_DEAUTH_LEAVING, + NULL, 0, true, GFP_ATOMIC); + netif_tx_stop_all_queues(dev); + netif_carrier_off(dev); + } else if (RWNX_VIF_TYPE(rwnx_vif) == NL80211_IFTYPE_AP_VLAN) { + netif_carrier_off(dev); + } else { + netdev_warn(dev, "AP not stopped when disabling interface"); + } +#ifdef CONFIG_BR_SUPPORT + /* if (OPMODE & (WIFI_STATION_STATE | WIFI_ADHOC_STATE)) */ + { + /* void nat25_db_cleanup(_adapter *priv); */ + nat25_db_cleanup(rwnx_vif); + } +#endif /* CONFIG_BR_SUPPORT */ + } + + + rwnx_hw->vif_table[rwnx_vif->vif_index] = NULL; + spin_unlock_bh(&rwnx_hw->cb_lock); + + rwnx_chanctx_unlink(rwnx_vif); + + if (RWNX_VIF_TYPE(rwnx_vif) == NL80211_IFTYPE_MONITOR) + rwnx_hw->monitor_vif = RWNX_INVALID_VIF; + + + rwnx_hw->vif_started--; + if (rwnx_hw->vif_started == 0) { + /* This also lets both ipc sides remain in sync before resetting */ + #if 0 + rwnx_ipc_tx_drain(rwnx_hw); + #else + #if defined(AICWF_USB_SUPPORT) + if (usbdev->bus_if->state != BUS_DOWN_ST && usbdev->state != USB_DOWN_ST) + #else + if (sdiodev->bus_if->state != BUS_DOWN_ST) + #endif + { + rwnx_send_reset(rwnx_hw); + #if defined(AICWF_USB_SUPPORT) + if (rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8801 || + ((rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800DC || + rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800DW || + rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800D81 || + rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800D81X2 || + rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800D89X2)&& testmode == 0)) { + #endif + // Set parameters to firmware + rwnx_send_me_config_req(rwnx_hw); + // Set channel parameters to firmware + rwnx_send_me_chan_config_req(rwnx_hw, country_code); + #if defined(AICWF_USB_SUPPORT) + } + #endif + } + #endif + } + + clear_bit(RWNX_DEV_STARTED, &rwnx_vif->drv_flags); + AICWFDBG(LOGDEBUG, "%s rwnx_vif->drv_flags:%d\r\n", __func__, (int)rwnx_vif->drv_flags); + return 0; +} + + +#define IOCTL_HOSTAPD (SIOCIWFIRSTPRIV+28) +#define IOCTL_WPAS (SIOCIWFIRSTPRIV+30) +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0) +static int rwnx_do_ioctl(struct net_device *net, struct ifreq *req, void __user *data, int cmd) +#else +static int rwnx_do_ioctl(struct net_device *net, struct ifreq *req, int cmd) +#endif +{ + int ret = 0; + ///TODO: add ioctl command handler later + switch(cmd) + { + case IOCTL_HOSTAPD: + AICWFDBG(LOGINFO, "IOCTL_HOSTAPD\n"); + break; + case IOCTL_WPAS: + AICWFDBG(LOGINFO, "IOCTL_WPAS\n"); + break; + case SIOCDEVPRIVATE: + AICWFDBG(LOGINFO, "IOCTL SIOCDEVPRIVATE\n"); + break; + case (SIOCDEVPRIVATE+1): + AICWFDBG(LOGINFO, "IOCTL PRIVATE\n"); + ret = android_priv_cmd(net, req, cmd); + break; + default: + ret = -EOPNOTSUPP; + } + return ret; +} + +/** + * struct net_device_stats* (*ndo_get_stats)(struct net_device *dev); + * Called when a user wants to get the network device usage + * statistics. Drivers must do one of the following: + * 1. 
Define @ndo_get_stats64 to fill in a zero-initialised + * rtnl_link_stats64 structure passed by the caller. + * 2. Define @ndo_get_stats to update a net_device_stats structure + * (which should normally be dev->stats) and return a pointer to + * it. The structure may be changed asynchronously only if each + * field is written atomically. + * 3. Update dev->stats asynchronously and atomically, and define + * neither operation. + */ +static struct net_device_stats *rwnx_get_stats(struct net_device *dev) +{ + struct rwnx_vif *vif = netdev_priv(dev); + + return &vif->net_stats; +} + +/** + * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb, + * struct net_device *sb_dev); + * Called to decide which queue to use when device supports multiple + * transmit queues. + */ +u16 rwnx_select_queue(struct net_device *dev, struct sk_buff *skb, + struct net_device *sb_dev) +{ + struct rwnx_vif *rwnx_vif = netdev_priv(dev); + return rwnx_select_txq(rwnx_vif, skb); +} + +/** + * int (*ndo_set_mac_address)(struct net_device *dev, void *addr); + * This function is called when the Media Access Control address + * needs to be changed. If this interface is not defined, the + * mac address can not be changed. + */ +static int rwnx_set_mac_address(struct net_device *dev, void *addr) +{ + struct sockaddr *sa = addr; + int ret; + struct rwnx_vif *rwnx_vif = netdev_priv(dev); + printk("%s enter \r\n", __func__); + + ret = eth_mac_addr(dev, sa); + memcpy(rwnx_vif->wdev.address, dev->dev_addr, 6); + + return ret; +} + +static const struct net_device_ops rwnx_netdev_ops = { + .ndo_open = rwnx_open, + .ndo_stop = rwnx_close, +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0) + .ndo_siocdevprivate = rwnx_do_ioctl, +#else + .ndo_do_ioctl = rwnx_do_ioctl, +#endif + .ndo_start_xmit = rwnx_start_xmit, + .ndo_get_stats = rwnx_get_stats, +#ifndef CONFIG_ONE_TXQ + .ndo_select_queue = rwnx_select_queue, +#endif +#ifdef CONFIG_SUPPORT_REALTIME_CHANGE_MAC + .ndo_set_mac_address = rwnx_set_mac_address +#endif +// .ndo_set_features = rwnx_set_features, +// .ndo_set_rx_mode = rwnx_set_multicast_list, +}; + +static const struct net_device_ops rwnx_netdev_monitor_ops = { + .ndo_open = rwnx_open, + .ndo_stop = rwnx_close, + #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0) + .ndo_siocdevprivate = rwnx_do_ioctl, + #else + .ndo_do_ioctl = rwnx_do_ioctl, + #endif + #ifdef CONFIG_RWNX_MON_XMIT + .ndo_start_xmit = rwnx_start_monitor_if_xmit, + .ndo_select_queue = rwnx_select_queue, + #endif + .ndo_get_stats = rwnx_get_stats, + .ndo_set_mac_address = rwnx_set_mac_address, +}; + +static void rwnx_netdev_setup(struct net_device *dev) +{ + ether_setup(dev); + dev->priv_flags &= ~IFF_TX_SKB_SHARING; + dev->netdev_ops = &rwnx_netdev_ops; +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0) + dev->destructor = free_netdev; +#else + dev->needs_free_netdev = true; +#endif + dev->watchdog_timeo = RWNX_TX_LIFETIME_MS; + + dev->needed_headroom = sizeof(struct rwnx_txhdr) + RWNX_SWTXHDR_ALIGN_SZ - 14; +#ifdef CONFIG_RWNX_AMSDUS_TX + dev->needed_headroom = max(dev->needed_headroom, + (unsigned short)(sizeof(struct rwnx_amsdu_txhdr) + + sizeof(struct ethhdr) + 4 + + sizeof(rfc1042_header) + 2)); +#endif /* CONFIG_RWNX_AMSDUS_TX */ + + dev->hw_features = 0; +} + +#ifndef CONFIG_USE_WIRELESS_EXT +#ifdef CONFIG_WIRELESS_EXT + #include <net/iw_handler.h> + struct iw_handler_def aic_handlers_def; +#endif +#endif + + +/********************************************************************* + * Cfg80211 callbacks (and helper) + 
*********************************************************************/ +static struct rwnx_vif *rwnx_interface_add(struct rwnx_hw *rwnx_hw, + const char *name, + unsigned char name_assign_type, + enum nl80211_iftype type, + struct vif_params *params) +{ + struct net_device *ndev; + struct rwnx_vif *vif; + int min_idx, max_idx; + int vif_idx = -1; + int i; + int nx_nb_ndev_txq = NX_NB_NDEV_TXQ; + + if((g_rwnx_plat->usbdev->chipid == PRODUCT_ID_AIC8801) || + ((g_rwnx_plat->usbdev->chipid == PRODUCT_ID_AIC8800DC || + g_rwnx_plat->usbdev->chipid == PRODUCT_ID_AIC8800DW) && chip_id < 3)){ + nx_nb_ndev_txq = NX_NB_NDEV_TXQ_FOR_OLD_IC; + } + + AICWFDBG(LOGINFO, "rwnx_interface_add: %s, %d, %d\r\n", name, type, NL80211_IFTYPE_P2P_DEVICE); + // Look for an available VIF + if (type == NL80211_IFTYPE_AP_VLAN) { + min_idx = NX_VIRT_DEV_MAX; + max_idx = NX_VIRT_DEV_MAX + NX_REMOTE_STA_MAX; + } else { + min_idx = 0; + max_idx = NX_VIRT_DEV_MAX; + } + + for (i = min_idx; i < max_idx; i++) { + if ((rwnx_hw->avail_idx_map) & BIT(i)) { + vif_idx = i; + break; + } + } + if (vif_idx < 0) + return NULL; + + #ifndef CONFIG_RWNX_MON_DATA + list_for_each_entry(vif, &rwnx_hw->vifs, list) { + // Check if monitor interface already exists or type is monitor + if ((RWNX_VIF_TYPE(vif) == NL80211_IFTYPE_MONITOR) || + (type == NL80211_IFTYPE_MONITOR)) { + wiphy_err(rwnx_hw->wiphy, + "Monitor+Data interface support (MON_DATA) disabled\n"); + return NULL; + } + } + #endif + +#ifndef CONFIG_ONE_TXQ + ndev = alloc_netdev_mqs(sizeof(*vif), name, name_assign_type, + rwnx_netdev_setup, nx_nb_ndev_txq, 1); +#else + ndev = alloc_netdev_mqs(sizeof(*vif), name, name_assign_type, + rwnx_netdev_setup, 1, 1); +#endif + + if (!ndev) + return NULL; + + vif = netdev_priv(ndev); + vif->key_has_add = 0; + ndev->ieee80211_ptr = &vif->wdev; + vif->wdev.wiphy = rwnx_hw->wiphy; + vif->rwnx_hw = rwnx_hw; + vif->ndev = ndev; + vif->drv_vif_index = vif_idx; + SET_NETDEV_DEV(ndev, wiphy_dev(vif->wdev.wiphy)); + vif->wdev.netdev = ndev; + vif->wdev.iftype = type; + vif->up = false; + vif->ch_index = RWNX_CH_NOT_SET; + memset(&vif->net_stats, 0, sizeof(vif->net_stats)); + vif->is_p2p_vif = 0; +#ifdef CONFIG_BR_SUPPORT + spin_lock_init(&vif->br_ext_lock); +#endif /* CONFIG_BR_SUPPORT */ + + switch (type) { + case NL80211_IFTYPE_STATION: + vif->sta.ap = NULL; + vif->sta.tdls_sta = NULL; + vif->sta.external_auth = false; + break; + case NL80211_IFTYPE_P2P_CLIENT: + vif->sta.ap = NULL; + vif->sta.tdls_sta = NULL; + vif->sta.external_auth = false; + vif->is_p2p_vif = 1; + break; + case NL80211_IFTYPE_MESH_POINT: + INIT_LIST_HEAD(&vif->ap.mpath_list); + INIT_LIST_HEAD(&vif->ap.proxy_list); + vif->ap.create_path = false; + vif->ap.generation = 0; + vif->ap.mesh_pm = NL80211_MESH_POWER_ACTIVE; + vif->ap.next_mesh_pm = NL80211_MESH_POWER_ACTIVE; + // no break + case NL80211_IFTYPE_AP: + INIT_LIST_HEAD(&vif->ap.sta_list); + memset(&vif->ap.bcn, 0, sizeof(vif->ap.bcn)); + break; + case NL80211_IFTYPE_P2P_GO: + INIT_LIST_HEAD(&vif->ap.sta_list); + memset(&vif->ap.bcn, 0, sizeof(vif->ap.bcn)); + vif->is_p2p_vif = 1; + break; + case NL80211_IFTYPE_AP_VLAN: + { + struct rwnx_vif *master_vif; + bool found = false; + list_for_each_entry(master_vif, &rwnx_hw->vifs, list) { + if ((RWNX_VIF_TYPE(master_vif) == NL80211_IFTYPE_AP) && + !(!memcmp(master_vif->ndev->dev_addr, params->macaddr, + ETH_ALEN))) { + found=true; + break; + } + } + + if (!found) + goto err; + + vif->ap_vlan.master = master_vif; + vif->ap_vlan.sta_4a = NULL; + break; + } + case 
NL80211_IFTYPE_MONITOR: + ndev->type = ARPHRD_IEEE80211_RADIOTAP; + ndev->netdev_ops = &rwnx_netdev_monitor_ops; + break; + default: + break; + } + + if (type == NL80211_IFTYPE_AP_VLAN) { + memcpy((void *)ndev->dev_addr, (const void *)params->macaddr, ETH_ALEN); + memcpy((void *)vif->wdev.address, (const void *)params->macaddr, ETH_ALEN); + } else { +#if LINUX_VERSION_CODE > KERNEL_VERSION(5, 17, 0) + unsigned char mac_addr[6]; + memcpy(mac_addr, rwnx_hw->wiphy->perm_addr, ETH_ALEN); + mac_addr[5] ^= vif_idx; + //memcpy(ndev->dev_addr, mac_addr, ETH_ALEN); + eth_hw_addr_set(ndev, mac_addr); + memcpy(vif->wdev.address, mac_addr, ETH_ALEN); +#else + memcpy(ndev->dev_addr, rwnx_hw->wiphy->perm_addr, ETH_ALEN); + ndev->dev_addr[5] ^= vif_idx; + memcpy(vif->wdev.address, ndev->dev_addr, ETH_ALEN); +#endif + } + + + AICWFDBG(LOGINFO, "interface add:%x %x %x %x %x %x\n", vif->wdev.address[0], vif->wdev.address[1], + vif->wdev.address[2], vif->wdev.address[3], vif->wdev.address[4], vif->wdev.address[5]); + + if (params) { + vif->use_4addr = params->use_4addr; + ndev->ieee80211_ptr->use_4addr = params->use_4addr; + } else + vif->use_4addr = false; + +#ifdef CONFIG_USE_WIRELESS_EXT + aicwf_set_wireless_ext(ndev, rwnx_hw); +#else +#ifdef CONFIG_WIRELESS_EXT + memset(&aic_handlers_def, 0,sizeof(struct iw_handler_def)); + ndev->wireless_handlers = (struct iw_handler_def *)&aic_handlers_def; +#endif +#endif + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 12, 0) + if (cfg80211_register_netdevice(ndev)) +#else + if (register_netdevice(ndev)) +#endif + goto err; + + + spin_lock_bh(&rwnx_hw->cb_lock); + list_add_tail(&vif->list, &rwnx_hw->vifs); + spin_unlock_bh(&rwnx_hw->cb_lock); + rwnx_hw->avail_idx_map &= ~BIT(vif_idx); + + return vif; + +err: + free_netdev(ndev); + return NULL; +} + + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0) +void aicwf_p2p_alive_timeout(ulong data) +#else +void aicwf_p2p_alive_timeout(struct timer_list *t) +#endif +{ + struct rwnx_hw *rwnx_hw; + struct rwnx_vif *rwnx_vif; + struct rwnx_vif *rwnx_vif1, *tmp; + u8_l p2p = 0; + #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0) + rwnx_vif = (struct rwnx_vif *)data; + rwnx_hw = rwnx_vif->rwnx_hw; + #else + rwnx_hw = from_timer(rwnx_hw, t, p2p_alive_timer); + rwnx_vif = rwnx_hw->p2p_dev_vif; + #endif + + //printk("%s enter %d \r\n", __func__, atomic_read(&rwnx_hw->p2p_alive_timer_count)); + +#if 1 //AIDEN workaround + if(atomic_read(&rwnx_hw->p2p_alive_timer_count) > 2){ + rwnx_hw->p2p_working = false; + } +#endif + + list_for_each_entry_safe(rwnx_vif1, tmp, &rwnx_hw->vifs, list) { + if ((rwnx_hw->avail_idx_map & BIT(rwnx_vif1->drv_vif_index)) == 0) { + switch(RWNX_VIF_TYPE(rwnx_vif1)) { + case NL80211_IFTYPE_P2P_CLIENT: + case NL80211_IFTYPE_P2P_GO: + rwnx_hw->is_p2p_alive = 1; + p2p = 1; + break; + default: + break; + } + } + } + + if (p2p){ + atomic_set(&rwnx_hw->p2p_alive_timer_count, 0); + }else{ + atomic_inc(&rwnx_hw->p2p_alive_timer_count); + } + + if (atomic_read(&rwnx_hw->p2p_alive_timer_count) < P2P_ALIVE_TIME_COUNT) { + mod_timer(&rwnx_hw->p2p_alive_timer, + jiffies + msecs_to_jiffies(P2P_ALIVE_TIME_MS)); + return; + } else + atomic_set(&rwnx_hw->p2p_alive_timer_count, 0); + + rwnx_hw->is_p2p_alive = 0; + rwnx_send_remove_if(rwnx_hw, rwnx_vif->vif_index, true); + + /* Ensure that we won't process disconnect ind */ + spin_lock_bh(&rwnx_hw->cb_lock); + + rwnx_vif->up = false; + rwnx_hw->vif_table[rwnx_vif->vif_index] = NULL; + AICWFDBG(LOGDEBUG, "%s rwnx_vif[%d] down \r\n", __func__, rwnx_vif->vif_index); + 
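+	/* The P2P keep-alive window expired with no active P2P vif: the interface was removed at fw level above, so drop it from the started count while cb_lock is still held. */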
rwnx_hw->vif_started--; + spin_unlock_bh(&rwnx_hw->cb_lock); +} + +#ifdef CONFIG_DYNAMIC_PERPWR +void rssi_update_txpwrloss(struct rwnx_sta *sta, s8_l rssi) +{ + int rssi_avg; + + rssi_avg = (sta->rssi_save + rssi) / 2; + sta->rssi_save = rssi_avg; + + //printk("New RSSI: %d, Average RSSI: %d\n", rssi, rssi_avg); + + if(rssi_avg > RSSI_THD_0) { + if (sta->per_pwrloss != PWR_LOSS_LVL0) { + if ((jiffies_to_msecs(jiffies - sta->last_jiffies) < PWR_DELAY_TIME) && (sta->per_pwrloss >= PWR_LOSS_LVL0)) { + //printk("delay p1\n"); + return; + } + schedule_work(&sta->per_pwr_work); + sta->per_pwrloss = PWR_LOSS_LVL0; + sta->last_jiffies = jiffies; + } + } else if ((rssi_avg > RSSI_THD_1) && (rssi_avg <= RSSI_THD_0)) { + if (sta->per_pwrloss != PWR_LOSS_LVL1) { + if ((jiffies_to_msecs(jiffies - sta->last_jiffies) < PWR_DELAY_TIME) && (sta->per_pwrloss >= PWR_LOSS_LVL1)) { + //printk("delay p2\n"); + return; + } + schedule_work(&sta->per_pwr_work); + sta->per_pwrloss = PWR_LOSS_LVL1; + sta->last_jiffies = jiffies; + } + } else if ((rssi_avg > RSSI_THD_2) && (rssi_avg <= RSSI_THD_1)) { + if (sta->per_pwrloss != PWR_LOSS_LVL2) { + if ((jiffies_to_msecs(jiffies - sta->last_jiffies) < PWR_DELAY_TIME) && (sta->per_pwrloss >= PWR_LOSS_LVL2)) { + //printk("delay p3\n"); + return; + } + schedule_work(&sta->per_pwr_work); + sta->per_pwrloss = PWR_LOSS_LVL2; + sta->last_jiffies = jiffies; + } + } else if (rssi_avg <= RSSI_THD_2) { + if (sta->per_pwrloss != PWR_LOSS_LVL3) { + schedule_work(&sta->per_pwr_work); + sta->per_pwrloss = PWR_LOSS_LVL3; + sta->last_jiffies = jiffies; + } + } +} + +void aicwf_txpwer_per_sta_worker(struct work_struct *work) +{ + struct rwnx_sta *sta; + sta = container_of(work, struct rwnx_sta, per_pwr_work); + + AICWFDBG(LOGINFO, "per_sta_worker: idx: %d\n", sta->sta_idx); + rwnx_send_txpwr_per_sta_req(g_rwnx_plat->usbdev->rwnx_hw, sta); +} +#endif + +#ifdef CONFIG_DYNAMIC_PWR +void set_txpwrloss_ctrl(struct rwnx_hw *rwnx_hw, s8 value) +{ + AICWFDBG(LOGINFO, "set_txpwrloss_ctrl: %d\n", value); + set_txpwr_loss_ofst(value); + if (rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800DC || + rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800DW) { + rwnx_send_txpwr_lvl_req(rwnx_hw); + } else if ((rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800D81)) { + rwnx_send_txpwr_lvl_v3_req(rwnx_hw); + } else if ((rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800D81X2) || + (rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800D89X2)) { + rwnx_send_txpwr_lvl_v4_req(rwnx_hw); + } +} + +void aicwf_pwrloss_worker(struct work_struct *work) +{ + struct rwnx_hw *rwnx_hw; + struct rwnx_vif *rwnx_vif; + struct mm_get_sta_info_cfm cfm; + int ret = 0; + s8 rssi; + + rwnx_hw = container_of(work, struct rwnx_hw, pwrloss_work); + rwnx_vif = rwnx_hw->read_rssi_vif; + if (!dynamic_pwr) { + if (rwnx_hw->pwrloss_lvl != 0) { + set_txpwrloss_ctrl(rwnx_hw, 0); + } + mod_timer(&rwnx_hw->pwrloss_timer, jiffies + msecs_to_jiffies(RSSI_GET_INTERVAL)); + return; + } + if(atomic_read(&rwnx_vif->drv_conn_state) == (int)RWNX_DRV_STATUS_CONNECTED){ + ret = rwnx_send_get_sta_info_req(rwnx_hw, rwnx_hw->sta_rssi_idx, &cfm); + if(ret < 0) { + AICWFDBG(LOGINFO, "rwnx_send_get_sta_info_req: %dn", ret); + return; + } + rssi = (s8)cfm.rssi; + AICWFDBG(LOGINFO, "%s: rssi:%d lvl : %d\n", __func__, rssi, rwnx_hw->pwrloss_lvl); + if(rssi > RSSI_THD_0) { + if (rwnx_hw->pwrloss_lvl != PWR_LOSS_LVL0) { + set_txpwrloss_ctrl(rwnx_hw, PWR_LOSS_LVL0); + rwnx_hw->pwrloss_lvl = PWR_LOSS_LVL0; + } + } else if ((rssi > RSSI_THD_1) && (rssi <= RSSI_THD_0)) { + if 
(rwnx_hw->pwrloss_lvl != PWR_LOSS_LVL1) { + set_txpwrloss_ctrl(rwnx_hw, PWR_LOSS_LVL1); + rwnx_hw->pwrloss_lvl = PWR_LOSS_LVL1; + } + } else if ((rssi > RSSI_THD_2) && (rssi <= RSSI_THD_1)) { + if (rwnx_hw->pwrloss_lvl != PWR_LOSS_LVL2) { + set_txpwrloss_ctrl(rwnx_hw, PWR_LOSS_LVL2); + rwnx_hw->pwrloss_lvl = PWR_LOSS_LVL2; + } + } else if (rssi <= RSSI_THD_2) { + if (rwnx_hw->pwrloss_lvl != PWR_LOSS_LVL3) { + set_txpwrloss_ctrl(rwnx_hw, PWR_LOSS_LVL3); + rwnx_hw->pwrloss_lvl = PWR_LOSS_LVL3; + } + } + } else if (atomic_read(&rwnx_vif->drv_conn_state) == (int)RWNX_DRV_STATUS_DISCONNECTED){ + if (rwnx_hw->pwrloss_lvl != 0) { + set_txpwrloss_ctrl(rwnx_hw, 0); + rwnx_hw->pwrloss_lvl = 0; + } + } + mod_timer(&rwnx_hw->pwrloss_timer, jiffies + msecs_to_jiffies(RSSI_GET_INTERVAL)); +} + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0) +static void aicwf_pwrloss_timer(ulong data) +#else +static void aicwf_pwrloss_timer(struct timer_list *t) +#endif +{ + struct rwnx_hw *rwnx_hw; + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0) + rwnx_vif = (struct rwnx_vif *)data; + rwnx_hw = rwnx_vif->rwnx_hw; +#else + rwnx_hw = from_timer(rwnx_hw, t, pwrloss_timer); +#endif + if (!work_pending(&rwnx_hw->pwrloss_work)) + schedule_work(&rwnx_hw->pwrloss_work); + return; + +} +#endif + +/********************************************************************* + * Cfg80211 callbacks (and helper) + *********************************************************************/ +static struct wireless_dev *rwnx_virtual_interface_add(struct rwnx_hw *rwnx_hw, + const char *name, + unsigned char name_assign_type, + enum nl80211_iftype type, + struct vif_params *params) +{ + struct wireless_dev *wdev = NULL; + struct rwnx_vif *vif; + int min_idx, max_idx; + int vif_idx = -1; + int i; + + AICWFDBG(LOGINFO, "rwnx_virtual_interface_add: %d, %s\n", type, name); + + if (type == NL80211_IFTYPE_AP_VLAN) { + min_idx = NX_VIRT_DEV_MAX; + max_idx = NX_VIRT_DEV_MAX + NX_REMOTE_STA_MAX; + } else { + min_idx = 0; + max_idx = NX_VIRT_DEV_MAX; + } + + for (i = min_idx; i < max_idx; i++) { + if ((rwnx_hw->avail_idx_map) & BIT(i)) { + vif_idx = i; + break; + } + } + + if (vif_idx < 0) { + AICWFDBG(LOGERROR, "virtual_interface_add %s fail\n", name); + return NULL; + } + + vif = kzalloc(sizeof(struct rwnx_vif), GFP_KERNEL); + if (unlikely(!vif)) { + AICWFDBG(LOGERROR, "Could not allocate wireless device\n"); + return NULL; + } + wdev = &vif->wdev; + wdev->wiphy = rwnx_hw->wiphy; + wdev->iftype = type; + + AICWFDBG(LOGINFO, "rwnx_virtual_interface_add, ifname=%s, wdev=%p, vif_idx=%d\n", name, wdev, vif_idx); + + #ifndef CONFIG_USE_P2P0 + vif->is_p2p_vif = 1; + vif->rwnx_hw = rwnx_hw; + vif->vif_index = vif_idx; + vif->wdev.wiphy = rwnx_hw->wiphy; + vif->drv_vif_index = vif_idx; + vif->up = false; + vif->ch_index = RWNX_CH_NOT_SET; + memset(&vif->net_stats, 0, sizeof(vif->net_stats)); + vif->use_4addr = false; + + spin_lock_bh(&rwnx_hw->cb_lock); + list_add_tail(&vif->list, &rwnx_hw->vifs); + spin_unlock_bh(&rwnx_hw->cb_lock); + + if (rwnx_hw->is_p2p_alive == 0) { + #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0) + init_timer(&rwnx_hw->p2p_alive_timer); + rwnx_hw->p2p_alive_timer.data = (unsigned long)vif; + rwnx_hw->p2p_alive_timer.function = aicwf_p2p_alive_timeout; + #else + timer_setup(&rwnx_hw->p2p_alive_timer, aicwf_p2p_alive_timeout, 0); + #endif + rwnx_hw->is_p2p_alive = 0; + rwnx_hw->is_p2p_connected = 0; + rwnx_hw->p2p_dev_vif = vif; + atomic_set(&rwnx_hw->p2p_alive_timer_count, 0); + } + #endif + rwnx_hw->avail_idx_map &= ~BIT(vif_idx); 
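+	/* Derive the P2P device address from the wiphy permanent MAC and XOR the vif index into the last octet, so that each virtual interface gets a distinct address. */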
+ + memcpy(vif->wdev.address, rwnx_hw->wiphy->perm_addr, ETH_ALEN); + vif->wdev.address[5] ^= vif_idx; + AICWFDBG(LOGERROR, "p2p dev addr=%x %x %x %x %x %x\n", vif->wdev.address[0], vif->wdev.address[1], \ + vif->wdev.address[2], vif->wdev.address[3], vif->wdev.address[4], vif->wdev.address[5]); + + return wdev; +} + +/* + * @brief Retrieve the rwnx_sta object allocated for a given MAC address + * and a given role. + */ + +struct rwnx_sta *rwnx_retrieve_sta(struct rwnx_hw *rwnx_hw, + struct rwnx_vif *rwnx_vif, u8 *addr, + __le16 fc, bool ap) +{ + if (ap) { + /* only deauth, disassoc and action are bufferable MMPDUs */ + bool bufferable = ieee80211_is_deauth(fc) || + ieee80211_is_disassoc(fc) || + ieee80211_is_action(fc); + + /* Check if the packet is bufferable or not */ + if (bufferable) + { + /* Check if address is a broadcast or a multicast address */ + if (is_broadcast_ether_addr(addr) || is_multicast_ether_addr(addr)) { + /* Returned STA pointer */ + struct rwnx_sta *rwnx_sta = &rwnx_hw->sta_table[rwnx_vif->ap.bcmc_index]; + + if (rwnx_sta->valid) + return rwnx_sta; + } else { + /* Returned STA pointer */ + struct rwnx_sta *rwnx_sta; + + /* Go through list of STAs linked with the provided VIF */ + spin_lock_bh(&rwnx_vif->rwnx_hw->cb_lock); + list_for_each_entry(rwnx_sta, &rwnx_vif->ap.sta_list, list) { + AICWFDBG(LOGDEBUG, "%s mac_addr:%x %x %x %x %x %x addr:%x %x %x %x %x %x \r\n", __func__, + rwnx_sta->mac_addr[0],rwnx_sta->mac_addr[1],rwnx_sta->mac_addr[2], + rwnx_sta->mac_addr[3],rwnx_sta->mac_addr[4],rwnx_sta->mac_addr[5], + addr[0],addr[1],addr[2],addr[3],addr[4],addr[5]); + if (rwnx_sta->valid && + ether_addr_equal(rwnx_sta->mac_addr, addr)) { + /* Return the found STA */ + spin_unlock_bh(&rwnx_vif->rwnx_hw->cb_lock); + return rwnx_sta; + } + } + spin_unlock_bh(&rwnx_vif->rwnx_hw->cb_lock); + } + } + } else { + return rwnx_vif->sta.ap; + } + + return NULL; +} + +/** + * @add_virtual_intf: create a new virtual interface with the given name, + * must set the struct wireless_dev's iftype. Beware: You must create + * the new netdev in the wiphy's network namespace! Returns the struct + * wireless_dev, or an ERR_PTR. For P2P device wdevs, the driver must + * also set the address member in the wdev. 
+ */ +static struct wireless_dev *rwnx_cfg80211_add_iface(struct wiphy *wiphy, + const char *name, + unsigned char name_assign_type, + enum nl80211_iftype type, + struct vif_params *params) +{ + struct rwnx_hw *rwnx_hw = wiphy_priv(wiphy); + struct wireless_dev *wdev; +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0)) + unsigned char name_assign_type = NET_NAME_UNKNOWN; +#endif + + if (type != NL80211_IFTYPE_P2P_DEVICE) { + struct rwnx_vif *vif= rwnx_interface_add(rwnx_hw, name, name_assign_type, type, params); + if (!vif) + return ERR_PTR(-EINVAL); + return &vif->wdev; + + } else { + wdev = rwnx_virtual_interface_add(rwnx_hw, name, name_assign_type, type, params); + if (!wdev) + return ERR_PTR(-EINVAL); + return wdev; + } +} + +/** + * @del_virtual_intf: remove the virtual interface + */ +static int rwnx_cfg80211_del_iface(struct wiphy *wiphy, struct wireless_dev *wdev) +{ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)) + struct net_device *dev = wdev->netdev; + struct rwnx_hw *rwnx_hw = wiphy_priv(wiphy); + struct rwnx_vif *rwnx_vif = container_of(wdev, struct rwnx_vif, wdev); +#else + struct rwnx_hw *rwnx_hw = wiphy_priv(wiphy); + struct rwnx_vif *rwnx_vif = netdev_priv(dev); +#endif + // printk("del_iface: %p\n",wdev); + + AICWFDBG(LOGINFO, "del_iface: %p, %x\n",wdev, wdev->address[5]); + + if (!dev || !rwnx_vif->ndev) { +#if 0 + if (rwnx_vif == rwnx_hw->p2p_dev_vif) { + if (timer_pending(&rwnx_hw->p2p_alive_timer)) { + del_timer_sync(&rwnx_hw->p2p_alive_timer); + } + } +#endif + cfg80211_unregister_wdev(wdev); + spin_lock_bh(&rwnx_hw->cb_lock); + list_del(&rwnx_vif->list); + spin_unlock_bh(&rwnx_hw->cb_lock); + rwnx_hw->avail_idx_map |= BIT(rwnx_vif->drv_vif_index); + rwnx_vif->ndev = NULL; + kfree(rwnx_vif); + return 0; + } +#if 0 + netdev_info(dev, "Remove Interface"); +#endif + + AICWFDBG(LOGINFO, "%s Remove Interface \r\n", dev->name); + if (dev->reg_state == NETREG_REGISTERED) { + /* Will call rwnx_close if interface is UP */ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 12, 0) + cfg80211_unregister_netdevice(dev); +#else + unregister_netdevice(dev); +#endif + + } + + spin_lock_bh(&rwnx_hw->cb_lock); + list_del(&rwnx_vif->list); + spin_unlock_bh(&rwnx_hw->cb_lock); + rwnx_hw->avail_idx_map |= BIT(rwnx_vif->drv_vif_index); + rwnx_vif->ndev = NULL; + + /* Clear the priv in adapter */ + dev->ieee80211_ptr = NULL; + + return 0; +} + +/** + * @change_virtual_intf: change type/configuration of virtual interface, + * keep the struct wireless_dev's iftype updated. + */ +static int rwnx_cfg80211_change_iface(struct wiphy *wiphy, + struct net_device *dev, + enum nl80211_iftype type, + struct vif_params *params) +{ +#ifndef CONFIG_RWNX_MON_DATA + struct rwnx_hw *rwnx_hw = wiphy_priv(wiphy); +#endif + struct rwnx_vif *vif = netdev_priv(dev); + struct mm_add_if_cfm add_if_cfm; + bool_l p2p = false; + int ret; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + AICWFDBG(LOGINFO, "change_if: %d to %d, %d, %d\r\n", vif->wdev.iftype, type, NL80211_IFTYPE_P2P_CLIENT, NL80211_IFTYPE_STATION); + +#ifndef CONFIG_RWNX_MON_DATA + if ((type == NL80211_IFTYPE_MONITOR) && + (RWNX_VIF_TYPE(vif) != NL80211_IFTYPE_MONITOR)) { + struct rwnx_vif *vif_el; + list_for_each_entry(vif_el, &rwnx_hw->vifs, list) { + // Check if data interface already exists + if ((vif_el != vif) && + (RWNX_VIF_TYPE(vif) != NL80211_IFTYPE_MONITOR)) { + wiphy_err(rwnx_hw->wiphy, + "Monitor+Data interface support (MON_DATA) disabled\n"); + return -EIO; + } + } + } +#endif + + // Reset to default case (i.e. 
not monitor) + dev->type = ARPHRD_ETHER; + dev->netdev_ops = &rwnx_netdev_ops; + + switch (type) { + case NL80211_IFTYPE_STATION: + case NL80211_IFTYPE_P2P_CLIENT: + vif->sta.ap = NULL; + vif->sta.tdls_sta = NULL; + vif->sta.external_auth = false; + break; + case NL80211_IFTYPE_MESH_POINT: + INIT_LIST_HEAD(&vif->ap.mpath_list); + INIT_LIST_HEAD(&vif->ap.proxy_list); + vif->ap.create_path = false; + vif->ap.generation = 0; + // no break + case NL80211_IFTYPE_AP: + case NL80211_IFTYPE_P2P_GO: + INIT_LIST_HEAD(&vif->ap.sta_list); + memset(&vif->ap.bcn, 0, sizeof(vif->ap.bcn)); + break; + case NL80211_IFTYPE_AP_VLAN: + return -EPERM; + case NL80211_IFTYPE_MONITOR: + dev->type = ARPHRD_IEEE80211_RADIOTAP; + dev->netdev_ops = &rwnx_netdev_monitor_ops; + break; + default: + break; + } + + vif->wdev.iftype = type; + if (params->use_4addr != -1) + vif->use_4addr = params->use_4addr; + if (type == NL80211_IFTYPE_P2P_CLIENT || type == NL80211_IFTYPE_P2P_GO){ + p2p = true; + } + if (vif->up) { + /* Abort scan request on the vif */ + if (vif->rwnx_hw->scan_request && + vif->rwnx_hw->scan_request->wdev == &vif->wdev) { +#if 0 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0) + struct cfg80211_scan_info info = { + .aborted = true, + }; + cfg80211_scan_done(vif->rwnx_hw->scan_request, &info); +#else + cfg80211_scan_done(vif->rwnx_hw->scan_request, true); +#endif + if ((ret = rwnx_send_scanu_cancel_req(vif->rwnx_hw, NULL))) { + AICWFDBG(LOGERROR, "scanu_cancel fail\n"); + return ret; + } + vif->rwnx_hw->scan_request = NULL; +#else + if ((ret = rwnx_send_scanu_cancel_req(vif->rwnx_hw, NULL))) { + AICWFDBG(LOGERROR, "scanu_cancel fail\n"); + return ret; + } +#endif + } + + if ((ret = rwnx_send_remove_if(vif->rwnx_hw, vif->vif_index, false))) { + AICWFDBG(LOGERROR, "remove_if fail\n"); + return ret; + } + vif->rwnx_hw->vif_table[vif->vif_index] = NULL; + AICWFDBG(LOGINFO, "change_if from %d \n", vif->vif_index); + if ((ret = rwnx_send_add_if(vif->rwnx_hw, vif->wdev.address, + RWNX_VIF_TYPE(vif), p2p, &add_if_cfm))) { + AICWFDBG(LOGERROR, "add if fail\n"); + return ret; + } + if (add_if_cfm.status != 0) { + AICWFDBG(LOGERROR, "add if status fail\n"); + return -EIO; + } + + AICWFDBG(LOGINFO, "change_if to %d \n", add_if_cfm.inst_nbr); + /* Save the index retrieved from LMAC */ + spin_lock_bh(&vif->rwnx_hw->cb_lock); + vif->vif_index = add_if_cfm.inst_nbr; + vif->rwnx_hw->vif_table[add_if_cfm.inst_nbr] = vif; + spin_unlock_bh(&vif->rwnx_hw->cb_lock); + } + + if (type == NL80211_IFTYPE_MONITOR) { + vif->rwnx_hw->monitor_vif = vif->vif_index; + #if defined(CONFIG_RWNX_MON_XMIT) + rwnx_txq_unk_vif_init(vif); + #endif + #if defined(CONFIG_RWNX_MON_RXFILTER) + rwnx_send_set_filter(vif->rwnx_hw, (FIF_BCN_PRBRESP_PROMISC | FIF_OTHER_BSS | FIF_PSPOLL | FIF_PROBE_REQ)); + #endif + } else { + vif->rwnx_hw->monitor_vif = RWNX_INVALID_VIF; + } + + return 0; +} + +static int rwnx_cfgp2p_start_p2p_device(struct wiphy *wiphy, struct wireless_dev *wdev) +{ + int ret = 0; + + //do nothing + AICWFDBG(LOGINFO, "P2P interface started\n"); + + return ret; +} + +static void rwnx_cfgp2p_stop_p2p_device(struct wiphy *wiphy, struct wireless_dev *wdev) +{ +#if 0 + int ret = 0; + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + + if (!cfg) + return; + + CFGP2P_DBG(("Enter\n")); + + ret = wl_cfg80211_scan_stop(cfg, wdev); + if (unlikely(ret < 0)) { + CFGP2P_ERR(("P2P scan stop failed, ret=%d\n", ret)); + } + + if (!cfg->p2p) + return; + + /* Cancel any on-going listen */ + wl_cfgp2p_cancel_listen(cfg, bcmcfg_to_prmry_ndev(cfg), wdev, 
TRUE); + + ret = wl_cfgp2p_disable_discovery(cfg); + if (unlikely(ret < 0)) { + CFGP2P_ERR(("P2P disable discovery failed, ret=%d\n", ret)); + } + + p2p_on(cfg) = false; +#endif + int ret = 0; + struct rwnx_hw *rwnx_hw = wiphy_priv(wiphy); + struct rwnx_vif *rwnx_vif = container_of(wdev, struct rwnx_vif, wdev); + /* Abort scan request on the vif */ + if (rwnx_hw->scan_request && + rwnx_hw->scan_request->wdev == &rwnx_vif->wdev) { +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0) + struct cfg80211_scan_info info = { + .aborted = true, + }; + + cfg80211_scan_done(rwnx_hw->scan_request, &info); +#else + cfg80211_scan_done(rwnx_hw->scan_request, true); +#endif + rwnx_hw->scan_request = NULL; + ret = rwnx_send_scanu_cancel_req(rwnx_hw, NULL); + if (ret){ + AICWFDBG(LOGERROR, "scanu_cancel fail\n"); + } + } + + if (rwnx_vif == rwnx_hw->p2p_dev_vif) { + rwnx_hw->is_p2p_alive = 0; + if (timer_pending(&rwnx_hw->p2p_alive_timer)) { + del_timer_sync(&rwnx_hw->p2p_alive_timer); + } + if (rwnx_vif->up) { + rwnx_send_remove_if(rwnx_hw, rwnx_vif->vif_index, true); + /* Ensure that we won't process disconnect ind */ + spin_lock_bh(&rwnx_hw->cb_lock); + rwnx_vif->up = false; + rwnx_hw->vif_table[rwnx_vif->vif_index] = NULL; + AICWFDBG(LOGDEBUG, "%s rwnx_vif[%d] down \r\n", __func__, rwnx_vif->vif_index); + rwnx_hw->vif_started--; + spin_unlock_bh(&rwnx_hw->cb_lock); + } + } + + AICWFDBG(LOGINFO, "Exit. P2P interface stopped\n"); + + return; +} + +int rwnx_send_check_p2p(struct cfg80211_scan_request *param){ + int index = (u8)min_t(int, SCAN_SSID_MAX, param->n_ssids); + int i = 0; + + for(i = 0;i < index;i++){ + if (!memcmp("DIRECT-", param->ssids[i].ssid, + (sizeof("DIRECT-") - 1))) { + //printk("AIDEN rwnx_send_check_p2p!!\r\n"); + return 1; + } + } + return 0; +} + +/** + * @scan: Request to do a scan. If returning zero, the scan request is given + * the driver, and will be valid until passed to cfg80211_scan_done(). + * For scan results, call cfg80211_inform_bss(); you can call this outside + * the scan/scan_done bracket too. 
+ */ +static int rwnx_cfg80211_scan(struct wiphy *wiphy, + #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0) + struct net_device *dev, + #endif + struct cfg80211_scan_request *request) +{ + struct rwnx_hw *rwnx_hw = wiphy_priv(wiphy); +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0) + struct rwnx_vif *rwnx_vif = container_of(request->wdev, struct rwnx_vif, + wdev); +#else + struct rwnx_vif* rwnx_vif = netdev_priv(request->dev); +#endif + int error; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + if(testmode){ + AICWFDBG(LOGERROR, "%s in testmode return busy\r\n", __func__); + return -EBUSY; + } + + if((int)atomic_read(&rwnx_vif->drv_conn_state) == (int)RWNX_DRV_STATUS_CONNECTING){ + AICWFDBG(LOGERROR, "%s in connecting return busy\r\n", __func__); + return -EBUSY; + } + +#ifndef CONFIG_STA_SCAN_WHEN_P2P_WORKING + if (rwnx_hw->p2p_working && RWNX_VIF_TYPE(rwnx_vif) != NL80211_IFTYPE_P2P_CLIENT && + !rwnx_send_check_p2p(request)) { + AICWFDBG(LOGINFO, "p2p is working, scan abort\n"); + return -EBUSY; + } +#endif + + if (rwnx_hw->scanning) { + AICWFDBG(LOGERROR, "%s is scanning, abort\n", __func__); + #if 0//AIDEN test + error = rwnx_send_scanu_cancel_req(rwnx_hw, NULL); + if (error) + return error; + msleep(150); + #endif + return -EBUSY; + } + + if((RWNX_VIF_TYPE(rwnx_vif) == NL80211_IFTYPE_STATION ||RWNX_VIF_TYPE(rwnx_vif) == NL80211_IFTYPE_P2P_CLIENT) && rwnx_vif->sta.external_auth){ + AICWFDBG(LOGINFO, "scan about: external auth\r\n"); + return -EBUSY; + } + + rwnx_hw->scan_request = request; + if ((error = rwnx_send_scanu_req(rwnx_hw, rwnx_vif, request))) + return error; + + return 0; +} + +bool key_flag = false; +/** + * @add_key: add a key with the given parameters. @mac_addr will be %NULL + * when adding a group key. + */ +static int rwnx_cfg80211_add_key(struct wiphy *wiphy, struct net_device *netdev, +#if (LINUX_VERSION_CODE >= HIGH_KERNEL_VERSION2) + int link_id, +#endif + u8 key_index, bool pairwise, const u8 *mac_addr, + struct key_params *params) +{ + struct rwnx_hw *rwnx_hw = wiphy_priv(wiphy); + struct rwnx_vif *vif = netdev_priv(netdev); + int i, error = 0; + struct mm_key_add_cfm key_add_cfm; + u8_l cipher = 0; + struct rwnx_sta *sta = NULL; + struct rwnx_key *rwnx_key; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + if (mac_addr) { + sta = rwnx_get_sta(rwnx_hw, mac_addr); + if (!sta) + return -EINVAL; + rwnx_key = &sta->key; + if (vif->wdev.iftype == NL80211_IFTYPE_STATION || vif->wdev.iftype == NL80211_IFTYPE_P2P_CLIENT) + vif->sta.paired_cipher_type = params->cipher; + } + else { + rwnx_key = &vif->key[key_index]; + vif->key_has_add = 1; + if (vif->wdev.iftype == NL80211_IFTYPE_STATION || vif->wdev.iftype == NL80211_IFTYPE_P2P_CLIENT) + vif->sta.group_cipher_type = params->cipher; + } + + /* Retrieve the cipher suite selector */ + switch (params->cipher) { + case WLAN_CIPHER_SUITE_WEP40: + cipher = MAC_CIPHER_WEP40; + break; + case WLAN_CIPHER_SUITE_WEP104: + cipher = MAC_CIPHER_WEP104; + break; + case WLAN_CIPHER_SUITE_TKIP: + cipher = MAC_CIPHER_TKIP; + break; + case WLAN_CIPHER_SUITE_CCMP: + cipher = MAC_CIPHER_CCMP; + break; + case WLAN_CIPHER_SUITE_AES_CMAC: + cipher = MAC_CIPHER_BIP_CMAC_128; + break; + case WLAN_CIPHER_SUITE_SMS4: + { + // Need to reverse key order + u8 tmp, *key = (u8 *)params->key; + cipher = MAC_CIPHER_WPI_SMS4; + for (i = 0; i < WPI_SUBKEY_LEN/2; i++) { + tmp = key[i]; + key[i] = key[WPI_SUBKEY_LEN - 1 - i]; + key[WPI_SUBKEY_LEN - 1 - i] = tmp; + } + for (i = 0; i < WPI_SUBKEY_LEN/2; i++) { + tmp = key[i + WPI_SUBKEY_LEN]; + key[i + WPI_SUBKEY_LEN] = 
key[WPI_KEY_LEN - 1 - i]; + key[WPI_KEY_LEN - 1 - i] = tmp; + } + break; + } + default: + return -EINVAL; + } + + key_flag = false; + if ((error = rwnx_send_key_add(rwnx_hw, vif->vif_index, + (sta ? sta->sta_idx : 0xFF), pairwise, + (u8 *)params->key, params->key_len, + key_index, cipher, &key_add_cfm))) + return error; + + if (key_add_cfm.status != 0) { + RWNX_PRINT_CFM_ERR(key_add); + return -EIO; + } + + /* Save the index retrieved from LMAC */ + rwnx_key->hw_idx = key_add_cfm.hw_key_idx; + + return 0; +} + +/** + * @get_key: get information about the key with the given parameters. + * @mac_addr will be %NULL when requesting information for a group + * key. All pointers given to the @callback function need not be valid + * after it returns. This function should return an error if it is + * not possible to retrieve the key, -ENOENT if it doesn't exist. + * + */ +static int rwnx_cfg80211_get_key(struct wiphy *wiphy, struct net_device *netdev, +#if (LINUX_VERSION_CODE >= HIGH_KERNEL_VERSION2) + int link_id, +#endif + + u8 key_index, bool pairwise, const u8 *mac_addr, + void *cookie, + void (*callback)(void *cookie, struct key_params*)) +{ + RWNX_DBG(RWNX_FN_ENTRY_STR); + + return -1; +} + + +/** + * @del_key: remove a key given the @mac_addr (%NULL for a group key) + * and @key_index, return -ENOENT if the key doesn't exist. + */ +static int rwnx_cfg80211_del_key(struct wiphy *wiphy, struct net_device *netdev, +#if (LINUX_VERSION_CODE >= HIGH_KERNEL_VERSION2) + int link_id, +#endif + + u8 key_index, bool pairwise, const u8 *mac_addr) +{ + struct rwnx_hw *rwnx_hw = wiphy_priv(wiphy); + struct rwnx_vif *vif = netdev_priv(netdev); + int error; + struct rwnx_sta *sta = NULL; + struct rwnx_key *rwnx_key; + if (!key_flag && vif->wdev.iftype == NL80211_IFTYPE_STATION) + return 0; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + if (mac_addr) { + sta = rwnx_get_sta(rwnx_hw, mac_addr); + if (!sta) + return -EINVAL; + rwnx_key = &sta->key; + if (vif->wdev.iftype == NL80211_IFTYPE_STATION || vif->wdev.iftype == NL80211_IFTYPE_P2P_CLIENT) + vif->sta.paired_cipher_type = 0xff; + } + else { + rwnx_key = &vif->key[key_index]; + vif->key_has_add = 0; + if (vif->wdev.iftype == NL80211_IFTYPE_STATION || vif->wdev.iftype == NL80211_IFTYPE_P2P_CLIENT) + vif->sta.group_cipher_type = 0xff; + } + + error = rwnx_send_key_del(rwnx_hw, rwnx_key->hw_idx); + + rwnx_key->hw_idx = 0; + return error; +} + +/** + * @set_default_key: set the default key on an interface + */ +static int rwnx_cfg80211_set_default_key(struct wiphy *wiphy, + struct net_device *netdev, +#if (LINUX_VERSION_CODE >= HIGH_KERNEL_VERSION2) + int link_id, +#endif + u8 key_index, bool unicast, bool multicast) +{ + RWNX_DBG(RWNX_FN_ENTRY_STR); + + return 0; +} + +/** + * @set_default_mgmt_key: set the default management frame key on an interface + */ +static int rwnx_cfg80211_set_default_mgmt_key(struct wiphy *wiphy, + struct net_device *netdev, +#if (LINUX_VERSION_CODE >= HIGH_KERNEL_VERSION2) + int link_id, +#endif + u8 key_index) +{ + return 0; +} + +/** + * @connect: Connect to the ESS with the specified parameters. When connected, + * call cfg80211_connect_result() with status code %WLAN_STATUS_SUCCESS. + * If the connection fails for some reason, call cfg80211_connect_result() + * with the status from the AP. 
+ * (invoked with the wireless_dev mutex held) + */ + +static int rwnx_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev, + struct cfg80211_connect_params *sme) +{ + struct rwnx_hw *rwnx_hw = wiphy_priv(wiphy); + struct rwnx_vif *rwnx_vif = netdev_priv(dev); + struct sm_connect_cfm sm_connect_cfm; + int error = 0; + int is_wep = ((sme->crypto.cipher_group == WLAN_CIPHER_SUITE_WEP40) || + (sme->crypto.cipher_group == WLAN_CIPHER_SUITE_WEP104) || + (sme->crypto.ciphers_pairwise[0] == WLAN_CIPHER_SUITE_WEP40) || + (sme->crypto.ciphers_pairwise[0] == WLAN_CIPHER_SUITE_WEP104)); + + RWNX_DBG(RWNX_FN_ENTRY_STR); + +#if 1 + + if((int)atomic_read(&rwnx_vif->drv_conn_state) == (int)RWNX_DRV_STATUS_CONNECTED) { + AICWFDBG(LOGDEBUG, "%s this connection is roam \r\n", __func__); + rwnx_vif->sta.is_roam = true; + }else{ + rwnx_vif->sta.is_roam = false; + } + + if((int)atomic_read(&rwnx_vif->drv_conn_state) == (int)RWNX_DRV_STATUS_DISCONNECTING|| + (int)atomic_read(&rwnx_vif->drv_conn_state) == (int)RWNX_DRV_STATUS_CONNECTING) { + AICWFDBG(LOGERROR, "%s driver is disconnecting or connecting ,return it \r\n", __func__); + return 0; + } +#endif + + if(rwnx_vif->sta.is_roam){ + rwnx_set_conn_state(&rwnx_vif->drv_conn_state, (int)RWNX_DRV_STATUS_ROAMING); + }else{ + rwnx_set_conn_state(&rwnx_vif->drv_conn_state, (int)RWNX_DRV_STATUS_CONNECTING); + } + + + if(is_wep) { + if(sme->auth_type == NL80211_AUTHTYPE_AUTOMATIC) { + if(rwnx_vif->wep_enabled && rwnx_vif->wep_auth_err) { + if(rwnx_vif->last_auth_type == NL80211_AUTHTYPE_SHARED_KEY) + sme->auth_type = NL80211_AUTHTYPE_OPEN_SYSTEM; + else + sme->auth_type = NL80211_AUTHTYPE_SHARED_KEY; + } else { + if((rwnx_vif->wep_enabled && !rwnx_vif->wep_auth_err)) + sme->auth_type = rwnx_vif->last_auth_type; + else + sme->auth_type = NL80211_AUTHTYPE_SHARED_KEY; + } + AICWFDBG(LOGINFO, "auto: use sme->auth_type = %d\r\n", sme->auth_type); + } else { + if (rwnx_vif->wep_enabled && rwnx_vif->wep_auth_err && (sme->auth_type == rwnx_vif->last_auth_type)) { + if(sme->auth_type == NL80211_AUTHTYPE_SHARED_KEY) { + sme->auth_type = NL80211_AUTHTYPE_OPEN_SYSTEM; + AICWFDBG(LOGINFO, "start connect, auth_type changed, shared --> open\n"); + } else if(sme->auth_type == NL80211_AUTHTYPE_OPEN_SYSTEM) { + sme->auth_type = NL80211_AUTHTYPE_SHARED_KEY; + AICWFDBG(LOGINFO, "start connect, auth_type changed, open --> shared\n"); + } + } + } + } + + /* For SHARED-KEY authentication, must install key first */ + if (sme->auth_type == NL80211_AUTHTYPE_SHARED_KEY && sme->key) + { + struct key_params key_params; + key_params.key = (u8*)sme->key; + key_params.seq = NULL; + key_params.key_len = sme->key_len; + key_params.seq_len = 0; + key_params.cipher = sme->crypto.cipher_group; + rwnx_cfg80211_add_key(wiphy, dev, +#if (LINUX_VERSION_CODE >= HIGH_KERNEL_VERSION2) + 0, +#endif + sme->key_idx, false, NULL, &key_params); + } +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 17, 0) || defined(CONFIG_WPA3_FOR_OLD_KERNEL) + else if ((sme->auth_type == NL80211_AUTHTYPE_SAE) && + !(sme->flags & CONNECT_REQ_EXTERNAL_AUTH_SUPPORT)) { + netdev_err(dev, "Doesn't support SAE without external authentication\n"); + return -EINVAL; + } +#endif + + if (rwnx_vif->wdev.iftype == NL80211_IFTYPE_P2P_CLIENT) { + rwnx_hw->is_p2p_connected = 1; + } + + if (rwnx_vif->wdev.iftype == NL80211_IFTYPE_STATION || rwnx_vif->wdev.iftype == NL80211_IFTYPE_P2P_CLIENT) { + rwnx_vif->sta.paired_cipher_type = 0xff; + rwnx_vif->sta.group_cipher_type = 0xff; + } + + + /* Forward the information to the LMAC */ + if ((error = 
rwnx_send_sm_connect_req(rwnx_hw, rwnx_vif, sme, &sm_connect_cfm))) + return error; + + // Check the status + switch (sm_connect_cfm.status) + { + case CO_OK: + error = 0; + break; + case CO_BUSY: + error = -EINPROGRESS; + break; + case CO_OP_IN_PROGRESS: + error = -EALREADY; + break; + default: + error = -EIO; + break; + } + + return error; +} + +/** + * @disconnect: Disconnect from the BSS/ESS. + * (invoked with the wireless_dev mutex held) + */ +static int rwnx_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *dev, + u16 reason_code) +{ + struct rwnx_hw *rwnx_hw = wiphy_priv(wiphy); + struct rwnx_vif *rwnx_vif = netdev_priv(dev); + + RWNX_DBG(RWNX_FN_ENTRY_STR); + AICWFDBG(LOGINFO, "%s drv_vif_index:%d disconnect reason:%d \r\n", + __func__, rwnx_vif->drv_vif_index, reason_code); + + + if(atomic_read(&rwnx_vif->drv_conn_state) == RWNX_DRV_STATUS_DISCONNECTED) { + AICWFDBG(LOGERROR, "%s this\r\n",__func__); + WARN_ON(1); + return -EBUSY; + } + + if(atomic_read(&rwnx_vif->drv_conn_state) == RWNX_DRV_STATUS_DISCONNECTING) { + AICWFDBG(LOGERROR, "%s wifi is disconnecting, return it:%d \r\n", + __func__, reason_code); + return -EBUSY; + } + + if(atomic_read(&rwnx_vif->drv_conn_state) == RWNX_DRV_STATUS_CONNECTED|| + atomic_read(&rwnx_vif->drv_conn_state) == RWNX_DRV_STATUS_CONNECTING|| + atomic_read(&rwnx_vif->drv_conn_state) == RWNX_DRV_STATUS_ROAMING){ + + rwnx_set_conn_state(&rwnx_vif->drv_conn_state, RWNX_DRV_STATUS_DISCONNECTING); + + #ifdef CONFIG_USE_WIRELESS_EXT + memset(rwnx_hw->wext_essid, 0, 33); + #endif + key_flag = true; + return(rwnx_send_sm_disconnect_req(rwnx_hw, rwnx_vif, reason_code)); + } +#if 0 + else{ + cfg80211_connect_result(dev, NULL, NULL, 0, NULL, 0, + reason_code?reason_code:WLAN_STATUS_UNSPECIFIED_FAILURE, GFP_ATOMIC); + rwnx_set_conn_state(&rwnx_vif->drv_conn_state, RWNX_DRV_STATUS_DISCONNECTED); + rwnx_external_auth_disable(rwnx_vif); + return 0; + } +#endif + return 0; +} + +#ifdef CONFIG_SCHED_SCAN + +static int rwnx_cfg80211_sched_scan_stop(struct wiphy *wiphy, + struct net_device *ndev +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0) + ,u64 reqid) +#else + ) +#endif +{ + + struct rwnx_hw *rwnx_hw = wiphy_priv(wiphy); + //struct rwnx_vif *rwnx_vif = netdev_priv(dev); + AICWFDBG(LOGINFO, "%s enter wiphy:%p\r\n", __func__, wiphy); + + if(rwnx_hw->scan_request){ + AICWFDBG(LOGINFO, "%s rwnx_send_scanu_cancel_req\r\n", __func__); + return rwnx_send_scanu_cancel_req(rwnx_hw, NULL); + }else{ + return 0; + } +} + + +static int rwnx_cfg80211_sched_scan_start(struct wiphy *wiphy, + struct net_device *dev, + struct cfg80211_sched_scan_request *request) + +{ + struct rwnx_hw *rwnx_hw = wiphy_priv(wiphy); + struct rwnx_vif *rwnx_vif = netdev_priv(dev); + struct cfg80211_scan_request *scan_request = NULL; + + int ret = 0; + int index = 0; + + AICWFDBG(LOGINFO, "%s enter wiphy:%p\r\n", __func__, wiphy); + + if(rwnx_hw->is_sched_scan || rwnx_hw->scanning){ + AICWFDBG(LOGERROR, "%s is_sched_scanning and scanning, busy", __func__); + return -EBUSY; + } + + scan_request = (struct cfg80211_scan_request *)kmalloc(sizeof(struct cfg80211_scan_request), GFP_KERNEL); + + scan_request->ssids = request->ssids; + scan_request->n_channels = request->n_channels; + scan_request->n_ssids = request->n_match_sets; + scan_request->no_cck = false; + scan_request->ie = request->ie; + scan_request->ie_len = request->ie_len; + scan_request->flags = request->flags; + scan_request->wiphy = wiphy; + scan_request->scan_start = request->scan_start; +#if LINUX_VERSION_CODE >= 
KERNEL_VERSION(3,19,0) + memcpy(scan_request->mac_addr, request->mac_addr, ETH_ALEN); + memcpy(scan_request->mac_addr_mask, request->mac_addr_mask, ETH_ALEN); +#endif + rwnx_hw->sched_scan_req = request; + scan_request->wdev = &rwnx_vif->wdev; + AICWFDBG(LOGDEBUG, "%s scan_request->n_channels:%d \r\n", __func__, scan_request->n_channels); + AICWFDBG(LOGDEBUG, "%s scan_request->n_ssids:%d \r\n", __func__, scan_request->n_ssids); + + for(index = 0; index < scan_request->n_ssids; index++){ + memset(scan_request->ssids[index].ssid, 0, IEEE80211_MAX_SSID_LEN); + + memcpy(scan_request->ssids[index].ssid, + request->match_sets[index].ssid.ssid, + IEEE80211_MAX_SSID_LEN); + + scan_request->ssids[index].ssid_len = request->match_sets[index].ssid.ssid_len; + + AICWFDBG(LOGDEBUG, "%s request ssid:%s len:%d \r\n", __func__, + scan_request->ssids[index].ssid, scan_request->ssids[index].ssid_len); + } + + for(index = 0;index < scan_request->n_channels; index++){ + scan_request->channels[index] = request->channels[index]; + + AICWFDBG(LOGDEBUG, "%s scan_request->channels[%d]:%d \r\n", __func__, index, + scan_request->channels[index]->center_freq); + + if(scan_request->channels[index] == NULL){ + AICWFDBG(LOGERROR, "%s ERROR!!! channels is NULL", __func__); + continue; + } + } + + rwnx_hw->is_sched_scan = true; + + if(rwnx_hw->scanning){ + AICWFDBG(LOGERROR, "%s scanning, about it", __func__); + kfree(scan_request); + return -EBUSY; + }else{ + ret = rwnx_cfg80211_scan(wiphy, scan_request); + } + + return ret; +} +#endif //CONFIG_SCHED_SCAN + + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 17, 0) || defined(CONFIG_WPA3_FOR_OLD_KERNEL) +/** + * @external_auth: indicates result of offloaded authentication processing from + * user space + */ +static int rwnx_cfg80211_external_auth(struct wiphy *wiphy, struct net_device *dev, + struct cfg80211_external_auth_params *params) +{ + struct rwnx_hw *rwnx_hw = wiphy_priv(wiphy); + struct rwnx_vif *rwnx_vif = netdev_priv(dev); + + //printk("%s Enter \r\n"); + if (!rwnx_vif->sta.external_auth) + return -EINVAL; + + rwnx_external_auth_disable(rwnx_vif); + return rwnx_send_sm_external_auth_required_rsp(rwnx_hw, rwnx_vif, + params->status); +} +#endif + +/** + * @add_station: Add a new station. 
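+ * TDLS peers are not added here (they are handled in @change_station).
+ * On success the station entry returned by the LMAC is initialised (TX
+ * queues, U-APSD TIDs, PS state) and, for AP/P2P-GO interfaces, reported
+ * to cfg80211 with cfg80211_new_sta().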
+ */ +static int rwnx_cfg80211_add_station(struct wiphy *wiphy, + struct net_device *dev, +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 0)) + u8 *mac, +#else + const u8 *mac, +#endif + struct station_parameters *params) +{ + struct rwnx_hw *rwnx_hw = wiphy_priv(wiphy); + struct rwnx_vif *rwnx_vif = netdev_priv(dev); + struct me_sta_add_cfm me_sta_add_cfm; + int error = 0; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + WARN_ON(RWNX_VIF_TYPE(rwnx_vif) == NL80211_IFTYPE_AP_VLAN); + + /* Do not add TDLS station */ + if (params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)) + return 0; + + /* Indicate we are in a STA addition process - This will allow handling + * potential PS mode change indications correctly + */ + rwnx_hw->adding_sta = true; + + /* Forward the information to the LMAC */ + if ((error = rwnx_send_me_sta_add(rwnx_hw, params, mac, rwnx_vif->vif_index, + &me_sta_add_cfm))) + return error; + + // Check the status + switch (me_sta_add_cfm.status) + { + case CO_OK: + { + struct rwnx_sta *sta = &rwnx_hw->sta_table[me_sta_add_cfm.sta_idx]; + int tid; + sta->aid = params->aid; + + sta->sta_idx = me_sta_add_cfm.sta_idx; + sta->ch_idx = rwnx_vif->ch_index; + sta->vif_idx = rwnx_vif->vif_index; + sta->vlan_idx = sta->vif_idx; + sta->qos = (params->sta_flags_set & BIT(NL80211_STA_FLAG_WME)) != 0; +#if LINUX_VERSION_CODE >= HIGH_KERNEL_VERSION + sta->ht = params->link_sta_params.ht_capa ? 1 : 0; + sta->vht = params->link_sta_params.vht_capa ? 1 : 0; +#else + sta->ht = params->ht_capa ? 1 : 0; + sta->vht = params->vht_capa ? 1 : 0; +#endif + + sta->acm = 0; + sta->key.hw_idx = 0; + +#ifdef CONFIG_DYNAMIC_PERPWR + sta->rssi_save = 0; + sta->per_pwrloss = 0; + sta->last_jiffies = jiffies; + INIT_WORK(&sta->per_pwr_work, aicwf_txpwer_per_sta_worker); +#endif + + if (params->local_pm != NL80211_MESH_POWER_UNKNOWN) + sta->mesh_pm = params->local_pm; + else + sta->mesh_pm = rwnx_vif->ap.next_mesh_pm; + rwnx_update_mesh_power_mode(rwnx_vif); + + for (tid = 0; tid < NX_NB_TXQ_PER_STA; tid++) { + int uapsd_bit = rwnx_hwq2uapsd[rwnx_tid2hwq[tid]]; + if (params->uapsd_queues & uapsd_bit) + sta->uapsd_tids |= 1 << tid; + else + sta->uapsd_tids &= ~(1 << tid); + } + memcpy(sta->mac_addr, mac, ETH_ALEN); +#ifdef CONFIG_DEBUG_FS + rwnx_dbgfs_register_rc_stat(rwnx_hw, sta); +#endif + /* Ensure that we won't process PS change or channel switch ind*/ + spin_lock_bh(&rwnx_hw->cb_lock); + rwnx_txq_sta_init(rwnx_hw, sta, rwnx_txq_vif_get_status(rwnx_vif)); + list_add_tail(&sta->list, &rwnx_vif->ap.sta_list); + sta->valid = true; + rwnx_ps_bh_enable(rwnx_hw, sta, sta->ps.active || me_sta_add_cfm.pm_state); + spin_unlock_bh(&rwnx_hw->cb_lock); + + error = 0; + if(atomic_read(&rwnx_hw->sta_flowctrl[sta->sta_idx].tx_pending_cnt) > 0) + AICWFDBG(LOGDEBUG, "sta idx %d fc error %d.\n", sta->sta_idx, atomic_read(&rwnx_hw->sta_flowctrl[sta->sta_idx].tx_pending_cnt)); + + if (rwnx_vif->wdev.iftype == NL80211_IFTYPE_AP || rwnx_vif->wdev.iftype == NL80211_IFTYPE_P2P_GO) { + struct station_info sinfo; + memset(&sinfo, 0, sizeof(struct station_info)); + sinfo.assoc_req_ies = NULL; + sinfo.assoc_req_ies_len = 0; + #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0) + sinfo.filled |= STATION_INFO_ASSOC_REQ_IES; + #endif + cfg80211_new_sta(rwnx_vif->ndev, sta->mac_addr, &sinfo, GFP_KERNEL); + } + +#ifdef CONFIG_BAND_STEERING + struct tmp_feature_sta *f_sta = &rwnx_hw->feature_table[rwnx_vif->ap.tmp_sta_idx]; + AICWFDBG(LOGSTEER, "usb %s idx: %d, supp_band: %d\n ", __func__, rwnx_vif->ap.tmp_sta_idx, f_sta->supported_band); + if 
(rwnx_vif->ap.tmp_sta_idx < NX_REMOTE_STA_MAX + NX_VIRT_DEV_MAX) + rwnx_vif->ap.tmp_sta_idx++; + else + rwnx_vif->ap.tmp_sta_idx = 0; + + sta->support_band = f_sta->supported_band; + aicwf_nl_send_new_sta_msg(rwnx_vif, sta->mac_addr); +#endif + +#ifdef CONFIG_RWNX_BFMER + if (rwnx_hw->mod_params->bfmer) + rwnx_send_bfmer_enable(rwnx_hw, sta, params->vht_capa); + + rwnx_mu_group_sta_init(sta, params->vht_capa); +#endif /* CONFIG_RWNX_BFMER */ + + #define PRINT_STA_FLAG(f) \ + (params->sta_flags_set & BIT(NL80211_STA_FLAG_##f) ? "["#f"]" : "") + + netdev_info(dev, "Add sta %d (%pM) flags=%s%s%s%s%s%s%s", + sta->sta_idx, mac, + PRINT_STA_FLAG(AUTHORIZED), + PRINT_STA_FLAG(SHORT_PREAMBLE), + PRINT_STA_FLAG(WME), + PRINT_STA_FLAG(MFP), + PRINT_STA_FLAG(AUTHENTICATED), + PRINT_STA_FLAG(TDLS_PEER), + PRINT_STA_FLAG(ASSOCIATED)); + #undef PRINT_STA_FLAG + break; + } + default: + error = -EBUSY; + break; + } + + rwnx_hw->adding_sta = false; + + return error; +} + +/** + * @del_station: Remove a station + */ +static int rwnx_cfg80211_del_station_compat(struct wiphy *wiphy, + struct net_device *dev, +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 0)) + u8 *mac +#elif (LINUX_VERSION_CODE < KERNEL_VERSION(3, 19, 0)) + const u8 *mac +#else + struct station_del_parameters *params +#endif +) +{ + struct rwnx_hw *rwnx_hw = wiphy_priv(wiphy); + struct rwnx_vif *rwnx_vif = netdev_priv(dev); + struct rwnx_sta *cur, *tmp; + int error = 0, found = 0; +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0) + const u8 *mac = NULL; +#endif +#ifdef AICWF_RX_REORDER + struct reord_ctrl_info *reord_info, *reord_tmp; + u8 *macaddr; + struct aicwf_rx_priv *rx_priv; +#endif + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0) + if (params) + mac = params->mac; +#endif + AICWFDBG(LOGDEBUG ,"%s: %pM\n", __func__, mac); + + do { + spin_lock_bh(&rwnx_hw->cb_lock); + if(list_empty(&rwnx_vif->ap.sta_list)) { + spin_unlock_bh(&rwnx_hw->cb_lock); + break; + } + + list_for_each_entry_safe(cur, tmp, &rwnx_vif->ap.sta_list, list) { + if ((!mac) || (!memcmp(cur->mac_addr, mac, ETH_ALEN))) { + found = 1; + break; + } + } + + if(found) { + cur->ps.active = false; + cur->valid = false; + list_del(&cur->list); + } + spin_unlock_bh(&rwnx_hw->cb_lock); + + if(found) { + netdev_info(dev, "Del sta %d (%pM)", cur->sta_idx, cur->mac_addr); + if (cur->vif_idx != cur->vlan_idx) { + struct rwnx_vif *vlan_vif; + vlan_vif = rwnx_hw->vif_table[cur->vlan_idx]; + if (vlan_vif->up) { + if ((RWNX_VIF_TYPE(vlan_vif) == NL80211_IFTYPE_AP_VLAN) && + (vlan_vif->use_4addr)) { + vlan_vif->ap_vlan.sta_4a = NULL; + } else { + WARN(1, "Deleting sta belonging to VLAN other than AP_VLAN 4A"); + } + } + } + if (rwnx_vif->wdev.iftype == NL80211_IFTYPE_AP || rwnx_vif->wdev.iftype == NL80211_IFTYPE_P2P_GO) { + cfg80211_del_sta(rwnx_vif->ndev, cur->mac_addr, GFP_KERNEL); + } + +#ifdef AICWF_RX_REORDER +#ifdef AICWF_SDIO_SUPPORT + rx_priv = rwnx_hw->sdiodev->rx_priv; +#else + rx_priv = rwnx_hw->usbdev->rx_priv; +#endif + if ((rwnx_vif->wdev.iftype == NL80211_IFTYPE_STATION) || (rwnx_vif->wdev.iftype == NL80211_IFTYPE_P2P_CLIENT)) { + BUG();//should be other function + } + else if ((rwnx_vif->wdev.iftype == NL80211_IFTYPE_AP) || (rwnx_vif->wdev.iftype == NL80211_IFTYPE_P2P_GO)){ + macaddr = cur->mac_addr; + AICWFDBG(LOGINFO, "deinit:macaddr:%x,%x,%x,%x,%x,%x\r\n", macaddr[0],macaddr[1],macaddr[2], \ + macaddr[3],macaddr[4],macaddr[5]); + list_for_each_entry_safe(reord_info, reord_tmp, + &rx_priv->stas_reord_list, list) { + AICWFDBG(LOGINFO, 
"reord_mac:%x,%x,%x,%x,%x,%x\r\n", reord_info->mac_addr[0],reord_info->mac_addr[1],reord_info->mac_addr[2], \ + reord_info->mac_addr[3],reord_info->mac_addr[4],reord_info->mac_addr[5]); + if (!memcmp(reord_info->mac_addr, macaddr, 6)) { + reord_deinit_sta(rx_priv, reord_info); + break; + } + } + } +#endif + +#ifdef CONFIG_DYNAMIC_PERPWR + cur->rssi_save = 0; + cur->per_pwrloss = 0; + cur->last_jiffies = 0; + cancel_work_sync(&cur->per_pwr_work); +#endif +#ifdef CONFIG_BAND_STEERING + aicwf_nl_send_del_sta_msg(rwnx_vif, cur->mac_addr); + cur->link_time = 0; + cur->rssi = 0; + cur->support_band = 0; +#endif + rwnx_txq_sta_deinit(rwnx_hw, cur); + error = rwnx_send_me_sta_del(rwnx_hw, cur->sta_idx, false); + if ((error != 0) && (error != -EPIPE)) + return error; + +#ifdef CONFIG_RWNX_BFMER + // Disable Beamformer if supported + rwnx_bfmer_report_del(rwnx_hw, cur); + rwnx_mu_group_sta_del(rwnx_hw, cur); +#endif /* CONFIG_RWNX_BFMER */ + +#ifdef CONFIG_DEBUG_FS + rwnx_dbgfs_unregister_rc_stat(rwnx_hw, cur); +#endif + } + + if(mac) + break; + } while (1); + + rwnx_update_mesh_power_mode(rwnx_vif); + + if(!found && mac != NULL) + return -ENOENT; + else + return 0; +} + + +void apm_staloss_work_process(struct work_struct *work) +{ + struct rwnx_hw *rwnx_hw = container_of(work, struct rwnx_hw, apmStalossWork); + struct rwnx_sta *cur, *tmp; + int error = 0; + +#ifdef AICWF_RX_REORDER + struct reord_ctrl_info *reord_info, *reord_tmp; + u8 *macaddr; + struct aicwf_rx_priv *rx_priv; +#endif + struct rwnx_vif *rwnx_vif; + bool_l found = false; + const u8 *mac = rwnx_hw->sta_mac_addr; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + // Look for VIF entry + list_for_each_entry(rwnx_vif, &rwnx_hw->vifs, list) { + if (rwnx_vif->vif_index == rwnx_hw->apm_vif_idx) { + found = true; + break; + } + } + + AICWFDBG(LOGINFO, "apm vif idx=%d, found=%d, mac addr=%pM\n", rwnx_hw->apm_vif_idx, found, mac); + if (!found || !rwnx_vif || (RWNX_VIF_TYPE(rwnx_vif) != NL80211_IFTYPE_AP && RWNX_VIF_TYPE(rwnx_vif) != NL80211_IFTYPE_P2P_GO)) + { + return; + } + + found = false; + spin_lock_bh(&rwnx_hw->cb_lock); + list_for_each_entry_safe(cur, tmp, &rwnx_vif->ap.sta_list, list) { + if ((mac) && (!memcmp(cur->mac_addr, mac, ETH_ALEN))) { + found = true; + break; + } + } + if(found) { + cur->ps.active = false; + cur->valid = false; + list_del(&cur->list); + } + spin_unlock_bh(&rwnx_hw->cb_lock); + + if(found) { + netdev_info(rwnx_vif->ndev, "Del sta %d (%pM)", cur->sta_idx, cur->mac_addr); + if (cur->vif_idx != cur->vlan_idx) { + struct rwnx_vif *vlan_vif; + vlan_vif = rwnx_hw->vif_table[cur->vlan_idx]; + if (vlan_vif->up) { + if ((RWNX_VIF_TYPE(vlan_vif) == NL80211_IFTYPE_AP_VLAN) && + (vlan_vif->use_4addr)) { + vlan_vif->ap_vlan.sta_4a = NULL; + } else { + WARN(1, "Deleting sta belonging to VLAN other than AP_VLAN 4A"); + } + } + } + // if (rwnx_vif->wdev.iftype == NL80211_IFTYPE_AP || rwnx_vif->wdev.iftype == NL80211_IFTYPE_P2P_GO) { + // cfg80211_del_sta(rwnx_vif->ndev, cur->mac_addr, GFP_KERNEL); + // } + +#ifdef AICWF_RX_REORDER +#ifdef AICWF_SDIO_SUPPORT + rx_priv = rwnx_hw->sdiodev->rx_priv; +#else + rx_priv = rwnx_hw->usbdev->rx_priv; +#endif + if ((rwnx_vif->wdev.iftype == NL80211_IFTYPE_STATION) || (rwnx_vif->wdev.iftype == NL80211_IFTYPE_P2P_CLIENT)) { + BUG();//should be other function + } else if ((rwnx_vif->wdev.iftype == NL80211_IFTYPE_AP) || (rwnx_vif->wdev.iftype == NL80211_IFTYPE_P2P_GO)) { + macaddr = cur->mac_addr; + AICWFDBG(LOGINFO, "deinit:macaddr:%x,%x,%x,%x,%x,%x\r\n", macaddr[0], macaddr[1], macaddr[2], \ + 
macaddr[3], macaddr[4], macaddr[5]); + //spin_lock_bh(&rx_priv->stas_reord_lock); + list_for_each_entry_safe(reord_info, reord_tmp, + &rx_priv->stas_reord_list, list) { + AICWFDBG(LOGINFO, "reord_mac:%x,%x,%x,%x,%x,%x\r\n", reord_info->mac_addr[0], reord_info->mac_addr[1], reord_info->mac_addr[2], \ + reord_info->mac_addr[3], reord_info->mac_addr[4], reord_info->mac_addr[5]); + if (!memcmp(reord_info->mac_addr, macaddr, 6)) { + reord_deinit_sta(rx_priv, reord_info); + break; + } + } + //spin_unlock_bh(&rx_priv->stas_reord_lock); + } +#endif + + rwnx_txq_sta_deinit(rwnx_hw, cur); + error = rwnx_send_me_sta_del(rwnx_hw, cur->sta_idx, false); + if ((error != 0) && (error != -EPIPE)) + return; + +#ifdef CONFIG_RWNX_BFMER + // Disable Beamformer if supported + rwnx_bfmer_report_del(rwnx_hw, cur); + rwnx_mu_group_sta_del(rwnx_hw, cur); +#endif /* CONFIG_RWNX_BFMER */ + + +#ifdef CONFIG_DEBUG_FS + rwnx_dbgfs_unregister_rc_stat(rwnx_hw, cur); +#endif + }else { + printk("sta not found: %pM\n", mac); + return; + } + + rwnx_update_mesh_power_mode(rwnx_vif); +} + + +void apm_probe_sta_work_process(struct work_struct *work) +{ + struct apm_probe_sta *probe_sta = container_of(work, struct apm_probe_sta, apmprobestaWork); + struct rwnx_vif *rwnx_vif = container_of(probe_sta, struct rwnx_vif, sta_probe); + bool found = false; + struct rwnx_sta *cur, *tmp; + + u8 *mac = rwnx_vif->sta_probe.sta_mac_addr; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + spin_lock_bh(&rwnx_vif->rwnx_hw->cb_lock); + list_for_each_entry_safe(cur, tmp, &rwnx_vif->ap.sta_list, list) { + if (!memcmp(cur->mac_addr, mac, ETH_ALEN)) { + found = true; + break; + } + } + spin_unlock_bh(&rwnx_vif->rwnx_hw->cb_lock); + + printk("sta %pM found = %d\n", mac, found); +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 17, 0) + if(found) + cfg80211_probe_status(rwnx_vif->ndev, mac, (u64)rwnx_vif->sta_probe.probe_id, 1, 0, false, GFP_ATOMIC); + else + cfg80211_probe_status(rwnx_vif->ndev, mac, (u64)rwnx_vif->sta_probe.probe_id, 0, 0, false, GFP_ATOMIC); +#else + if(found) + cfg80211_probe_status(rwnx_vif->ndev, mac, (u64)rwnx_vif->sta_probe.probe_id, 1, GFP_ATOMIC); + else + cfg80211_probe_status(rwnx_vif->ndev, mac, (u64)rwnx_vif->sta_probe.probe_id, 0, GFP_ATOMIC); +#endif + rwnx_vif->sta_probe.probe_id ++; +} + +/** + * @change_station: Modify a given station. Note that flags changes are not much + * validated in cfg80211, in particular the auth/assoc/authorized flags + * might come to the driver in invalid combinations -- make sure to check + * them, also against the existing state! Drivers must call + * cfg80211_check_station_change() to validate the information. 
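+ * In this driver the handler also creates TDLS peers on demand, forwards
+ * the AUTHORIZED flag as a control-port open/close request to the LMAC,
+ * applies mesh plink state and power-mode updates, and re-binds the station
+ * when it is moved to an AP_VLAN interface.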
+ */ +static int rwnx_cfg80211_change_station(struct wiphy *wiphy, struct net_device *dev, +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 0)) + u8 *mac, +#else + const u8 *mac, +#endif + struct station_parameters *params) +{ + struct rwnx_hw *rwnx_hw = wiphy_priv(wiphy); + struct rwnx_vif *vif = netdev_priv(dev); + struct rwnx_sta *sta; + + sta = rwnx_get_sta(rwnx_hw, mac); + if (!sta) + { + /* Add the TDLS station */ + if (params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)) + { + struct rwnx_vif *rwnx_vif = netdev_priv(dev); + struct me_sta_add_cfm me_sta_add_cfm; + int error = 0; + + /* Indicate we are in a STA addition process - This will allow handling + * potential PS mode change indications correctly + */ + rwnx_hw->adding_sta = true; + + /* Forward the information to the LMAC */ + if ((error = rwnx_send_me_sta_add(rwnx_hw, params, mac, rwnx_vif->vif_index, + &me_sta_add_cfm))) + return error; + + // Check the status + switch (me_sta_add_cfm.status) + { + case CO_OK: + { + int tid; + sta = &rwnx_hw->sta_table[me_sta_add_cfm.sta_idx]; + sta->aid = params->aid; + sta->sta_idx = me_sta_add_cfm.sta_idx; + sta->ch_idx = rwnx_vif->ch_index; + sta->vif_idx = rwnx_vif->vif_index; + sta->vlan_idx = sta->vif_idx; + sta->qos = (params->sta_flags_set & BIT(NL80211_STA_FLAG_WME)) != 0; +#if LINUX_VERSION_CODE >= HIGH_KERNEL_VERSION + sta->ht = params->link_sta_params.ht_capa ? 1 : 0; + sta->vht = params->link_sta_params.vht_capa ? 1 : 0; +#else + sta->ht = params->ht_capa ? 1 : 0; + sta->vht = params->vht_capa ? 1 : 0; +#endif + sta->acm = 0; + for (tid = 0; tid < NX_NB_TXQ_PER_STA; tid++) { + int uapsd_bit = rwnx_hwq2uapsd[rwnx_tid2hwq[tid]]; + if (params->uapsd_queues & uapsd_bit) + sta->uapsd_tids |= 1 << tid; + else + sta->uapsd_tids &= ~(1 << tid); + } + memcpy(sta->mac_addr, mac, ETH_ALEN); +#ifdef CONFIG_DEBUG_FS + rwnx_dbgfs_register_rc_stat(rwnx_hw, sta); +#endif + /* Ensure that we won't process PS change or channel switch ind*/ + spin_lock_bh(&rwnx_hw->cb_lock); + rwnx_txq_sta_init(rwnx_hw, sta, rwnx_txq_vif_get_status(rwnx_vif)); + if (rwnx_vif->tdls_status == TDLS_SETUP_RSP_TX) { + rwnx_vif->tdls_status = TDLS_LINK_ACTIVE; + sta->tdls.initiator = true; + sta->tdls.active = true; + } + /* Set TDLS channel switch capability */ + if ((params->ext_capab[3] & WLAN_EXT_CAPA4_TDLS_CHAN_SWITCH) && + !rwnx_vif->tdls_chsw_prohibited) + sta->tdls.chsw_allowed = true; + rwnx_vif->sta.tdls_sta = sta; + sta->valid = true; + spin_unlock_bh(&rwnx_hw->cb_lock); +#ifdef CONFIG_RWNX_BFMER + if (rwnx_hw->mod_params->bfmer) + rwnx_send_bfmer_enable(rwnx_hw, sta, params->vht_capa); + + rwnx_mu_group_sta_init(sta, NULL); +#endif /* CONFIG_RWNX_BFMER */ + + #define PRINT_STA_FLAG(f) \ + (params->sta_flags_set & BIT(NL80211_STA_FLAG_##f) ? "["#f"]" : "") + + netdev_info(dev, "Add %s TDLS sta %d (%pM) flags=%s%s%s%s%s%s%s", + sta->tdls.initiator ? 
"initiator" : "responder", + sta->sta_idx, mac, + PRINT_STA_FLAG(AUTHORIZED), + PRINT_STA_FLAG(SHORT_PREAMBLE), + PRINT_STA_FLAG(WME), + PRINT_STA_FLAG(MFP), + PRINT_STA_FLAG(AUTHENTICATED), + PRINT_STA_FLAG(TDLS_PEER), + PRINT_STA_FLAG(ASSOCIATED)); + #undef PRINT_STA_FLAG + + break; + } + default: + error = -EBUSY; + break; + } + + rwnx_hw->adding_sta = false; + } else { + return -EINVAL; + } + } + + if (params->sta_flags_mask & BIT(NL80211_STA_FLAG_AUTHORIZED)) + rwnx_send_me_set_control_port_req(rwnx_hw, + (params->sta_flags_set & BIT(NL80211_STA_FLAG_AUTHORIZED)) != 0, + sta->sta_idx); + + if (RWNX_VIF_TYPE(vif) == NL80211_IFTYPE_MESH_POINT) { + if (params->sta_modify_mask & STATION_PARAM_APPLY_PLINK_STATE) { + if (params->plink_state < NUM_NL80211_PLINK_STATES) { + rwnx_send_mesh_peer_update_ntf(rwnx_hw, vif, sta->sta_idx, params->plink_state); + } + } + + if (params->local_pm != NL80211_MESH_POWER_UNKNOWN) { + sta->mesh_pm = params->local_pm; + rwnx_update_mesh_power_mode(vif); + } + } + + if (params->vlan) { + uint8_t vlan_idx; + + vif = netdev_priv(params->vlan); + vlan_idx = vif->vif_index; + + if (sta->vlan_idx != vlan_idx) { + struct rwnx_vif *old_vif; + old_vif = rwnx_hw->vif_table[sta->vlan_idx]; + rwnx_txq_sta_switch_vif(sta, old_vif, vif); + sta->vlan_idx = vlan_idx; + + if ((RWNX_VIF_TYPE(vif) == NL80211_IFTYPE_AP_VLAN) && + (vif->use_4addr)) { + WARN((vif->ap_vlan.sta_4a), + "4A AP_VLAN interface with more than one sta"); + vif->ap_vlan.sta_4a = sta; + } + + if ((RWNX_VIF_TYPE(old_vif) == NL80211_IFTYPE_AP_VLAN) && + (old_vif->use_4addr)) { + old_vif->ap_vlan.sta_4a = NULL; + } + } + } + + return 0; +} + +/** + * @start_ap: Start acting in AP mode defined by the parameters. + */ +static int rwnx_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *dev, + struct cfg80211_ap_settings *settings) +{ + struct rwnx_hw *rwnx_hw = wiphy_priv(wiphy); + struct rwnx_vif *rwnx_vif = netdev_priv(dev); + struct apm_start_cfm apm_start_cfm; + struct rwnx_ipc_elem_var elem; + struct rwnx_sta *sta; + int error = 0; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + INIT_WORK(&rwnx_vif->sta_probe.apmprobestaWork, apm_probe_sta_work_process); + rwnx_vif->sta_probe.apmprobesta_wq = create_singlethread_workqueue("apmprobe_wq"); + if (!rwnx_vif->sta_probe.apmprobesta_wq) { + txrx_err("insufficient memory to create apmprobe_wq.\n"); + return -ENOBUFS; + } + if (rwnx_vif->wdev.iftype == NL80211_IFTYPE_P2P_GO) + rwnx_hw->is_p2p_connected = 1; + /* Forward the information to the LMAC */ + if ((error = rwnx_send_apm_start_req(rwnx_hw, rwnx_vif, settings, + &apm_start_cfm, &elem))) + goto end; + + // Check the status + switch (apm_start_cfm.status) + { + case CO_OK: + { + u8 txq_status = 0; + rwnx_vif->ap.bcmc_index = apm_start_cfm.bcmc_idx; + rwnx_vif->ap.flags = 0; + rwnx_vif->ap.csa = NULL; + #if (defined CONFIG_HE_FOR_OLD_KERNEL) || (defined CONFIG_VHT_FOR_OLD_KERNEL) + rwnx_vif->ap.aic_index = 0; + #endif +#ifdef CONFIG_BAND_STEERING + rwnx_vif->ap.band = (enum band_type)((settings->chandef).chan)->band; + rwnx_vif->ap.freq = ((settings->chandef).chan)->center_freq; + rwnx_vif->ap.tmp_sta_idx = 0; +#endif + sta = &rwnx_hw->sta_table[apm_start_cfm.bcmc_idx]; + sta->valid = true; + sta->aid = 0; + sta->sta_idx = apm_start_cfm.bcmc_idx; + sta->ch_idx = apm_start_cfm.ch_idx; + sta->vif_idx = rwnx_vif->vif_index; + sta->qos = false; + sta->acm = 0; + sta->ps.active = false; + rwnx_mu_group_sta_init(sta, NULL); + spin_lock_bh(&rwnx_hw->cb_lock); +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0) + 
rwnx_chanctx_link(rwnx_vif, apm_start_cfm.ch_idx, + NULL); +#else + rwnx_chanctx_link(rwnx_vif, apm_start_cfm.ch_idx, + &settings->chandef); +#endif + if (rwnx_hw->cur_chanctx != apm_start_cfm.ch_idx) { + txq_status = RWNX_TXQ_STOP_CHAN; + } + rwnx_txq_vif_init(rwnx_hw, rwnx_vif, txq_status); + spin_unlock_bh(&rwnx_hw->cb_lock); + + netif_tx_start_all_queues(dev); + netif_carrier_on(dev); + error = 0; + /* If the AP channel is already the active, we probably skip radar + activation on MM_CHANNEL_SWITCH_IND (unless another vif use this + ctxt). In anycase retest if radar detection must be activated + */ + if (txq_status == 0) { + rwnx_radar_detection_enable_on_cur_channel(rwnx_hw); + } + break; + } + case CO_BUSY: + error = -EINPROGRESS; + break; + case CO_OP_IN_PROGRESS: + error = -EALREADY; + break; + default: + error = -EIO; + break; + } + + if (error) { + netdev_info(dev, "Failed to start AP (%d)", error); + } else { + netdev_info(dev, "AP started: ch=%d, bcmc_idx=%d channel=%d bw=%d", + rwnx_vif->ch_index, rwnx_vif->ap.bcmc_index, + ((settings->chandef).chan)->center_freq, + ((settings->chandef).width)); + } + +#ifdef CONFIG_BAND_STEERING + if (!error) { +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0) + init_timer(&rwnx_vif->steer_timer); + rwnx_vif->steer_timer.data = (ulong)rwnx_vif; + rwnx_vif->steer_timer.function = aicwf_steering_timeout; +#else + timer_setup(&rwnx_vif->steer_timer, aicwf_steering_timeout, 0); +#endif + aicwf_nl_hook(rwnx_vif, rwnx_vif->ap.band, rwnx_vif->rwnx_hw->iface_idx); + + INIT_WORK(&rwnx_vif->steer_work, aicwf_steering_work); + for (int i = 0; i < MAX_PENDING_PROBES; i++) { + rwnx_vif->pb_pool[i].in_use = false; + INIT_WORK(&rwnx_vif->pb_pool[i].rsp_work, rwnx_probersp_work); + } + rwnx_vif->rsp_wq = create_workqueue("aic_usb_rsp_wq"); + if (!rwnx_vif->rsp_wq) { + AICWFDBG(LOGERROR, "%s create rsp_wq fail\n", __func__); + return -ENOBUFS; + } + rwnx_vif->ap.start = true; + mod_timer(&rwnx_vif->steer_timer, jiffies + msecs_to_jiffies(STEER_UPFATE_TIME)); + } +#endif + + end: + //rwnx_ipc_elem_var_deallocs(rwnx_hw, &elem); + + return error; +} + + +/** + * @change_beacon: Change the beacon parameters for an access point mode + * interface. This should reject the call when AP mode wasn't started. 
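+ * The beacon is rebuilt with rwnx_build_bcn() and pushed to the firmware via
+ * rwnx_send_bcn()/rwnx_send_bcn_change(); on kernels newer than 6.7 the
+ * beacon data is taken from struct cfg80211_ap_update.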
+ */ +#if LINUX_VERSION_CODE > KERNEL_VERSION(6, 7, 0) +static int rwnx_cfg80211_change_beacon(struct wiphy *wiphy, struct net_device *dev, + struct cfg80211_ap_update *info) +#else +static int rwnx_cfg80211_change_beacon(struct wiphy *wiphy, struct net_device *dev, + struct cfg80211_beacon_data *info) +#endif +{ + struct rwnx_hw *rwnx_hw = wiphy_priv(wiphy); + struct rwnx_vif *vif = netdev_priv(dev); + struct rwnx_bcn *bcn = &vif->ap.bcn; + struct rwnx_ipc_elem_var elem; + u8 *buf; + int error = 0; + RWNX_DBG(RWNX_FN_ENTRY_STR); + + //elem init + elem.dma_addr = 0; + + // Build the beacon +#if LINUX_VERSION_CODE > KERNEL_VERSION(6, 7, 0) + buf = rwnx_build_bcn(bcn, &info->beacon); +#else + buf = rwnx_build_bcn(bcn, info); +#endif + if (!buf) + return -ENOMEM; + + rwnx_send_bcn(rwnx_hw, buf, vif->vif_index, bcn->len); + +#if 0 + // Sync buffer for FW + if ((error = rwnx_ipc_elem_var_allocs(rwnx_hw, &elem, bcn->len, DMA_TO_DEVICE, + buf, NULL, NULL))) + return error; +#endif + // Forward the information to the LMAC + error = rwnx_send_bcn_change(rwnx_hw, vif->vif_index, elem.dma_addr, + bcn->len, bcn->head_len, bcn->tim_len, NULL); + +#if 0 + rwnx_ipc_elem_var_deallocs(rwnx_hw, &elem); +#else + + +#endif + return error; +} + +/** + * * @stop_ap: Stop being an AP, including stopping beaconing. + */ +#if (LINUX_VERSION_CODE >= HIGH_KERNEL_VERSION) +static int rwnx_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *dev, unsigned int link_id) +#else +static int rwnx_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *dev) +#endif + +{ + struct rwnx_hw *rwnx_hw = wiphy_priv(wiphy); + struct rwnx_vif *rwnx_vif = netdev_priv(dev); + struct rwnx_sta *sta; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + netif_tx_stop_all_queues(dev); + netif_carrier_off(dev); + + /* delete any remaining STA*/ + while (!list_empty(&rwnx_vif->ap.sta_list)) { + rwnx_cfg80211_del_station_compat(wiphy, dev, NULL); + } + +#ifdef CONFIG_BAND_STEERING + rwnx_vif->ap.start = false; + aicwf_nl_hook_deinit(rwnx_vif->ap.band, rwnx_vif->rwnx_hw->iface_idx); + if (timer_pending(&rwnx_vif->steer_timer)) + del_timer_sync(&rwnx_vif->steer_timer); + cancel_work_sync(&rwnx_vif->steer_work); + flush_workqueue(rwnx_vif->rsp_wq); + destroy_workqueue(rwnx_vif->rsp_wq); +#endif + + if (rwnx_vif->wdev.iftype == NL80211_IFTYPE_P2P_GO) + rwnx_hw->is_p2p_connected = 0; + rwnx_radar_cancel_cac(&rwnx_hw->radar); + rwnx_send_apm_stop_req(rwnx_hw, rwnx_vif); + spin_lock_bh(&rwnx_hw->cb_lock); + rwnx_chanctx_unlink(rwnx_vif); + spin_unlock_bh(&rwnx_hw->cb_lock); + + /* delete BC/MC STA */ + sta = &rwnx_hw->sta_table[rwnx_vif->ap.bcmc_index]; + rwnx_txq_vif_deinit(rwnx_hw, rwnx_vif); + rwnx_del_bcn(&rwnx_vif->ap.bcn); + rwnx_del_csa(rwnx_vif); + flush_workqueue(rwnx_vif->sta_probe.apmprobesta_wq); + destroy_workqueue(rwnx_vif->sta_probe.apmprobesta_wq); + + netdev_info(dev, "AP Stopped"); + + return 0; +} + +/** + * @set_monitor_channel: Set the monitor mode channel for the device. If other + * interfaces are active this callback should reject the configuration. + * If no interfaces are active or the device is down, the channel should + * be stored for when a monitor interface becomes active. + * + * Also called internaly with chandef set to NULL simply to retrieve the channel + * configured at firmware level. 
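+ *
+ * Here the request is always forwarded to the firmware so that the channel
+ * context index in use can be retrieved; when other interfaces are already
+ * started only the context index is updated and -EBUSY is returned.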
+ */ +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0) +static inline bool +cfg80211_chandef_identical(const struct cfg80211_chan_def *chandef1, + const struct cfg80211_chan_def *chandef2) +{ + return (chandef1->chan == chandef2->chan && + chandef1->width == chandef2->width && + chandef1->center_freq1 == chandef2->center_freq1 && + chandef1->center_freq2 == chandef2->center_freq2); +} +#endif + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 13, 0)) +static int rwnx_cfg80211_set_monitor_channel(struct wiphy *wiphy, struct net_device *dev, + struct cfg80211_chan_def *chandef) +#else +static int rwnx_cfg80211_set_monitor_channel(struct wiphy *wiphy, + struct cfg80211_chan_def *chandef) +#endif +{ + struct rwnx_hw *rwnx_hw = wiphy_priv(wiphy); + struct rwnx_vif *rwnx_vif; + struct me_config_monitor_cfm cfm; + RWNX_DBG(RWNX_FN_ENTRY_STR); + + if (rwnx_hw->monitor_vif == RWNX_INVALID_VIF) + return -EINVAL; + + rwnx_vif = rwnx_hw->vif_table[rwnx_hw->monitor_vif]; + + // Do nothing if monitor interface is already configured with the requested channel + if (rwnx_chanctx_valid(rwnx_hw, rwnx_vif->ch_index)) { + struct rwnx_chanctx *ctxt; + ctxt = &rwnx_vif->rwnx_hw->chanctx_table[rwnx_vif->ch_index]; + if (chandef && cfg80211_chandef_identical(&ctxt->chan_def, chandef)) + return 0; + } + + // Always send command to firmware. It allows to retrieve channel context index + // and its configuration. + if (rwnx_send_config_monitor_req(rwnx_hw, chandef, &cfm)) + return -EIO; + + // Always re-set channel context info + rwnx_chanctx_unlink(rwnx_vif); + + + + // If there is also a STA interface not yet connected then monitor interface + // will only have a channel context after the connection of the STA interface. + if (cfm.chan_index != RWNX_CH_NOT_SET) + { + struct cfg80211_chan_def mon_chandef; + + if (rwnx_hw->vif_started > 1) { + // In this case we just want to update the channel context index not + // the channel configuration + rwnx_chanctx_link(rwnx_vif, cfm.chan_index, NULL); + return -EBUSY; + } + + mon_chandef.chan = ieee80211_get_channel(wiphy, cfm.chan.prim20_freq); + mon_chandef.center_freq1 = cfm.chan.center1_freq; + mon_chandef.center_freq2 = cfm.chan.center2_freq; + mon_chandef.width = chnl2bw[cfm.chan.type]; + rwnx_chanctx_link(rwnx_vif, cfm.chan_index, &mon_chandef); + } + + return 0; +} + + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 13, 0)) +int rwnx_cfg80211_set_monitor_channel_(struct wiphy *wiphy, + struct net_device *dev, + struct cfg80211_chan_def *chandef) +{ + return rwnx_cfg80211_set_monitor_channel(wiphy, dev, chandef); +} +#else +int rwnx_cfg80211_set_monitor_channel_(struct wiphy *wiphy, + struct cfg80211_chan_def *chandef) +{ + return rwnx_cfg80211_set_monitor_channel(wiphy, chandef); +} +#endif + + +/** + * @probe_client: probe an associated client, must return a cookie that it + * later passes to cfg80211_probe_status(). 
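+ * The probe itself runs asynchronously: the peer address is handed to the
+ * apmprobesta workqueue and the returned cookie is the current probe_id,
+ * which apm_probe_sta_work_process() later passes to cfg80211_probe_status().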
+ */ +int rwnx_cfg80211_probe_client(struct wiphy *wiphy, struct net_device *dev, + const u8 *peer, u64 *cookie) +{ + //struct rwnx_hw *rwnx_hw = wiphy_priv(wiphy); + struct rwnx_vif *vif = netdev_priv(dev); + struct rwnx_sta *sta = NULL; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + if((RWNX_VIF_TYPE(vif) != NL80211_IFTYPE_AP) && (RWNX_VIF_TYPE(vif) != NL80211_IFTYPE_P2P_GO) && + (RWNX_VIF_TYPE(vif) != NL80211_IFTYPE_AP_VLAN)) + return -EINVAL; + + spin_lock_bh(&vif->rwnx_hw->cb_lock); + list_for_each_entry(sta, &vif->ap.sta_list, list){ + if (sta->valid && ether_addr_equal(sta->mac_addr, peer)) + break; + } + spin_unlock_bh(&vif->rwnx_hw->cb_lock); + + if (!sta) + return -ENOENT; + + + memcpy(vif->sta_probe.sta_mac_addr, peer, 6); + queue_work(vif->sta_probe.apmprobesta_wq, &vif->sta_probe.apmprobestaWork); + + *cookie = vif->sta_probe.probe_id; + + return 0; +} + +/** + * @mgmt_frame_register: Notify driver that a management frame type was + * registered. Note that this callback may not sleep, and cannot run + * concurrently with itself. + */ +void rwnx_cfg80211_mgmt_frame_register(struct wiphy *wiphy, +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,6,0)) + struct net_device *dev, +#else + struct wireless_dev *wdev, +#endif + u16 frame_type, bool reg) +{ +} + +/** + * @set_wiphy_params: Notify that wiphy parameters have changed; + * @changed bitfield (see &enum wiphy_params_flags) describes which values + * have changed. The actual parameter values are available in + * struct wiphy. If returning an error, no value should be changed. + */ +static int rwnx_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed) +{ + return 0; +} + + +/** + * @set_tx_power: set the transmit power according to the parameters, + * the power passed is in mBm, to get dBm use MBM_TO_DBM(). The + * wdev may be %NULL if power was set for the wiphy, and will + * always be %NULL unless the driver supports per-vif TX power + * (as advertised by the nl80211 feature flag.) 
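+ *
+ * NL80211_TX_POWER_AUTOMATIC is passed to the firmware as power level 0x7f;
+ * an explicit level is converted with MBM_TO_DBM() and applied either to the
+ * given vif or to every vif of the wiphy.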
+ */ +static int rwnx_cfg80211_set_tx_power(struct wiphy *wiphy, +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0) + struct wireless_dev *wdev, +#endif + enum nl80211_tx_power_setting type, int mbm) +{ + #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0) + struct wireless_dev *wdev = NULL; + #endif + struct rwnx_hw *rwnx_hw = wiphy_priv(wiphy); + struct rwnx_vif *vif; + s8 pwr; + int res = 0; + + if (type == NL80211_TX_POWER_AUTOMATIC) { + pwr = 0x7f; + } else { + pwr = MBM_TO_DBM(mbm); + } + + if (wdev) { + vif = container_of(wdev, struct rwnx_vif, wdev); + res = rwnx_send_set_power(rwnx_hw, vif->vif_index, pwr, NULL); + } else { + list_for_each_entry(vif, &rwnx_hw->vifs, list) { + res = rwnx_send_set_power(rwnx_hw, vif->vif_index, pwr, NULL); + if (res) + break; + } + } + + return res; +} + + +/** + * @set_power_mgmt: set the power save to one of those two modes: + * Power-save off + * Power-save on - Dynamic mode + */ +static int rwnx_cfg80211_set_power_mgmt(struct wiphy *wiphy, + struct net_device *dev, + bool enabled, int timeout) +{ +#if 0 + + struct rwnx_hw *rwnx_hw = wiphy_priv(wiphy); + u8 ps_mode; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + if (timeout >= 0) + netdev_info(dev, "Ignore timeout value %d", timeout); + + if (!(rwnx_hw->version_cfm.features & BIT(MM_FEAT_PS_BIT))) + enabled = false; + + if (enabled) { + /* Switch to Dynamic Power Save */ + ps_mode = MM_PS_MODE_ON_DYN; + } else { + /* Exit Power Save */ + ps_mode = MM_PS_MODE_OFF; + } + + return rwnx_send_me_set_ps_mode(rwnx_hw, ps_mode); +#else + return 0; +#endif +} + + +static int rwnx_cfg80211_set_txq_params(struct wiphy *wiphy, struct net_device *dev, + struct ieee80211_txq_params *params) +{ + struct rwnx_hw *rwnx_hw = wiphy_priv(wiphy); + struct rwnx_vif *rwnx_vif = netdev_priv(dev); + u8 hw_queue, aifs, cwmin, cwmax; + u32 param; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0) + hw_queue = rwnx_ac2hwq[0][params->queue]; +#else + hw_queue = rwnx_ac2hwq[0][params->ac]; +#endif + + aifs = params->aifs; + cwmin = fls(params->cwmin); + cwmax = fls(params->cwmax); + + /* Store queue information in general structure */ + param = (u32) (aifs << 0); + param |= (u32) (cwmin << 4); + param |= (u32) (cwmax << 8); + param |= (u32) (params->txop) << 12; + + /* Send the MM_SET_EDCA_REQ message to the FW */ + return rwnx_send_set_edca(rwnx_hw, hw_queue, param, false, rwnx_vif->vif_index); +} + + +/** + * @remain_on_channel: Request the driver to remain awake on the specified + * channel for the specified duration to complete an off-channel + * operation (e.g., public action frame exchange). When the driver is + * ready on the requested channel, it must indicate this with an event + * notification by calling cfg80211_ready_on_channel(). 
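+ *
+ * Only one RoC procedure is handled at a time (-EBUSY otherwise); requests
+ * shorter than 100 ms are stretched to 200 ms, and the P2P device interface
+ * is first brought up at firmware level when required.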
+ */ + +static int +rwnx_cfg80211_remain_on_channel_(struct wiphy *wiphy, + #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0) + struct wireless_dev *wdev, + #else + struct net_device *dev, + #endif + struct ieee80211_channel *chan, + #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0) + enum nl80211_channel_type channel_type, + #endif + unsigned int duration, u64 *cookie, bool mgmt_roc_flag) +{ + struct rwnx_hw *rwnx_hw = wiphy_priv(wiphy); +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0) + struct rwnx_vif *rwnx_vif = container_of(wdev, struct rwnx_vif, wdev); +#else + struct rwnx_vif *rwnx_vif = netdev_priv(dev); + struct wireless_dev *wdev = &rwnx_vif->wdev; +#endif + struct rwnx_roc_elem *roc_elem; + struct mm_add_if_cfm add_if_cfm; + struct mm_remain_on_channel_cfm roc_cfm; + int error; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + /* For debug purpose (use ftrace kernel option) */ +#ifdef CREATE_TRACE_POINTS + trace_roc(rwnx_vif->vif_index, chan->center_freq, duration); +#endif + +#if 1// Fix up layer to send 50ms duration. + if(duration < 100){ + AICWFDBG(LOGINFO, "%s duration time change to 200ms \r\n", __func__); + duration = 200; + } +#endif + + /* Check that no other RoC procedure has been launched */ + if (rwnx_hw->roc_elem) { + msleep(2); + if (rwnx_hw->roc_elem) { + AICWFDBG(LOGERROR, "remain_on_channel fail\n"); +#if 0//AIDEN test + roc_elem = rwnx_hw->roc_elem; + kfree(roc_elem); + rwnx_hw->roc_elem = NULL; + //msleep(500); +#else + return -EBUSY; +#endif + } + } + //msleep(500); + AICWFDBG(LOGINFO, "remain:%d,%d,%d,duration:%d\n",rwnx_vif->vif_index, rwnx_vif->is_p2p_vif, rwnx_hw->is_p2p_alive, duration); + //if (rwnx_vif->is_p2p_vif) { + AICWFDBG(LOGINFO, "rwnx_vif:%p rwnx_hw->p2p_dev_vif:%p rwnx_vif->up:%d\r\n", + rwnx_vif, rwnx_hw->p2p_dev_vif, rwnx_vif->up); +#ifdef CONFIG_USE_P2P0 + if (rwnx_vif->is_p2p_vif) { +#else + if (rwnx_vif == rwnx_hw->p2p_dev_vif && !rwnx_vif->up) { +#endif + if (!rwnx_hw->is_p2p_alive) { + error = rwnx_send_add_if (rwnx_hw, rwnx_vif->wdev.address, //wdev->netdev->dev_addr, + RWNX_VIF_TYPE(rwnx_vif), false, &add_if_cfm); + if (error) + return -EIO; + + if (add_if_cfm.status != 0) { + return -EIO; + } + /* Save the index retrieved from LMAC */ + spin_lock_bh(&rwnx_hw->cb_lock); + rwnx_vif->vif_index = add_if_cfm.inst_nbr; + rwnx_vif->up = true; + rwnx_hw->vif_started++; + rwnx_hw->vif_table[add_if_cfm.inst_nbr] = rwnx_vif; + spin_unlock_bh(&rwnx_hw->cb_lock); + rwnx_hw->is_p2p_alive = 1; +#ifndef CONFIG_USE_P2P0 + mod_timer(&rwnx_hw->p2p_alive_timer, jiffies + msecs_to_jiffies(1000)); + atomic_set(&rwnx_hw->p2p_alive_timer_count, 0); +#endif + } + else { +#ifndef CONFIG_USE_P2P0 + mod_timer(&rwnx_hw->p2p_alive_timer, jiffies + msecs_to_jiffies(1000)); + atomic_set(&rwnx_hw->p2p_alive_timer_count, 0); +#endif + } + } + + /* Allocate a temporary RoC element */ + roc_elem = kmalloc(sizeof(struct rwnx_roc_elem), GFP_KERNEL); + + /* Verify that element has well been allocated */ + if (!roc_elem) { + return -ENOMEM; + } + + /* Initialize the RoC information element */ + roc_elem->wdev = wdev; + roc_elem->chan = chan; + roc_elem->duration = duration; + roc_elem->mgmt_roc = mgmt_roc_flag; + roc_elem->on_chan = false; + + /* Initialize the OFFCHAN TX queue to allow off-channel transmissions */ + rwnx_txq_offchan_init(rwnx_vif); + + /* Forward the information to the FMAC */ + rwnx_hw->roc_elem = roc_elem; + error = rwnx_send_roc(rwnx_hw, rwnx_vif, chan, duration, &roc_cfm); + + /* If no error, keep all the information for handling of end of procedure */ + if 
(error == 0) { + + /* Set the cookie value */ + *cookie = (u64)(rwnx_hw->roc_cookie_cnt); + if(roc_cfm.status) { + // failed to roc + rwnx_hw->roc_elem = NULL; + kfree(roc_elem); + rwnx_txq_offchan_deinit(rwnx_vif); + return -EBUSY; + } + } else { + /* Free the allocated element */ + rwnx_hw->roc_elem = NULL; + kfree(roc_elem); + rwnx_txq_offchan_deinit(rwnx_vif); + } + + return error; +} + + +static int +rwnx_cfg80211_remain_on_channel(struct wiphy *wiphy, + #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0) + struct wireless_dev *wdev, + #else + struct net_device *dev, + #endif + struct ieee80211_channel *chan, + #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0) + enum nl80211_channel_type channel_type, + #endif + unsigned int duration, u64 *cookie) +{ + return rwnx_cfg80211_remain_on_channel_(wiphy, + #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0) + wdev, + #else + dev, + #endif + chan, + #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0) + channel_type, + #endif + duration, cookie, false); +} + +/** + * @cancel_remain_on_channel: Cancel an on-going remain-on-channel operation. + * This allows the operation to be terminated prior to timeout based on + * the duration value. + */ +static int rwnx_cfg80211_cancel_remain_on_channel(struct wiphy *wiphy, + #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0) + struct net_device *dev, + #else + struct wireless_dev *wdev, + #endif + u64 cookie) +{ + struct rwnx_hw *rwnx_hw = wiphy_priv(wiphy); +#ifdef CREATE_TRACE_POINTS +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0) + struct rwnx_vif *rwnx_vif = netdev_priv(dev); +#else + struct rwnx_vif *rwnx_vif = container_of(wdev, struct rwnx_vif, wdev); +#endif +#endif + RWNX_DBG(RWNX_FN_ENTRY_STR); +#ifdef CREATE_TRACE_POINTS + /* For debug purpose (use ftrace kernel option) */ + trace_cancel_roc(rwnx_vif->vif_index); +#endif + /* Check if a RoC procedure is pending */ + if (!rwnx_hw->roc_elem) { + return 0; + } + + /* Forward the information to the FMAC */ + return rwnx_send_cancel_roc(rwnx_hw); +} + +#define IS_2P4GHZ(n) (n >= 2412 && n <= 2484) +#define IS_5GHZ(n) (n >= 4000 && n <= 5895) +#define DEFAULT_NOISE_FLOOR_2GHZ (-89) +#define DEFAULT_NOISE_FLOOR_5GHZ (-92) +/** + * @dump_survey: get site survey information. + */ +static int rwnx_cfg80211_dump_survey(struct wiphy *wiphy, struct net_device *netdev, + int idx, struct survey_info *info) +{ + struct rwnx_hw *rwnx_hw = wiphy_priv(wiphy); + struct ieee80211_supported_band *sband; + struct rwnx_survey_info *rwnx_survey; + + //RWNX_DBG(RWNX_FN_ENTRY_STR); + + if (idx >= ARRAY_SIZE(rwnx_hw->survey)) + return -ENOENT; + + rwnx_survey = &rwnx_hw->survey[idx]; + + // Check if provided index matches with a supported 2.4GHz channel + sband = wiphy->bands[NL80211_BAND_2GHZ]; + if (sband && idx >= sband->n_channels) { + idx -= sband->n_channels; + sband = NULL; + } + + //#ifdef USE_5G + if (rwnx_hw->band_5g_support) { + if (!sband) { + // Check if provided index matches with a supported 5GHz channel + sband = wiphy->bands[NL80211_BAND_5GHZ]; + + if (!sband || idx >= sband->n_channels) + return -ENOENT; + } + }else{ + //#else + if (!sband || idx >= sband->n_channels) + return -ENOENT; + } + //#endif + + // Fill the survey + info->channel = &sband->channels[idx]; + info->filled = rwnx_survey->filled; + + if (rwnx_survey->filled != 0) { + SURVEY_TIME(info) = (u64)rwnx_survey->chan_time_ms; + SURVEY_TIME_BUSY(info) = (u64)rwnx_survey->chan_time_busy_ms; + //info->noise = rwnx_survey->noise_dbm; + info->noise = ((IS_2P4GHZ(info->channel->center_freq)) ? 
DEFAULT_NOISE_FLOOR_2GHZ : + (IS_5GHZ(info->channel->center_freq)) ? DEFAULT_NOISE_FLOOR_5GHZ : DEFAULT_NOISE_FLOOR_5GHZ); + + + // Set the survey report as not used + if(info->noise == 0){ + rwnx_survey->filled = 0; + }else{ + rwnx_survey->filled |= SURVEY_INFO_NOISE_DBM; + } + + } + + return 0; +} + +/** + * @get_channel: Get the current operating channel for the virtual interface. + * For monitor interfaces, it should return %NULL unless there's a single + * current monitoring channel. + */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)) +static int rwnx_cfg80211_get_channel(struct wiphy *wiphy, + struct wireless_dev *wdev, +#if LINUX_VERSION_CODE >= HIGH_KERNEL_VERSION + unsigned int link_id, +#endif + struct cfg80211_chan_def *chandef) +{ + struct rwnx_hw *rwnx_hw = wiphy_priv(wiphy); + struct rwnx_vif *rwnx_vif = container_of(wdev, struct rwnx_vif, wdev); + struct rwnx_chanctx *ctxt; + + if (!rwnx_vif->up) { + return -ENODATA; + } + + if (rwnx_vif->vif_index == rwnx_hw->monitor_vif) + { + //retrieve channel from firmware +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 13, 0)) + rwnx_cfg80211_set_monitor_channel(wiphy, wdev->netdev, NULL); +#else + rwnx_cfg80211_set_monitor_channel(wiphy, NULL); +#endif + } + + //Check if channel context is valid + if (!rwnx_chanctx_valid(rwnx_hw, rwnx_vif->ch_index)){ + return -ENODATA; + } + + ctxt = &rwnx_hw->chanctx_table[rwnx_vif->ch_index]; + *chandef = ctxt->chan_def; + + return 0; +} +#else +struct ieee80211_channel *rwnx_cfg80211_get_channel(struct wiphy *wiphy) +{ + struct rwnx_hw *rwnx_hw = wiphy_priv(wiphy); + struct ieee80211_channel *chan = NULL; + struct rwnx_vif *vif; + bool_l found = false; + //may TBD + + list_for_each_entry(vif, &rwnx_hw->vifs, list) + { + if(vif->wdev.iftype == NL80211_IFTYPE_AP) + { + found = true; + break; + } + } + + if(found && rwnx_hw->set_chan.center_freq) { + chan = kzalloc(sizeof(struct ieee80211_channel), GFP_KERNEL); + memcpy((u8 *)chan, (u8 *)&rwnx_hw->set_chan, sizeof(struct ieee80211_channel)); + } + + return chan; +} +#endif + + +/** + * @mgmt_tx: Transmit a management frame. 
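+ * Frames addressed to a known peer (or sent from an AP-type interface) are
+ * transmitted directly; otherwise a short internal remain-on-channel (30 ms)
+ * is started first, except for authentication frames sent while external
+ * authentication is in progress.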
+ */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)) +static int rwnx_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, + struct cfg80211_mgmt_tx_params *params, + u64 *cookie) +#else +static int rwnx_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, + struct ieee80211_channel *channel, bool offchan, + unsigned int wait, const u8* buf, size_t len, + #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0)) + bool no_cck, + #endif + #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 3, 0)) + bool dont_wait_for_ack, + #endif + u64 *cookie) +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0) */ +{ + struct rwnx_hw *rwnx_hw = wiphy_priv(wiphy); +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0)) + struct rwnx_vif *rwnx_vif = netdev_priv(dev); + struct wireless_dev *wdev = &rwnx_vif->wdev; +#else + struct rwnx_vif *rwnx_vif = container_of(wdev, struct rwnx_vif, wdev); +#endif + struct rwnx_sta *rwnx_sta = NULL; + #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)) + struct ieee80211_channel *channel = params->chan; + const u8 *buf = params->buf; + //size_t len = params->len; + #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0) */ + struct ieee80211_mgmt *mgmt = (void *)buf; + bool ap = false; + #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)) + bool offchan = false; + #endif + + /* Check if provided VIF is an AP or a STA one */ + switch (RWNX_VIF_TYPE(rwnx_vif)) { + case NL80211_IFTYPE_AP_VLAN: + rwnx_vif = rwnx_vif->ap_vlan.master; + case NL80211_IFTYPE_AP: + case NL80211_IFTYPE_P2P_GO: + case NL80211_IFTYPE_MESH_POINT: + ap = true; + break; + case NL80211_IFTYPE_STATION: + case NL80211_IFTYPE_P2P_CLIENT: + default: + break; + } + +#ifdef CONFIG_BAND_STEERING + if (ieee80211_is_assoc_resp(mgmt->frame_control)) { + AICWFDBG(LOGSTEER, "usb assoc_resp: da: %pM\n", mgmt->da); + if (aicwf_band_steering_block_chk(rwnx_vif, mgmt->da)) { + AICWFDBG(LOGSTEER, "[STEERING] usb assoc_rsp refuse temp, %pM\n", mgmt->da); + return 0; + } + } +#endif + + /* Get STA on which management frame has to be sent */ + rwnx_sta = rwnx_retrieve_sta(rwnx_hw, rwnx_vif, mgmt->da, + mgmt->frame_control, ap); + + AICWFDBG(LOGDEBUG, "%s rwnx_sta:%p RWNX_VIF_TYPE(rwnx_vif):%d \r\n", __func__, rwnx_sta, RWNX_VIF_TYPE(rwnx_vif)); + +#ifdef CREATE_TRACE_POINTS + trace_mgmt_tx((channel) ? channel->center_freq : 0, + rwnx_vif->vif_index, (rwnx_sta) ? 
rwnx_sta->sta_idx : 0xFF, + mgmt); +#endif + if (ap || rwnx_sta) + goto send_frame; + + /* Not an AP interface sending frame to unknown STA: + * This is allowed for external authetication */ + if (rwnx_vif->sta.external_auth && ieee80211_is_auth(mgmt->frame_control)) + goto send_frame; + + /* Otherwise ROC is needed */ + if (!channel) { + AICWFDBG(LOGERROR, "mgmt_tx fail since channel\n"); + return -EINVAL; + } + + /* Check that a RoC is already pending */ + if (rwnx_hw->roc_elem) { + /* Get VIF used for current ROC */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0)) + struct rwnx_vif *rwnx_roc_vif = netdev_priv(rwnx_hw->roc_elem->wdev->netdev); +#else + struct rwnx_vif *rwnx_roc_vif = container_of(rwnx_hw->roc_elem->wdev, struct rwnx_vif, wdev);//netdev_priv(rwnx_hw->roc_elem->wdev->netdev); +#endif + + /* Check if RoC channel is the same than the required one */ + if ((rwnx_hw->roc_elem->chan->center_freq != channel->center_freq) + || (rwnx_vif->vif_index != rwnx_roc_vif->vif_index)) { + AICWFDBG(LOGERROR, "mgmt rx chan invalid: %d, %d", rwnx_hw->roc_elem->chan->center_freq, channel->center_freq); + return -EINVAL; + } + } else { + u64 cookie; + int error; + + AICWFDBG(LOGINFO, "mgmt rx remain on chan\n"); + + /* Start a ROC procedure for 30ms */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)) + error = rwnx_cfg80211_remain_on_channel_(wiphy, wdev, channel, + 30, &cookie, true); +#elif (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)) + error = rwnx_cfg80211_remain_on_channel_(wiphy, wdev, channel, NL80211_CHAN_NO_HT, + 30, &cookie, true); +#else + error = rwnx_cfg80211_remain_on_channel_(wiphy, dev, channel, NL80211_CHAN_NO_HT, + 30, &cookie, true); +#endif + + if (error) { + AICWFDBG(LOGERROR, "mgmt rx chan err\n"); + return error; + } + /* Need to keep in mind that RoC has been launched internally in order to + * avoid to call the cfg80211 callback once expired */ + //rwnx_hw->roc_elem->mgmt_roc = true; + } + + #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)) + offchan = true; + #endif + +send_frame: + #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)) + return rwnx_start_mgmt_xmit(rwnx_vif, rwnx_sta, params, offchan, cookie); + #else + return rwnx_start_mgmt_xmit(rwnx_vif, rwnx_sta, channel, offchan, wait, buf, len, no_cck, dont_wait_for_ack, cookie); + #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0) */ +} + +/** + * @start_radar_detection: Start radar detection in the driver. 
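+ * The CAC timer is armed locally with rwnx_radar_start_cac() and the request
+ * is forwarded to the firmware; radar detection is enabled immediately only
+ * when the CAC channel context is the one currently active.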
+ */ +static +int rwnx_cfg80211_start_radar_detection(struct wiphy *wiphy, + struct net_device *dev, + struct cfg80211_chan_def *chandef + #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0)) + , u32 cac_time_ms + #endif +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 12, 0)) + , int link_id +#endif + ) +{ + struct rwnx_hw *rwnx_hw = wiphy_priv(wiphy); + struct rwnx_vif *rwnx_vif = netdev_priv(dev); + struct apm_start_cac_cfm cfm; + + printk("%s\n", __func__); + + #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0)) + rwnx_radar_start_cac(&rwnx_hw->radar, cac_time_ms, rwnx_vif); + #endif + rwnx_send_apm_start_cac_req(rwnx_hw, rwnx_vif, chandef, &cfm); + + if (cfm.status == CO_OK) { + spin_lock_bh(&rwnx_hw->cb_lock); + rwnx_chanctx_link(rwnx_vif, cfm.ch_idx, chandef); + if (rwnx_hw->cur_chanctx == rwnx_vif->ch_index) + rwnx_radar_detection_enable(&rwnx_hw->radar, + RWNX_RADAR_DETECT_REPORT, + RWNX_RADAR_RIU); + spin_unlock_bh(&rwnx_hw->cb_lock); + } else { + return -EIO; + } + + return 0; +} + +/** + * @update_ft_ies: Provide updated Fast BSS Transition information to the + * driver. If the SME is in the driver/firmware, this information can be + * used in building Authentication and Reassociation Request frames. + */ +static +int rwnx_cfg80211_update_ft_ies(struct wiphy *wiphy, + struct net_device *dev, + struct cfg80211_update_ft_ies_params *ftie) +{ + return 0; +} + +/** + * @set_cqm_rssi_config: Configure connection quality monitor RSSI threshold. + */ +static +int rwnx_cfg80211_set_cqm_rssi_config(struct wiphy *wiphy, + struct net_device *dev, + int32_t rssi_thold, uint32_t rssi_hyst) +{ + struct rwnx_hw *rwnx_hw = wiphy_priv(wiphy); + struct rwnx_vif *rwnx_vif = netdev_priv(dev); + + return rwnx_send_cfg_rssi_req(rwnx_hw, rwnx_vif->vif_index, rssi_thold, rssi_hyst); +} + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0) +/** + * + * @channel_switch: initiate channel-switch procedure (with CSA). Driver is + * responsible for veryfing if the switch is possible. Since this is + * inherently tricky driver may decide to disconnect an interface later + * with cfg80211_stop_iface(). This doesn't mean driver can accept + * everything. It should do it's best to verify requests and reject them + * as soon as possible. 
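+ *
+ * A CSA count of 0 is forced to 2, the post-CSA beacon is prepared up front,
+ * and user space is notified through cfg80211_ch_switch_started_notify()
+ * once the CSA beacon has been accepted by the firmware.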
+ */ +int rwnx_cfg80211_channel_switch(struct wiphy *wiphy, + struct net_device *dev, + struct cfg80211_csa_settings *params) +{ + struct rwnx_hw *rwnx_hw = wiphy_priv(wiphy); + struct rwnx_vif *vif = netdev_priv(dev); + struct rwnx_ipc_elem_var elem; + struct rwnx_bcn *bcn, *bcn_after; + struct rwnx_csa *csa; + u16 csa_oft[BCN_MAX_CSA_CPT]; + u8 *buf; + int i, error = 0; + + //elem init + elem.dma_addr = 0; + + if (vif->ap.csa) + return -EBUSY; + + if (params->n_counter_offsets_beacon > BCN_MAX_CSA_CPT) + return -EINVAL; + + /* Build the new beacon with CSA IE */ + bcn = &vif->ap.bcn; + buf = rwnx_build_bcn(bcn, ¶ms->beacon_csa); + if (!buf) + return -ENOMEM; + + memset(csa_oft, 0, sizeof(csa_oft)); + for (i = 0; i < params->n_counter_offsets_beacon; i++) + { + csa_oft[i] = params->counter_offsets_beacon[i] + bcn->head_len + + bcn->tim_len; + } + + /* If count is set to 0 (i.e anytime after this beacon) force it to 2 */ + if (params->count == 0) { + params->count = 2; + for (i = 0; i < params->n_counter_offsets_beacon; i++) + { + buf[csa_oft[i]] = 2; + } + } + + #if 0 + if ((error = rwnx_ipc_elem_var_allocs(rwnx_hw, &elem, bcn->len, + DMA_TO_DEVICE, buf, NULL, NULL))) { + goto end; + } + #else + error = rwnx_send_bcn(rwnx_hw, buf, vif->vif_index, bcn->len); + if (error) { + goto end; + } + #endif + + /* Build the beacon to use after CSA. It will only be sent to fw once + CSA is over, but do it before sending the beacon as it must be ready + when CSA is finished. */ + csa = kzalloc(sizeof(struct rwnx_csa), GFP_KERNEL); + if (!csa) { + error = -ENOMEM; + goto end; + } + + bcn_after = &csa->bcn; + buf = rwnx_build_bcn(bcn_after, ¶ms->beacon_after); + if (!buf) { + error = -ENOMEM; + rwnx_del_csa(vif); + goto end; + } + + vif->ap.csa = csa; + csa->vif = vif; + csa->chandef = params->chandef; + + /* Send new Beacon. 
FW will extract channel and count from the beacon */ + error = rwnx_send_bcn_change(rwnx_hw, vif->vif_index, elem.dma_addr, + bcn->len, bcn->head_len, bcn->tim_len, csa_oft); + + if (error) { + rwnx_del_csa(vif); + goto end; + } else { + INIT_WORK(&csa->work, rwnx_csa_finish); +#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 9, 0) + cfg80211_ch_switch_started_notify(dev, &csa->chandef, 0, params->count, false); +#elif LINUX_VERSION_CODE >= HIGH_KERNEL_VERSION4 + cfg80211_ch_switch_started_notify(dev, &csa->chandef, 0, params->count, false, 0); +#elif LINUX_VERSION_CODE >= HIGH_KERNEL_VERSION2 + cfg80211_ch_switch_started_notify(dev, &csa->chandef, 0, params->count, false); +#elif LINUX_VERSION_CODE >= KERNEL_VERSION(5, 11, 0) + cfg80211_ch_switch_started_notify(dev, &csa->chandef, params->count, params->block_tx); +#else + cfg80211_ch_switch_started_notify(dev, &csa->chandef, params->count); +#endif + + } + +#ifdef CONFIG_BAND_STEERING + vif->ap.band = (enum band_type)((params->chandef).chan)->band; + vif->ap.freq = ((params->chandef).chan)->center_freq; +#endif + + end: + return error; +} +#endif + +#if 0 +/* + * @tdls_mgmt: prepare TDLS action frame packets and forward them to FW + */ + +static int +rwnx_cfg80211_tdls_mgmt(struct wiphy *wiphy, + struct net_device *dev, +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0) + const u8 *peer, +#else + u8 *peer, +#endif + u8 action_code, + u8 dialog_token, + u16 status_code, +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0) + u32 peer_capability, +#endif +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 17, 0) + bool initiator, +#endif + const u8 *buf, + size_t len) +{ + #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 15, 0) + u32 peer_capability = 0; + #endif + #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0) + bool initiator = false; + #endif + struct rwnx_hw *rwnx_hw = wiphy_priv(wiphy); + struct rwnx_vif *rwnx_vif = netdev_priv(dev); + int ret = 0; + + /* make sure we support TDLS */ + if (!(wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS)) + return -ENOTSUPP; + + /* make sure we are in station mode (and connected) */ + if ((RWNX_VIF_TYPE(rwnx_vif) != NL80211_IFTYPE_STATION) || + (!rwnx_vif->up) || (!rwnx_vif->sta.ap)) + return -ENOTSUPP; + + /* only one TDLS link is supported */ + if ((action_code == WLAN_TDLS_SETUP_REQUEST) && + (rwnx_vif->sta.tdls_sta) && + (rwnx_vif->tdls_status == TDLS_LINK_ACTIVE)) { + printk("%s: only one TDLS link is supported!\n", __func__); + return -ENOTSUPP; + } + + if ((action_code == WLAN_TDLS_DISCOVERY_REQUEST) && + (rwnx_hw->mod_params->ps_on)) { + printk("%s: discovery request is not supported when " + "power-save is enabled!\n", __func__); + return -ENOTSUPP; + } + + switch (action_code) { + case WLAN_TDLS_SETUP_RESPONSE: + /* only one TDLS link is supported */ + if ((status_code == 0) && + (rwnx_vif->sta.tdls_sta) && + (rwnx_vif->tdls_status == TDLS_LINK_ACTIVE)) { + printk("%s: only one TDLS link is supported!\n", __func__); + status_code = WLAN_STATUS_REQUEST_DECLINED; + } + /* fall-through */ + case WLAN_TDLS_SETUP_REQUEST: + case WLAN_TDLS_TEARDOWN: + case WLAN_TDLS_DISCOVERY_REQUEST: + case WLAN_TDLS_SETUP_CONFIRM: + case WLAN_PUB_ACTION_TDLS_DISCOVER_RES: + ret = rwnx_tdls_send_mgmt_packet_data(rwnx_hw, rwnx_vif, peer, action_code, + dialog_token, status_code, peer_capability, initiator, buf, len, 0, NULL); + break; + + default: + printk("%s: Unknown TDLS mgmt/action frame %pM\n", + __func__, peer); + ret = -EOPNOTSUPP; + break; + } + + if (action_code == WLAN_TDLS_SETUP_REQUEST) { + rwnx_vif->tdls_status = 
TDLS_SETUP_REQ_TX; + } else if (action_code == WLAN_TDLS_SETUP_RESPONSE) { + rwnx_vif->tdls_status = TDLS_SETUP_RSP_TX; + } else if ((action_code == WLAN_TDLS_SETUP_CONFIRM) && (ret == CO_OK)) { + rwnx_vif->tdls_status = TDLS_LINK_ACTIVE; + /* Set TDLS active */ + rwnx_vif->sta.tdls_sta->tdls.active = true; + } + + return ret; +} +#endif +#if 0 +/* + * @tdls_oper: execute TDLS operation + */ +static int +rwnx_cfg80211_tdls_oper(struct wiphy *wiphy, + struct net_device *dev, +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0) + const u8 *peer, +#else + u8 *peer, +#endif + enum nl80211_tdls_operation oper +) +{ + struct rwnx_hw *rwnx_hw = wiphy_priv(wiphy); + struct rwnx_vif *rwnx_vif = netdev_priv(dev); + int error; + + if (oper != NL80211_TDLS_DISABLE_LINK) + return 0; + + if (!rwnx_vif->sta.tdls_sta) { + printk("%s: TDLS station %pM does not exist\n", __func__, peer); + return -ENOLINK; + } + + if (memcmp(rwnx_vif->sta.tdls_sta->mac_addr, peer, ETH_ALEN) == 0) { + /* Disable Channel Switch */ + if (!rwnx_send_tdls_cancel_chan_switch_req(rwnx_hw, rwnx_vif, + rwnx_vif->sta.tdls_sta, + NULL)) + rwnx_vif->sta.tdls_sta->tdls.chsw_en = false; + + netdev_info(dev, "Del TDLS sta %d (%pM)", + rwnx_vif->sta.tdls_sta->sta_idx, + rwnx_vif->sta.tdls_sta->mac_addr); + /* Ensure that we won't process PS change ind */ + spin_lock_bh(&rwnx_hw->cb_lock); + rwnx_vif->sta.tdls_sta->ps.active = false; + rwnx_vif->sta.tdls_sta->valid = false; + spin_unlock_bh(&rwnx_hw->cb_lock); + rwnx_txq_sta_deinit(rwnx_hw, rwnx_vif->sta.tdls_sta); + error = rwnx_send_me_sta_del(rwnx_hw, rwnx_vif->sta.tdls_sta->sta_idx, true); + if ((error != 0) && (error != -EPIPE)) + return error; + +#ifdef CONFIG_RWNX_BFMER + // Disable Beamformer if supported + rwnx_bfmer_report_del(rwnx_hw, rwnx_vif->sta.tdls_sta); + rwnx_mu_group_sta_del(rwnx_hw, rwnx_vif->sta.tdls_sta); +#endif /* CONFIG_RWNX_BFMER */ + + /* Set TDLS not active */ + rwnx_vif->sta.tdls_sta->tdls.active = false; +#ifdef CONFIG_DEBUG_FS + rwnx_dbgfs_unregister_rc_stat(rwnx_hw, rwnx_vif->sta.tdls_sta); +#endif + // Remove TDLS station + rwnx_vif->tdls_status = TDLS_LINK_IDLE; + rwnx_vif->sta.tdls_sta = NULL; + } + + return 0; +} +#endif +#if 0 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0) +/* + * @tdls_channel_switch: enable TDLS channel switch + */ +static int +rwnx_cfg80211_tdls_channel_switch(struct wiphy *wiphy, + struct net_device *dev, + const u8 *addr, u8 oper_class, + struct cfg80211_chan_def *chandef) +{ + struct rwnx_vif *rwnx_vif = netdev_priv(dev); + struct rwnx_hw *rwnx_hw = wiphy_priv(wiphy); + struct rwnx_sta *rwnx_sta = rwnx_vif->sta.tdls_sta; + struct tdls_chan_switch_cfm cfm; + int error; + + if ((!rwnx_sta) || (memcmp(addr, rwnx_sta->mac_addr, ETH_ALEN))) { + printk("%s: TDLS station %pM doesn't exist\n", __func__, addr); + return -ENOLINK; + } + + if (!rwnx_sta->tdls.chsw_allowed) { + printk("%s: TDLS station %pM does not support TDLS channel switch\n", __func__, addr); + return -ENOTSUPP; + } + + error = rwnx_send_tdls_chan_switch_req(rwnx_hw, rwnx_vif, rwnx_sta, + rwnx_sta->tdls.initiator, + oper_class, chandef, &cfm); + if (error) + return error; + + if (!cfm.status) { + rwnx_sta->tdls.chsw_en = true; + return 0; + } else { + printk("%s: TDLS channel switch already enabled and only one is supported\n", __func__); + return -EALREADY; + } +} +#endif +#if 0 +/* + * @tdls_cancel_channel_switch: disable TDLS channel switch + */ +static void +rwnx_cfg80211_tdls_cancel_channel_switch(struct wiphy *wiphy, + struct net_device *dev, + const u8 *addr) +{ 
+ struct rwnx_vif *rwnx_vif = netdev_priv(dev); + struct rwnx_hw *rwnx_hw = wiphy_priv(wiphy); + struct rwnx_sta *rwnx_sta = rwnx_vif->sta.tdls_sta; + struct tdls_cancel_chan_switch_cfm cfm; + + if (!rwnx_sta) + return; + + if (!rwnx_send_tdls_cancel_chan_switch_req(rwnx_hw, rwnx_vif, + rwnx_sta, &cfm)) + rwnx_sta->tdls.chsw_en = false; +} +#endif /* version >= 3.19 */ +#endif +/** + * @change_bss: Modify parameters for a given BSS (mainly for AP mode). + */ +int rwnx_cfg80211_change_bss(struct wiphy *wiphy, struct net_device *dev, + struct bss_parameters *params) +{ + struct rwnx_vif *rwnx_vif = netdev_priv(dev); + int res = -EOPNOTSUPP; + + if (((RWNX_VIF_TYPE(rwnx_vif) == NL80211_IFTYPE_AP) || + (RWNX_VIF_TYPE(rwnx_vif) == NL80211_IFTYPE_P2P_GO)) && + (params->ap_isolate > -1)) { + + if (params->ap_isolate) + rwnx_vif->ap.flags |= RWNX_AP_ISOLATE; + else + rwnx_vif->ap.flags &= ~RWNX_AP_ISOLATE; + + res = 0; + } + + return res; +} + +enum { + PHYMODE_B, + PHYMODE_G, + PHYMODE_A, + PHYMODE_N, + PHYMODE_AC, + PHYMODE_AX +}; + +static const u32 legacy_map_to_rate[12] = { //Kbps + 1000, 2000, 5500, 11000, 6000, 9000, 12000, 18000, 24000, 36000, 48000, 54000 +}; + +static const u32 he_mcs_map_to_rate[2][3][3][12] = { //NSS/GI/BW/MCS + { //1ss + { //0.8us + { 8600, 17200, 25800, 34400, 51600, 68800, 77400, 86000, 103200, 114700, 129000, 143400}, + {17200, 34400, 51600, 68800, 103200, 137600, 154900, 172100, 206500, 229400, 258100, 286800}, + {36000, 72100, 108100, 144100, 216200, 288200, 324300, 360300, 432400, 480400, 540400, 600400}, + }, + { //1.6us + { 8100, 16300, 24400, 32500, 48800, 65000, 73100, 81300, 97500, 108300, 121900, 135400}, //20M, + {16300, 32500, 48800, 65000, 97500, 130000, 146300, 162500, 195000, 216700, 243800, 270800}, //40M, + {34000, 68100, 102100, 136100, 204200, 272200, 306300, 340300, 408300, 453700, 510400, 567100}, //80M, + }, + { //3.2us + { 7300, 14600, 21900, 29300, 43900, 58500, 65800, 73100, 87800, 97500, 109700, 121900}, + {14600, 29300, 43900, 58500, 87800, 117000, 131600, 146300, 175500, 195000, 219400, 243800}, + {30600, 61300, 91900, 122500, 183800, 245000, 275600, 306300, 367500, 408300, 459400, 510400}, + } + }, + { //2ss + { //0.8us + { 17200, 34400, 51600, 68800, 103200, 137600, 154900, 172100, 206500, 229400, 258100, 286800}, //242tone + { 34400, 68800, 103200, 137600, 206500, 275300, 309700, 344100, 412900, 458800, 516200, 573500}, //484tone + { 72100, 144100, 216200, 288200, 432400, 576500, 648500, 720600, 864700, 960700, 1080900, 1201000}, //996tone + }, + { // 1.6us + { 16300, 32500, 48800, 65000, 97500, 130000, 146300, 162500, 195000, 216700, 243800, 270800}, + { 32500, 65000, 97500, 130000, 195000, 260000, 292500, 325000, 390000, 433300, 487500, 541700}, + { 68100, 136100, 204200, 272200, 408300, 544400, 612500, 680600, 816700, 907400, 1020800, 1134200}, + }, + { // 3.2us + { 14600, 29300, 43900, 58500, 87800, 117000, 131600, 146300, 175500, 195000, 219400, 243800}, + { 29300, 58500, 87800, 117000, 175500, 234000, 263300, 292500, 351000, 390000, 438800, 487500}, + { 61300, 122500, 183800, 245000, 367500, 490000, 551300, 612500, 735000, 816600, 918800, 1020800} + } + } +}; + + +static const u32 vht_mcs_map_to_rate[2][2][3][10] = { //NSS//GI/BW/MCS + { // 1ss + { //LONG GI + { 6500, 13000, 19500, 26000, 39000, 52000, 58500, 65000, 78000}, //20M + { 13500, 27000, 40500, 54000, 81000, 108000, 121500, 135000, 162000, 180000}, //40M + { 29300, 58500, 87800, 117000, 175500, 234000, 263300, 292500, 251000, 390000}, //80M + + }, + + { //SHORT GI + { 
7200, 14400, 21700, 28900, 43300, 57800, 65000, 72200, 86700}, + { 15000, 30000, 45000, 60000, 90000, 120000, 135000, 150000, 180000, 200000}, + { 32500, 65000, 97500, 130000, 195000, 260000, 292500, 325000, 390000, 433300}, + + } + }, + { //2ss + { //LONG GI + { 13000, 26000, 39000, 52000, 78000, 104000, 117000, 130000, 156000}, //20M + { 27000, 54000, 81000, 108000, 162000, 216000, 243000, 270000, 324000, 360000}, //40M + { 58500, 117000, 175500, 234000, 351000, 468000, 526500, 585000, 702000, 780000}, //80M + }, + { //SHORT GI + { 14400, 28900, 43300, 57800, 86700, 115600, 130000, 144400, 173300}, //20M + { 30000, 60000, 90000, 120000, 180000, 240000, 270000, 300000, 360000, 400000}, //40M + { 65000, 130000, 195000, 260000, 390000, 520000, 585000, 650000, 780000, 866700}, //80M + } + } +}; + + +static const u32 ht_mcs_map_to_rate[2][2][3][8] = { //NSS//GI/BW/MCS + { // 1SS + { //LONG GI + {6500, 13000, 19500, 26000, 39000, 52000, 58500, 65000}, //20M + {13500, 27000, 40500, 54000, 81000, 108000, 121500, 135000}, //40M + }, + { //SHORT GI + {7200, 14400, 21700, 28900, 43300, 57800, 65000, 72200}, + {15000, 30000, 45000, 60000, 90000, 120000, 135000, 150000}, + } + }, + { // 2SS + { //LONG GI + { 13000, 26000, 39000, 52000, 78000, 104000, 117000, 130000}, //20M + { 27000, 54000, 81000, 108000, 162000, 216000, 243000, 270000}, //40M + }, + { //SHORT GI + { 14400, 28900, 43300, 57800, 86700, 115600, 130000, 144400}, //20M + { 30000, 60000, 90000, 120000, 180000, 240000, 270000, 300000}, //40M + } + + }, +}; + + +int rwnx_fill_station_info(struct rwnx_sta *sta, struct rwnx_vif *vif, + struct station_info *sinfo, u8 *phymode, u32 *tx_phyrate, u32 *rx_phyrate) +{ + struct rwnx_sta_stats *stats = &sta->stats; + struct rx_vector_1 *rx_vect1 = &stats->last_rx.rx_vect1; + union rwnx_rate_ctrl_info *rate_info; + struct mm_get_sta_info_cfm cfm; + u8 phymode_local = 0, mcs; + u32 tx_phyrate_local = 0; + u32 rx_phyrate_local = 0; + + rwnx_send_get_sta_info_req(vif->rwnx_hw, sta->sta_idx, &cfm); + sinfo->tx_failed = cfm.txfailed; + rate_info = (union rwnx_rate_ctrl_info *)&cfm.rate_info; + + //printk("chan time=%d, busy=%d, tx succ=%d, tx_fail=%d\n", cfm.chan_busy_time, cfm.chan_time, cfm.ack_succ_stat, cfm.ack_fail_stat); + stats->last_chan_busy_time = cfm.chan_busy_time; + stats->last_chan_time = cfm.chan_time; + stats->tx_ack_succ_stat = cfm.ack_succ_stat; + stats->tx_ack_fail_stat = cfm.ack_fail_stat; + stats->last_chan_tx_busy_time = cfm.chan_tx_busy_time; + + switch (rate_info->formatModTx) { + case FORMATMOD_NON_HT: + sinfo->txrate.flags = 0; + sinfo->txrate.legacy = tx_legrates_lut_rate[rate_info->mcsIndexTx]; + sinfo->txrate.nss = 1; + tx_phyrate_local = legacy_map_to_rate[rate_info->mcsIndexTx]; + if(sta->band == NL80211_BAND_2GHZ) + phymode_local = PHYMODE_B; + break; + case FORMATMOD_NON_HT_DUP_OFDM: + sinfo->txrate.flags = 0; + sinfo->txrate.legacy = tx_legrates_lut_rate[rate_info->mcsIndexTx]; + sinfo->txrate.nss = 1; + tx_phyrate_local = legacy_map_to_rate[rate_info->mcsIndexTx]; + if(sta->band == NL80211_BAND_2GHZ) + phymode_local = PHYMODE_G; + else + phymode_local = PHYMODE_A; + break; + case FORMATMOD_HT_MF: + case FORMATMOD_HT_GF: + sinfo->txrate.flags = RATE_INFO_FLAGS_MCS; + sinfo->txrate.mcs = rate_info->mcsIndexTx & 0x7; + sinfo->txrate.nss = ((rate_info->mcsIndexTx >> 3) & 0x7) + 1; + phymode_local = PHYMODE_N; + + tx_phyrate_local = ht_mcs_map_to_rate[sinfo->txrate.nss - 1][rate_info->giAndPreTypeTx][rate_info->bwTx][sinfo->txrate.mcs]; + break; + case FORMATMOD_VHT: + 
sinfo->txrate.flags = RATE_INFO_FLAGS_VHT_MCS; + sinfo->txrate.mcs = rate_info->mcsIndexTx & 0xF; + sinfo->txrate.nss = ((rate_info->mcsIndexTx >> 4) & 0x7) + 1; + tx_phyrate_local = vht_mcs_map_to_rate[sinfo->txrate.nss - 1][rate_info->giAndPreTypeTx][rate_info->bwTx][sinfo->txrate.mcs]; + //printk("phyrate: %d,%d,%d,%d\n", sinfo->txrate.nss - 1, rate_info->giAndPreTypeTx, rate_info->bwTx, sinfo->txrate.mcs); + phymode_local = PHYMODE_AC; + break; +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0) + case FORMATMOD_HE_MU: + case FORMATMOD_HE_SU: + case FORMATMOD_HE_ER: + sinfo->txrate.flags = RATE_INFO_FLAGS_HE_MCS; + sinfo->txrate.mcs = rate_info->mcsIndexTx & 0xF; + sinfo->txrate.nss = ((rate_info->mcsIndexTx >> 4) & 0x7) + 1; + tx_phyrate_local = he_mcs_map_to_rate[sinfo->txrate.nss - 1][rate_info->giAndPreTypeTx][rate_info->bwTx][sinfo->txrate.mcs]; + phymode_local = PHYMODE_AX; + //printk("phyrate: %d,%d,%d,%d\n", sinfo->txrate.nss - 1, rate_info->giAndPreTypeTx, rate_info->bwTx, sinfo->txrate.mcs); + break; +#else + case FORMATMOD_HE_MU: + case FORMATMOD_HE_SU: + case FORMATMOD_HE_ER: + sinfo->txrate.flags = RATE_INFO_FLAGS_VHT_MCS; + sinfo->txrate.mcs = ((rate_info->mcsIndexTx & 0xF) > 9 ? 9 : (rate_info->mcsIndexTx & 0xF)); + sinfo->txrate.nss = ((rate_info->mcsIndexTx >> 4) & 0x7) + 1; + tx_phyrate_local = he_mcs_map_to_rate[sinfo->txrate.nss - 1][rate_info->giAndPreTypeTx][rate_info->bwTx][sinfo->txrate.mcs]; + phymode_local = PHYMODE_AX; + break; +#endif + default: + return -EINVAL; + } + + if(phymode) + *phymode = phymode_local; + if(tx_phyrate) { + *tx_phyrate = tx_phyrate_local; + AICWFDBG(LOGINFO, "phyrate=%d\n", *tx_phyrate); + } +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 0, 0) + switch (rate_info->bwTx) { + case PHY_CHNL_BW_20: + sinfo->txrate.bw = RATE_INFO_BW_20; + break; + case PHY_CHNL_BW_40: + sinfo->txrate.bw = RATE_INFO_BW_40; + break; + case PHY_CHNL_BW_80: + sinfo->txrate.bw = RATE_INFO_BW_80; + break; + case PHY_CHNL_BW_160: + sinfo->txrate.bw = RATE_INFO_BW_160; + break; + default: +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0) + sinfo->txrate.bw = RATE_INFO_BW_HE_RU; +#else + sinfo->txrate.bw = RATE_INFO_BW_20; +#endif + break; + } +#endif + + //sinfo->txrate.nss = 1; + sinfo->filled |= (BIT(NL80211_STA_INFO_TX_BITRATE) | BIT(NL80211_STA_INFO_TX_FAILED)); + sinfo->inactive_time = jiffies_to_msecs(jiffies - vif->rwnx_hw->stats.last_tx); + sinfo->rx_bytes = vif->net_stats.rx_bytes; + sinfo->tx_bytes = vif->net_stats.tx_bytes; + sinfo->tx_packets = vif->net_stats.tx_packets; + sinfo->rx_packets = vif->net_stats.rx_packets; + sinfo->signal = (s8)cfm.rssi; + + #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 0, 0) + switch (rx_vect1->ch_bw) { + case PHY_CHNL_BW_20: + sinfo->rxrate.bw = RATE_INFO_BW_20; + break; + case PHY_CHNL_BW_40: + sinfo->rxrate.bw = RATE_INFO_BW_40; + break; + case PHY_CHNL_BW_80: + sinfo->rxrate.bw = RATE_INFO_BW_80; + break; + case PHY_CHNL_BW_160: + sinfo->rxrate.bw = RATE_INFO_BW_160; + break; + default: + #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0) + sinfo->rxrate.bw = RATE_INFO_BW_HE_RU; + #else + sinfo->rxrate.bw = RATE_INFO_BW_20; + #endif + break; + } + #endif + + switch (rx_vect1->format_mod) { + case FORMATMOD_NON_HT: + case FORMATMOD_NON_HT_DUP_OFDM: + sinfo->rxrate.flags = 0; + sinfo->rxrate.legacy = legrates_lut[rx_vect1->leg_rate].rate; + sinfo->rxrate.nss = 1; + rx_phyrate_local = legrates_lut[rx_vect1->leg_rate].rate; + break; + case FORMATMOD_HT_MF: + case FORMATMOD_HT_GF: + sinfo->rxrate.flags = RATE_INFO_FLAGS_MCS; 
+ if (rx_vect1->ht.short_gi) + sinfo->rxrate.flags |= RATE_INFO_FLAGS_SHORT_GI; + sinfo->rxrate.mcs = rx_vect1->ht.mcs; + sinfo->rxrate.nss = rx_vect1->ht.num_extn_ss + 1; + + if(rx_vect1->ht.mcs == 32) {//mcs 32 + if(rx_vect1->vht.short_gi) + rx_phyrate_local = 6700; + else + rx_phyrate_local = 6000; + } else { + if(sinfo->rxrate.mcs >= 8) + mcs = sinfo->rxrate.mcs - 8; + else + mcs = sinfo->rxrate.mcs; + rx_phyrate_local = ht_mcs_map_to_rate[rx_vect1->ht.num_extn_ss][rx_vect1->ht.short_gi][rx_vect1->ch_bw][mcs]; + } + break; + case FORMATMOD_VHT: + sinfo->rxrate.flags = RATE_INFO_FLAGS_VHT_MCS; + if (rx_vect1->vht.short_gi) + sinfo->rxrate.flags |= RATE_INFO_FLAGS_SHORT_GI; + sinfo->rxrate.mcs = rx_vect1->vht.mcs; + sinfo->rxrate.nss = rx_vect1->vht.nss + 1; + rx_phyrate_local = vht_mcs_map_to_rate[rx_vect1->vht.nss][rx_vect1->vht.short_gi][rx_vect1->ch_bw][sinfo->rxrate.mcs]; + //printk("rx:%d,%d,%d,%d\n", rx_vect1->vht.nss, rx_vect1->vht.short_gi, rx_vect1->ch_bw, sinfo->rxrate.mcs); + break; +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0) + case FORMATMOD_HE_MU: + sinfo->rxrate.he_ru_alloc = rx_vect1->he.ru_size; + case FORMATMOD_HE_SU: + case FORMATMOD_HE_ER: + sinfo->rxrate.flags = RATE_INFO_FLAGS_HE_MCS; + sinfo->rxrate.mcs = rx_vect1->he.mcs; + sinfo->rxrate.he_gi = rx_vect1->he.gi_type; + if (rx_vect1->he.gi_type) + sinfo->rxrate.flags |= RATE_INFO_FLAGS_SHORT_GI; + sinfo->rxrate.he_dcm = rx_vect1->he.dcm; + sinfo->rxrate.nss = rx_vect1->he.nss + 1; + rx_phyrate_local = he_mcs_map_to_rate[rx_vect1->he.nss][rx_vect1->he.gi_type][rx_vect1->ch_bw][sinfo->rxrate.mcs]; + break; +#else + //kernel not support he + case FORMATMOD_HE_MU: + case FORMATMOD_HE_SU: + case FORMATMOD_HE_ER: + sinfo->rxrate.flags = RATE_INFO_FLAGS_VHT_MCS; + sinfo->rxrate.mcs = (rx_vect1->he.mcs > 9 ? 
9 : rx_vect1->he.mcs); + sinfo->rxrate.nss = rx_vect1->he.nss + 1; + break; +#endif + default: + return -EINVAL; + } + + if(rx_phyrate) { + *rx_phyrate = rx_phyrate_local; + AICWFDBG(LOGINFO, "rx_phyrate=%d\n", *rx_phyrate); + } + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0) + sinfo->filled |= (STATION_INFO_INACTIVE_TIME | + STATION_INFO_RX_BYTES64 | + STATION_INFO_TX_BYTES64 | + STATION_INFO_RX_PACKETS | + STATION_INFO_TX_PACKETS | + STATION_INFO_SIGNAL | + STATION_INFO_RX_BITRATE); +#else + sinfo->filled |= (BIT(NL80211_STA_INFO_INACTIVE_TIME) | + BIT(NL80211_STA_INFO_RX_BYTES64) | + BIT(NL80211_STA_INFO_TX_BYTES64) | + BIT(NL80211_STA_INFO_RX_PACKETS) | + BIT(NL80211_STA_INFO_TX_PACKETS) | + BIT(NL80211_STA_INFO_SIGNAL) | + BIT(NL80211_STA_INFO_RX_BITRATE)); +#endif + + return 0; +} + + + +/** + * @get_station: get station information for the station identified by @mac + */ +static int rwnx_cfg80211_get_station(struct wiphy *wiphy, + struct net_device *dev, +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 0)) + u8 *mac, +#else + const u8 *mac, +#endif + struct station_info *sinfo) +{ + struct rwnx_vif *vif = netdev_priv(dev); + struct rwnx_sta *sta = NULL; + + if (RWNX_VIF_TYPE(vif) == NL80211_IFTYPE_MONITOR) + return -EINVAL; + else if ((RWNX_VIF_TYPE(vif) == NL80211_IFTYPE_STATION) || + (RWNX_VIF_TYPE(vif) == NL80211_IFTYPE_P2P_CLIENT)) { + if (vif->sta.ap && ether_addr_equal(vif->sta.ap->mac_addr, mac)) + sta = vif->sta.ap; + } + else + { + struct rwnx_sta *sta_iter; + spin_lock_bh(&vif->rwnx_hw->cb_lock); + list_for_each_entry(sta_iter, &vif->ap.sta_list, list) { + if (sta_iter->valid && ether_addr_equal(sta_iter->mac_addr, mac)) { + sta = sta_iter; + break; + } + } + spin_unlock_bh(&vif->rwnx_hw->cb_lock); + } + + if (sta) + return rwnx_fill_station_info(sta, vif, sinfo, NULL, NULL, NULL); + + return -ENOENT; +} + + +/** + * @dump_station: dump station callback -- resume dump at index @idx + */ +static int rwnx_cfg80211_dump_station(struct wiphy *wiphy, struct net_device *dev, + int idx, u8 *mac, struct station_info *sinfo) +{ + struct rwnx_vif *rwnx_vif = netdev_priv(dev); + //struct rwnx_hw *rwnx_hw = wiphy_priv(wiphy); + struct rwnx_sta *sta_iter, *sta = NULL; + //struct mesh_peer_info_cfm peer_info_cfm; + int i = 0; + + AICWFDBG(LOGDEBUG, "%s enter \r\n", __func__); + +#if 0 + if (RWNX_VIF_TYPE(rwnx_vif) != NL80211_IFTYPE_MESH_POINT){ + AICWFDBG(LOGERROR, "%s NOTSUPP \r\n", __func__); + return -ENOTSUPP; + } +#endif + + list_for_each_entry(sta_iter, &rwnx_vif->ap.sta_list, list) { + if (i < idx) { + i++; + continue; + } + + sta = sta_iter; + break; + } + + if (sta == NULL) + return -ENOENT; + + /* Copy peer MAC address */ + memcpy(mac, &sta->mac_addr, ETH_ALEN); + +#if 0 + /* Forward the information to the UMAC */ + if (rwnx_send_mesh_peer_info_req(rwnx_hw, rwnx_vif, sta->sta_idx, + &peer_info_cfm)) + return -ENOMEM; + + /* Copy peer MAC address */ + memcpy(mac, &sta->mac_addr, ETH_ALEN); + + /* Fill station information */ + sinfo->llid = peer_info_cfm.local_link_id; + sinfo->plid = peer_info_cfm.peer_link_id; + sinfo->plink_state = peer_info_cfm.link_state; + sinfo->local_pm = peer_info_cfm.local_ps_mode; + sinfo->peer_pm = peer_info_cfm.peer_ps_mode; + sinfo->nonpeer_pm = peer_info_cfm.non_peer_ps_mode; + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0) + sinfo->filled = (STATION_INFO_LLID | + STATION_INFO_PLID | + STATION_INFO_PLINK_STATE | + STATION_INFO_LOCAL_PM | + STATION_INFO_PEER_PM | + STATION_INFO_NONPEER_PM); +#else + sinfo->filled = 
(BIT(NL80211_STA_INFO_LLID) | + BIT(NL80211_STA_INFO_PLID) | + BIT(NL80211_STA_INFO_PLINK_STATE) | + BIT(NL80211_STA_INFO_LOCAL_PM) | + BIT(NL80211_STA_INFO_PEER_PM) | + BIT(NL80211_STA_INFO_NONPEER_PM)); +#endif +#endif + + if (sta){ + rwnx_fill_station_info(sta, rwnx_vif, sinfo, NULL, NULL, NULL); + } + + return 0; +} + +/** + * @add_mpath: add a fixed mesh path + */ +static int rwnx_cfg80211_add_mpath(struct wiphy *wiphy, struct net_device *dev, +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 0)) + u8 *dst, + u8 *next_hop +#else + const u8 *dst, + const u8 *next_hop +#endif +) +{ + struct rwnx_vif *rwnx_vif = netdev_priv(dev); + struct rwnx_hw *rwnx_hw = wiphy_priv(wiphy); + struct mesh_path_update_cfm cfm; + + if (RWNX_VIF_TYPE(rwnx_vif) != NL80211_IFTYPE_MESH_POINT) + return -ENOTSUPP; + + return rwnx_send_mesh_path_update_req(rwnx_hw, rwnx_vif, dst, next_hop, &cfm); +} + +/** + * @del_mpath: delete a given mesh path + */ +static int rwnx_cfg80211_del_mpath(struct wiphy *wiphy, struct net_device *dev, +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 0)) + u8 *dst +#else + const u8 *dst +#endif +) +{ + struct rwnx_vif *rwnx_vif = netdev_priv(dev); + struct rwnx_hw *rwnx_hw = wiphy_priv(wiphy); + struct mesh_path_update_cfm cfm; + + if (RWNX_VIF_TYPE(rwnx_vif) != NL80211_IFTYPE_MESH_POINT) + return -ENOTSUPP; + + return rwnx_send_mesh_path_update_req(rwnx_hw, rwnx_vif, dst, NULL, &cfm); +} + +/** + * @change_mpath: change a given mesh path + */ +static int rwnx_cfg80211_change_mpath(struct wiphy *wiphy, struct net_device *dev, +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 0)) + u8 *dst, + u8 *next_hop +#else + const u8 *dst, + const u8 *next_hop +#endif +) +{ + struct rwnx_vif *rwnx_vif = netdev_priv(dev); + struct rwnx_hw *rwnx_hw = wiphy_priv(wiphy); + struct mesh_path_update_cfm cfm; + + if (RWNX_VIF_TYPE(rwnx_vif) != NL80211_IFTYPE_MESH_POINT) + return -ENOTSUPP; + + return rwnx_send_mesh_path_update_req(rwnx_hw, rwnx_vif, dst, next_hop, &cfm); +} + +/** + * @get_mpath: get a mesh path for the given parameters + */ +static int rwnx_cfg80211_get_mpath(struct wiphy *wiphy, struct net_device *dev, + u8 *dst, u8 *next_hop, struct mpath_info *pinfo) +{ + struct rwnx_vif *rwnx_vif = netdev_priv(dev); + struct rwnx_mesh_path *mesh_path = NULL; + struct rwnx_mesh_path *cur; + + if (RWNX_VIF_TYPE(rwnx_vif) != NL80211_IFTYPE_MESH_POINT) + return -ENOTSUPP; + + list_for_each_entry(cur, &rwnx_vif->ap.mpath_list, list) { + /* Compare the path target address and the provided destination address */ + if (memcmp(dst, &cur->tgt_mac_addr, ETH_ALEN)) { + continue; + } + + mesh_path = cur; + break; + } + + if (mesh_path == NULL) + return -ENOENT; + + /* Copy next HOP MAC address */ + if (mesh_path->p_nhop_sta) + memcpy(next_hop, &mesh_path->p_nhop_sta->mac_addr, ETH_ALEN); + + /* Fill path information */ + pinfo->filled = 0; + pinfo->generation = rwnx_vif->ap.generation; + + return 0; +} + +/** + * @dump_mpath: dump mesh path callback -- resume dump at index @idx + */ +static int rwnx_cfg80211_dump_mpath(struct wiphy *wiphy, struct net_device *dev, + int idx, u8 *dst, u8 *next_hop, + struct mpath_info *pinfo) +{ + struct rwnx_vif *rwnx_vif = netdev_priv(dev); + struct rwnx_mesh_path *mesh_path = NULL; + struct rwnx_mesh_path *cur; + int i = 0; + + if (RWNX_VIF_TYPE(rwnx_vif) != NL80211_IFTYPE_MESH_POINT) + return -ENOTSUPP; + + list_for_each_entry(cur, &rwnx_vif->ap.mpath_list, list) { + if (i < idx) { + i++; + continue; + } + + mesh_path = cur; + break; + } + + if (mesh_path == NULL) + return 
-ENOENT; + + /* Copy target and next hop MAC address */ + memcpy(dst, &mesh_path->tgt_mac_addr, ETH_ALEN); + if (mesh_path->p_nhop_sta) + memcpy(next_hop, &mesh_path->p_nhop_sta->mac_addr, ETH_ALEN); + + /* Fill path information */ + pinfo->filled = 0; + pinfo->generation = rwnx_vif->ap.generation; + + return 0; +} + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0) +/** + * @get_mpp: get a mesh proxy path for the given parameters + */ +static int rwnx_cfg80211_get_mpp(struct wiphy *wiphy, struct net_device *dev, + u8 *dst, u8 *mpp, struct mpath_info *pinfo) +{ + struct rwnx_vif *rwnx_vif = netdev_priv(dev); + struct rwnx_mesh_proxy *mesh_proxy = NULL; + struct rwnx_mesh_proxy *cur; + + if (RWNX_VIF_TYPE(rwnx_vif) != NL80211_IFTYPE_MESH_POINT) + return -ENOTSUPP; + + list_for_each_entry(cur, &rwnx_vif->ap.proxy_list, list) { + if (cur->local) { + continue; + } + + /* Compare the path target address and the provided destination address */ + if (memcmp(dst, &cur->ext_sta_addr, ETH_ALEN)) { + continue; + } + + mesh_proxy = cur; + break; + } + + if (mesh_proxy == NULL) + return -ENOENT; + + memcpy(mpp, &mesh_proxy->proxy_addr, ETH_ALEN); + + /* Fill path information */ + pinfo->filled = 0; + pinfo->generation = rwnx_vif->ap.generation; + + return 0; +} + +/** + * @dump_mpp: dump mesh proxy path callback -- resume dump at index @idx + */ +static int rwnx_cfg80211_dump_mpp(struct wiphy *wiphy, struct net_device *dev, + int idx, u8 *dst, u8 *mpp, struct mpath_info *pinfo) +{ + struct rwnx_vif *rwnx_vif = netdev_priv(dev); + struct rwnx_mesh_proxy *mesh_proxy = NULL; + struct rwnx_mesh_proxy *cur; + int i = 0; + + if (RWNX_VIF_TYPE(rwnx_vif) != NL80211_IFTYPE_MESH_POINT) + return -ENOTSUPP; + + list_for_each_entry(cur, &rwnx_vif->ap.proxy_list, list) { + if (cur->local) { + continue; + } + + if (i < idx) { + i++; + continue; + } + + mesh_proxy = cur; + break; + } + + if (mesh_proxy == NULL) + return -ENOENT; + + /* Copy target MAC address */ + memcpy(dst, &mesh_proxy->ext_sta_addr, ETH_ALEN); + memcpy(mpp, &mesh_proxy->proxy_addr, ETH_ALEN); + + /* Fill path information */ + pinfo->filled = 0; + pinfo->generation = rwnx_vif->ap.generation; + + return 0; +} +#endif /* version >= 3.19 */ + +/** + * @get_mesh_config: Get the current mesh configuration + */ +static int rwnx_cfg80211_get_mesh_config(struct wiphy *wiphy, struct net_device *dev, + struct mesh_config *conf) +{ + struct rwnx_vif *rwnx_vif = netdev_priv(dev); + + if (RWNX_VIF_TYPE(rwnx_vif) != NL80211_IFTYPE_MESH_POINT) + return -ENOTSUPP; + + return 0; +} + +/** + * @update_mesh_config: Update mesh parameters on a running mesh. 
+ */ +static int rwnx_cfg80211_update_mesh_config(struct wiphy *wiphy, struct net_device *dev, + u32 mask, const struct mesh_config *nconf) +{ + struct rwnx_vif *rwnx_vif = netdev_priv(dev); + struct rwnx_hw *rwnx_hw = wiphy_priv(wiphy); + struct mesh_update_cfm cfm; + int status; + + if (RWNX_VIF_TYPE(rwnx_vif) != NL80211_IFTYPE_MESH_POINT) + return -ENOTSUPP; + + if (mask & CO_BIT(NL80211_MESHCONF_POWER_MODE - 1)) { + rwnx_vif->ap.next_mesh_pm = nconf->power_mode; + + if (!list_empty(&rwnx_vif->ap.sta_list)) { + // If there are mesh links we don't want to update the power mode + // It will be updated with rwnx_update_mesh_power_mode() when the + // ps mode of a link is updated or when a new link is added/removed + mask &= ~BIT(NL80211_MESHCONF_POWER_MODE - 1); + + if (!mask) + return 0; + } + } + + status = rwnx_send_mesh_update_req(rwnx_hw, rwnx_vif, mask, nconf, &cfm); + + if (!status && (cfm.status != 0)) + status = -EINVAL; + + return status; +} + +/** + * @join_mesh: join the mesh network with the specified parameters + * (invoked with the wireless_dev mutex held) + */ +static int rwnx_cfg80211_join_mesh(struct wiphy *wiphy, struct net_device *dev, + const struct mesh_config *conf, const struct mesh_setup *setup) +{ + struct rwnx_hw *rwnx_hw = wiphy_priv(wiphy); + struct rwnx_vif *rwnx_vif = netdev_priv(dev); + struct mesh_start_cfm mesh_start_cfm; + int error = 0; + u8 txq_status = 0; + /* STA for BC/MC traffic */ + struct rwnx_sta *sta; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + if (RWNX_VIF_TYPE(rwnx_vif) != NL80211_IFTYPE_MESH_POINT) + return -ENOTSUPP; + + /* Forward the information to the UMAC */ + if ((error = rwnx_send_mesh_start_req(rwnx_hw, rwnx_vif, conf, setup, &mesh_start_cfm))) { + return error; + } + + /* Check the status */ + switch (mesh_start_cfm.status) { + case CO_OK: + rwnx_vif->ap.bcmc_index = mesh_start_cfm.bcmc_idx; + rwnx_vif->ap.flags = 0; + rwnx_vif->use_4addr = true; + rwnx_vif->user_mpm = setup->user_mpm; + + sta = &rwnx_hw->sta_table[mesh_start_cfm.bcmc_idx]; + sta->valid = true; + sta->aid = 0; + sta->sta_idx = mesh_start_cfm.bcmc_idx; + sta->ch_idx = mesh_start_cfm.ch_idx; + sta->vif_idx = rwnx_vif->vif_index; + sta->qos = true; + sta->acm = 0; + sta->ps.active = false; + rwnx_mu_group_sta_init(sta, NULL); + spin_lock_bh(&rwnx_hw->cb_lock); + rwnx_chanctx_link(rwnx_vif, mesh_start_cfm.ch_idx, + (struct cfg80211_chan_def *)(&setup->chandef)); + if (rwnx_hw->cur_chanctx != mesh_start_cfm.ch_idx) { + txq_status = RWNX_TXQ_STOP_CHAN; + } + rwnx_txq_vif_init(rwnx_hw, rwnx_vif, txq_status); + spin_unlock_bh(&rwnx_hw->cb_lock); + + netif_tx_start_all_queues(dev); + netif_carrier_on(dev); + + /* If the AP channel is already the active, we probably skip radar + activation on MM_CHANNEL_SWITCH_IND (unless another vif use this + ctxt). 
In anycase retest if radar detection must be activated + */ + if (rwnx_hw->cur_chanctx == mesh_start_cfm.ch_idx) { + rwnx_radar_detection_enable_on_cur_channel(rwnx_hw); + } + break; + + case CO_BUSY: + error = -EINPROGRESS; + break; + + default: + error = -EIO; + break; + } + + /* Print information about the operation */ + if (error) { + netdev_info(dev, "Failed to start MP (%d)", error); + } else { + netdev_info(dev, "MP started: ch=%d, bcmc_idx=%d", + rwnx_vif->ch_index, rwnx_vif->ap.bcmc_index); + } + + return error; +} + +/** + * @leave_mesh: leave the current mesh network + * (invoked with the wireless_dev mutex held) + */ +static int rwnx_cfg80211_leave_mesh(struct wiphy *wiphy, struct net_device *dev) +{ + struct rwnx_hw *rwnx_hw = wiphy_priv(wiphy); + struct rwnx_vif *rwnx_vif = netdev_priv(dev); + struct mesh_stop_cfm mesh_stop_cfm; + int error = 0; + + error = rwnx_send_mesh_stop_req(rwnx_hw, rwnx_vif, &mesh_stop_cfm); + + if (error == 0) { + /* Check the status */ + switch (mesh_stop_cfm.status) { + case CO_OK: + spin_lock_bh(&rwnx_hw->cb_lock); + rwnx_chanctx_unlink(rwnx_vif); + rwnx_radar_cancel_cac(&rwnx_hw->radar); + spin_unlock_bh(&rwnx_hw->cb_lock); + /* delete BC/MC STA */ + rwnx_txq_vif_deinit(rwnx_hw, rwnx_vif); + rwnx_del_bcn(&rwnx_vif->ap.bcn); + + netif_tx_stop_all_queues(dev); + netif_carrier_off(dev); + + break; + + default: + error = -EIO; + break; + } + } + + if (error) { + netdev_info(dev, "Failed to stop MP"); + } else { + netdev_info(dev, "MP Stopped"); + } + + return 0; +} + +static struct cfg80211_ops rwnx_cfg80211_ops = { + .add_virtual_intf = rwnx_cfg80211_add_iface, + .del_virtual_intf = rwnx_cfg80211_del_iface, + .change_virtual_intf = rwnx_cfg80211_change_iface, + .start_p2p_device = rwnx_cfgp2p_start_p2p_device, + .stop_p2p_device = rwnx_cfgp2p_stop_p2p_device, + .scan = rwnx_cfg80211_scan, + .connect = rwnx_cfg80211_connect, + .disconnect = rwnx_cfg80211_disconnect, + .add_key = rwnx_cfg80211_add_key, + .get_key = rwnx_cfg80211_get_key, + .del_key = rwnx_cfg80211_del_key, + .set_default_key = rwnx_cfg80211_set_default_key, + .set_default_mgmt_key = rwnx_cfg80211_set_default_mgmt_key, + .add_station = rwnx_cfg80211_add_station, + .del_station = rwnx_cfg80211_del_station_compat, + .change_station = rwnx_cfg80211_change_station, + .mgmt_tx = rwnx_cfg80211_mgmt_tx, + .start_ap = rwnx_cfg80211_start_ap, + .change_beacon = rwnx_cfg80211_change_beacon, + .stop_ap = rwnx_cfg80211_stop_ap, + .set_monitor_channel = rwnx_cfg80211_set_monitor_channel, + .probe_client = rwnx_cfg80211_probe_client, +// .mgmt_frame_register = rwnx_cfg80211_mgmt_frame_register, + .set_wiphy_params = rwnx_cfg80211_set_wiphy_params, + .set_txq_params = rwnx_cfg80211_set_txq_params, + .set_tx_power = rwnx_cfg80211_set_tx_power, +// .get_tx_power = rwnx_cfg80211_get_tx_power, + .set_power_mgmt = rwnx_cfg80211_set_power_mgmt, + .get_station = rwnx_cfg80211_get_station, + .remain_on_channel = rwnx_cfg80211_remain_on_channel, + .cancel_remain_on_channel = rwnx_cfg80211_cancel_remain_on_channel, + .dump_survey = rwnx_cfg80211_dump_survey, + .get_channel = rwnx_cfg80211_get_channel, + .start_radar_detection = rwnx_cfg80211_start_radar_detection, + .update_ft_ies = rwnx_cfg80211_update_ft_ies, + .set_cqm_rssi_config = rwnx_cfg80211_set_cqm_rssi_config, +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0) + .channel_switch = rwnx_cfg80211_channel_switch, +#endif +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0) + //.tdls_channel_switch = rwnx_cfg80211_tdls_channel_switch, + 
//.tdls_cancel_channel_switch = rwnx_cfg80211_tdls_cancel_channel_switch, +#endif + //.tdls_mgmt = rwnx_cfg80211_tdls_mgmt, + //.tdls_oper = rwnx_cfg80211_tdls_oper, + .change_bss = rwnx_cfg80211_change_bss, +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 17, 0) || defined(CONFIG_WPA3_FOR_OLD_KERNEL) + .external_auth = rwnx_cfg80211_external_auth, +#endif +#ifdef CONFIG_SCHED_SCAN + .sched_scan_start = rwnx_cfg80211_sched_scan_start, + .sched_scan_stop = rwnx_cfg80211_sched_scan_stop, +#endif + +}; + + +/********************************************************************* + * Init/Exit functions + *********************************************************************/ +static void rwnx_wdev_unregister(struct rwnx_hw *rwnx_hw) +{ + struct rwnx_vif *rwnx_vif, *tmp; + + rtnl_lock(); + list_for_each_entry_safe(rwnx_vif, tmp, &rwnx_hw->vifs, list) { + #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0) + rwnx_cfg80211_del_iface(rwnx_hw->wiphy, &rwnx_vif->wdev); + #else + rwnx_cfg80211_del_iface(rwnx_hw->wiphy, rwnx_vif->ndev); + #endif + } + rtnl_unlock(); +} + +static void rwnx_set_vers(struct rwnx_hw *rwnx_hw) +{ + u32 vers = rwnx_hw->version_cfm.version_lmac; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + snprintf(rwnx_hw->wiphy->fw_version, + sizeof(rwnx_hw->wiphy->fw_version), "%d.%d.%d.%d", + (vers & (0xff << 24)) >> 24, (vers & (0xff << 16)) >> 16, + (vers & (0xff << 8)) >> 8, (vers & (0xff << 0)) >> 0); +} + +static void rwnx_reg_notifier(struct wiphy *wiphy, + struct regulatory_request *request) +{ + struct rwnx_hw *rwnx_hw = wiphy_priv(wiphy); + + printk("%s Enter\r\n", __func__); + + // For now trust all initiator +#ifdef CONFIG_RADAR_OR_IR_DETECT + if(request->dfs_region != 0) + rwnx_radar_set_domain(&rwnx_hw->radar, request->dfs_region); +#else + rwnx_radar_set_domain(&rwnx_hw->radar, request->dfs_region); +#endif + + if (rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8801 || + ((rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800DC || + rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800DW || + rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800D81 || + rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800D81X2 || + rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800D89X2) && testmode == 0)){ + rwnx_send_me_chan_config_req(rwnx_hw, &request->alpha2[0]); + } +} + +static void rwnx_enable_mesh(struct rwnx_hw *rwnx_hw) +{ + struct wiphy *wiphy = rwnx_hw->wiphy; + + if (!rwnx_mod_params.mesh) + return; + + rwnx_cfg80211_ops.get_station = rwnx_cfg80211_get_station; + rwnx_cfg80211_ops.dump_station = rwnx_cfg80211_dump_station; + rwnx_cfg80211_ops.add_mpath = rwnx_cfg80211_add_mpath; + rwnx_cfg80211_ops.del_mpath = rwnx_cfg80211_del_mpath; + rwnx_cfg80211_ops.change_mpath = rwnx_cfg80211_change_mpath; + rwnx_cfg80211_ops.get_mpath = rwnx_cfg80211_get_mpath; + rwnx_cfg80211_ops.dump_mpath = rwnx_cfg80211_dump_mpath; +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0) + rwnx_cfg80211_ops.get_mpp = rwnx_cfg80211_get_mpp; + rwnx_cfg80211_ops.dump_mpp = rwnx_cfg80211_dump_mpp; +#endif + rwnx_cfg80211_ops.get_mesh_config = rwnx_cfg80211_get_mesh_config; + rwnx_cfg80211_ops.update_mesh_config = rwnx_cfg80211_update_mesh_config; + rwnx_cfg80211_ops.join_mesh = rwnx_cfg80211_join_mesh; + rwnx_cfg80211_ops.leave_mesh = rwnx_cfg80211_leave_mesh; + + wiphy->flags |= (WIPHY_FLAG_MESH_AUTH | WIPHY_FLAG_IBSS_RSN); + wiphy->features |= NL80211_FEATURE_USERSPACE_MPM; + wiphy->interface_modes |= BIT(NL80211_IFTYPE_MESH_POINT); + + rwnx_limits[0].types |= BIT(NL80211_IFTYPE_MESH_POINT); + rwnx_limits_dfs[0].types |= BIT(NL80211_IFTYPE_MESH_POINT); +} + 
+extern int rwnx_init_aic(struct rwnx_hw *rwnx_hw); +#ifdef AICWF_USB_SUPPORT +u32 patch_tbl[][2] = +{ + {0x0044, 0x00000002}, //hosttype + {0x0048, 0x00000060}, + {0x004c, 0x00000046}, + {0x0050, 0x00000000}, //ipc base + {0x0054, 0x001a0000}, //buf base + {0x0058, 0x001a0140}, //desc base + {0x005c, 0x00001020}, //desc size + {0x0060, 0x001a1020}, //pkt base + {0x0064, 0x000207e0}, //pkt size + {0x0068, 0x00000008}, + {0x006c, 0x00000040}, + {0x0070, 0x00000040}, + {0x0074, 0x00000020}, + {0x0078, 0x00000000}, + {0x007c, 0x00000040}, + {0x0080, 0x00190000}, + {0x0084, 0x0000fc00},//63kB + {0x0088, 0x0019fc00}, + #ifdef USE_5G + {0x00b4, 0xf3010001}, + #else + {0x00b4, 0xf3010000}, + #endif + //{0x00b8, 0x0f010a01}, //tx enhanced en, tx enhanced lo rate + //{0x00bc, 0x00000008}, //tx enhanced hi rate +}; +#else +//#ifdef CONFIG_ROM_PATCH_EN +#if 0 +u32 patch_tbl[][2] = +{ + {0x0044, 0x02000001}, + {0x0058, 0x001a0000}, + {0x005c, 0x00002020}, + {0x0060, 0x001a2020}, //pkt base + {0x0064, 0x00021820}, //pkt size + {0x0080, 0x00190000}, + {0x0084, 0x0000fc00},//63kB + {0x0088, 0x0019fc00} +}; +#else +u32 patch_tbl[][2] = +{ + #ifdef USE_5G + {0x00b4, 0xf3010001}, + #else + {0x00b4, 0xf3010000}, + #endif +}; +#endif +#endif + +u32 patch_tbl_1[14][2] = +{ + {0x171b24, 0x1c4021}, //61 + {0x171c00, 0x1c40b1}, //116 + {0x172124, 0x1c43ed}, //12*8 + 1720c4 + {0x171bfc, 0x1c4849}, //115, 171a30 + 115 * 4 + {0x171ee4, 0x1c4941}, //301 + {0x171ee8, 0x1c4b09}, //302 + {0x172134, 0x1c4d65}, //14/15/16/17/18 * 8 + 1720c4{0x172134, 0x1c4d65}, + {0x17213c, 0x1c4d65}, + {0x172144, 0x1c4d65}, + {0x17214c, 0x1c4d65}, + {0x172154, 0x1c4d65}, + {0x1721d0, 0x1c53dd}, // 1721c4 + 1*8 + 4 + {0x1721f0, 0x1c5415}, // 1721c4 + 5*8 + 4 + {0x171eb0, 0x1c54a1}, // 288 +}; + +u32 func_tbl[1721] = +{ + 0x8cc88cc3, + 0xd8084283, + 0x1ac0d205, + 0xbfcc283f, + 0x20012000, + 0x20014770, + 0x20004770, + 0xbf004770, + 0x481d4a1c, + 0xb538491d, + 0x68056913, + 0xf8b16914, + 0xf8b100b0, + 0xf5a310b2, + 0xeb0363fa, + 0x1b1b1345, + 0x1a5b1a1b, + 0xdb1a2b00, + 0x681b4b15, + 0x68dcb143, + 0x1ae36913, + 0x63faf5a3, + 0x1a5b1a1b, + 0xdb012b00, + 0xbd382001, + 0x681b4b0f, + 0x3000f9b3, + 0xda062b00, + 0x1ae46913, + 0x549cf504, + 0x2c003408, + 0x2000db01, + 0x4909bd38, + 0xf44f4809, + 0xf66b7202, + 0x2000fad9, + 0xbf00bd38, + 0x40501000, + 0x40328040, + 0x0017192c, + 0x00178bf0, + 0x00173250, + 0x001c5a3c, + 0x001c56b4, + 0x4ff0e92d, + 0x8c05461c, + 0x3062f893, + 0x9020f8d2, + 0xf8d06987, + 0xb089b01c, + 0x02ad4616, + 0x2012e9dd, + 0xf8b4b9d3, + 0xf1bcc068, + 0xd0150f00, + 0x6ab38b52, + 0xf202fb0c, + 0x07189205, + 0xf20cfb05, + 0xd0199206, + 0xebb72300, + 0xf44f0a09, + 0x930776fa, + 0xf5039b05, + 0xe9c473c8, + 0xe071a31f, + 0xf0402800, + 0x950680ba, + 0x0a01f04f, + 0x6ab28b50, + 0xf000fb0a, + 0x90050712, + 0x8122f040, + 0xf8df6af2, + 0xf8dfc2bc, + 0xf8dc82bc, + 0x48983000, + 0x1203f3c2, + 0x037ff023, + 0x2002f818, + 0xf8cc4313, + 0x6ab33000, + 0xf3c34a93, + 0xea4113c0, + 0xf04f5103, + 0x60014300, + 0xf3bf6013, + 0xbf008f4f, + 0x00586813, + 0x4b8dd5fc, + 0xf9b16819, + 0x29001000, + 0x80aef2c0, + 0x68124a88, + 0xfa82fa1f, + 0x07126ab2, + 0x809df040, + 0xf8df6af2, + 0x4882c25c, + 0x1000f8dc, + 0x1203f3c2, + 0x017ff021, + 0x2002f818, + 0xf8cc430a, + 0x6ab22000, + 0xf3c2497c, + 0x051212c0, + 0x0218f042, + 0x4600f04f, + 0x600e6002, + 0x8f4ff3bf, + 0x680abf00, + 0xd5fc0056, + 0xf9b3681b, + 0x2b003000, + 0x4a72db6e, + 0x3062f894, + 0x22006816, + 0xebaab2b6, + 0x92070a06, + 0x0909ebb7, + 0x0a0aeb19, + 0xd0872b00, + 0x79e5ea4f, + 0x464b462a, + 
0x46594638, + 0xfc50f679, + 0x92021bba, + 0xfb009a07, + 0xeb6bf309, + 0xfb050202, + 0x92033301, + 0x0105fba0, + 0xe9dd4419, + 0x42992302, + 0x4290bf08, + 0xe9cdbf38, + 0x4b5e0102, + 0x6702e9dd, + 0x9b066819, + 0x018918f6, + 0x9905d430, + 0xf5a11a71, + 0x4b597ac8, + 0x69194a59, + 0x691b6812, + 0x44511a89, + 0xf5a31acb, + 0x3b1453a5, + 0x6a632b00, + 0x1949bfb8, + 0xd022428b, + 0x6a1a4b52, + 0xd04142a2, + 0xf1044d51, + 0xf8d50018, + 0x479831e0, + 0x30a8f8d5, + 0xb0094620, + 0x4ff0e8bd, + 0xf8904718, + 0xf1baa002, + 0xd1010f00, + 0xa003f890, + 0xf00afb05, + 0xe73d9006, + 0x98054946, + 0xeba11a09, + 0x44b20a0a, + 0xb009e7cb, + 0x8ff0e8bd, + 0x0058680b, + 0x4941d48d, + 0xf44f4841, + 0xf66b72ae, + 0x2200f98f, + 0x3062f894, + 0xf5aa9207, + 0xf44f7afa, + 0xe78776fa, + 0x00516812, + 0xaf4ef53f, + 0x48384937, + 0x72aef44f, + 0xf97cf66b, + 0x7afaf44f, + 0xe7474b2c, + 0x62614a2c, + 0x01926812, + 0x4a32d4b8, + 0x68124d32, + 0x1024f893, + 0x4207f3c2, + 0x2d23f885, + 0x4a2fbb61, + 0x6812492f, + 0x20016809, + 0xf885b2d2, + 0xf8832d24, + 0x780b0024, + 0xd1054283, + 0x4a2b492a, + 0x6813600b, + 0x60134303, + 0x2d23f895, + 0x3d24f895, + 0xd21b429a, + 0x681b4b17, + 0x3000f9b3, + 0xda0d2b00, + 0x2d23f895, + 0x3d24f895, + 0xd307429a, + 0x48214920, + 0xf44f4d15, + 0xf66b7221, + 0xe787f96f, + 0xe7854d12, + 0xf44f2200, + 0x920776fa, + 0xe7354692, + 0x4b124814, + 0x1d23f895, + 0x2d24f895, + 0x6806681b, + 0xb2f64816, + 0x96000c1b, + 0xff82f66a, + 0xbf00e7d4, + 0x40328160, + 0x4032816c, + 0x00173250, + 0x4032004c, + 0x40501000, + 0x403280a4, + 0x00178d3c, + 0x00171a30, + 0xfffffe70, + 0x001c5a20, + 0x001c56e0, + 0x40328044, + 0x001e4000, + 0x40320090, + 0x00173270, + 0x40328070, + 0x40328074, + 0x001c5a58, + 0x001c5718, + 0x001c5704, + 0x40328164, + 0x00041830, + 0x4ff0e92d, + 0x9354f8df, + 0xb020f8d9, + 0xf44fb083, + 0xf6692000, + 0xf1bbfd9b, + 0xf0000f00, + 0x4cb480f1, + 0xa33cf8df, + 0x4fb37fa1, + 0x8338f8df, + 0x29004eb2, + 0x80e6f000, + 0xd50e0708, + 0xf8db4bb0, + 0x681b0070, + 0x2010f8da, + 0x4403685b, + 0x2b001a9b, + 0x80c7f2c0, + 0x01f7f001, + 0x074a77a1, + 0x8095f100, + 0xd515078b, + 0x3004f8db, + 0x0302f023, + 0x3004f8cb, + 0x301df899, + 0xd1082b05, + 0x48a34da2, + 0x31d8f8d5, + 0x23004798, + 0xf8897fa1, + 0xf001301d, + 0x77a101fd, + 0xd52d07cd, + 0x01586833, + 0x809ff140, + 0x0c096831, + 0x7f7cf411, + 0x80a6f000, + 0x4b983910, + 0x01fff001, + 0x1281eb01, + 0x03c2eb03, + 0x2021f893, + 0xf0002a00, + 0x4a93809d, + 0x68137f99, + 0xf9b34a92, + 0xf44f3000, + 0xfb0060a4, + 0x2b002201, + 0xf2c09200, + 0x683b809c, + 0x2fe0f413, + 0x80a5f000, + 0xf0017fa1, + 0x77a101fe, + 0xd59e068a, + 0x49894e88, + 0xf3c56835, + 0x463a1741, + 0xf66a2002, + 0x2300ff23, + 0x1547f3c5, + 0x3078f88b, + 0xf0402f00, + 0x4e7b80af, + 0x4d824a81, + 0xf4236813, + 0x60137300, + 0x3004f8db, + 0x21d8f8d6, + 0x0301f023, + 0x3004f8cb, + 0x0028f10b, + 0x682a4790, + 0x2b027813, + 0x8114f000, + 0x680b4974, + 0x0301f023, + 0x7813600b, + 0xd1122b01, + 0x41fff501, + 0x4a733134, + 0x3010f8da, + 0xf8b26808, + 0xf8d610b2, + 0xeb0321e0, + 0x1a591340, + 0x0018f10b, + 0x682b4790, + 0x2b02781b, + 0x80b4f000, + 0xf0017fa1, + 0x77a101df, + 0x4968e74f, + 0x20024d5d, + 0xfedcf66a, + 0x68634966, + 0xf022680a, + 0x600a0204, + 0xf4436822, + 0x431a7300, + 0xf8d5614a, + 0x60631498, + 0x0063f89b, + 0x3494f8d5, + 0x4798465a, + 0x7fa1683b, + 0x737cf023, + 0xf8d8603b, + 0xf0013000, + 0xf44301fb, + 0xf8c80380, + 0x77a13000, + 0x4856e742, + 0xfe66f66a, + 0x2200e782, + 0x006cf89b, + 0xf6584611, + 0xb160fd17, + 0xe72f7fa1, + 0xf66a4850, + 0xe775fe59, + 0xf66a484f, + 0xe771fe55, 
+ 0xe8bdb003, + 0xf8da8ff0, + 0x7fa13010, + 0x3070f8cb, + 0x4593e71e, + 0xaf61f43f, + 0x48494948, + 0x22e6f240, + 0xf818f66b, + 0xf413683b, + 0xf47f2fe0, + 0x6832af5b, + 0x49446833, + 0xf3c30fd0, + 0x46057380, + 0x20024602, + 0xf66a9301, + 0x9b01fe81, + 0x0205ea53, + 0xf43f4628, + 0x4d2baf49, + 0x46199a00, + 0x323cf8d5, + 0x68334798, + 0x4300f023, + 0x68336033, + 0x4380f023, + 0xe7396033, + 0x2010f8da, + 0x529cf502, + 0x32084631, + 0xf8dae005, + 0x1ad33010, + 0xf2c02b00, + 0x680b80b2, + 0xd5f6071b, + 0x8080f8df, + 0x3000f8d8, + 0x0308f023, + 0x3000f8c8, + 0x3000f8d8, + 0xd51206de, + 0x49274e15, + 0xf66a2002, + 0xf8d6fe4b, + 0xf005323c, + 0x08780101, + 0x4798465a, + 0x3000f8d8, + 0x0310f023, + 0x3000f8c8, + 0x4b1fe722, + 0x4e0b491f, + 0x601d2501, + 0xf66a2002, + 0x4b1dfe35, + 0xe717601d, + 0x22304b1c, + 0xf657601a, + 0xe745fddb, + 0x00178bb8, + 0x40320074, + 0x40320070, + 0x00173244, + 0x00171a30, + 0x00178d48, + 0x00175aa8, + 0x00173250, + 0x00177738, + 0x4032008c, + 0x001c57ec, + 0x4033b390, + 0x00173270, + 0x0017192c, + 0x001c578c, + 0x4032004c, + 0x001c5790, + 0x001c57a8, + 0x001c57bc, + 0x001c5a70, + 0x001c57d0, + 0x001c57e0, + 0x001c580c, + 0x40328564, + 0x001c5818, + 0x40328568, + 0x40320038, + 0x00178d3c, + 0x40501000, + 0x4032006c, + 0x8310f3ef, + 0xd40307d8, + 0x4b31b672, + 0x601a2201, + 0x4f314b30, + 0x3201681a, + 0x2100601a, + 0x6039683a, + 0x080ff002, + 0x2010f8da, + 0x46164631, + 0xf247460a, + 0xe0045030, + 0x1010f8da, + 0x42811b89, + 0x6839d81b, + 0xd1f70709, + 0x49264825, + 0xc000f8d0, + 0x4616680f, + 0x2010f8da, + 0x0f00f1b8, + 0x4a22d11a, + 0x60112104, + 0xb132681a, + 0x3a01491a, + 0x680b601a, + 0xb103b90a, + 0x682ab662, + 0x491ce6b0, + 0x20029200, + 0xfdb0f66a, + 0x9a004b14, + 0x4919e7d3, + 0xf66a2002, + 0xe74bfda9, + 0x070cea07, + 0xd4e0077f, + 0x1600e9cd, + 0x46164680, + 0x077ae001, + 0x9a00d412, + 0x7000f8d8, + 0xf8da6812, + 0xf2471010, + 0x1b895030, + 0xea074281, + 0xd9f00702, + 0x2002490b, + 0xfd8cf66a, + 0xe7ea4b02, + 0xe7c49e01, + 0x0017569c, + 0x00172600, + 0x40320038, + 0x4032806c, + 0x40328074, + 0x40328070, + 0x001c5828, + 0x001c57f8, + 0x001c5834, + 0xf890b5f8, + 0x2b003064, + 0xf890d03a, + 0x4604308a, + 0xd1362b00, + 0xf8944e32, + 0x4a32306c, + 0x6a674932, + 0xeb036a09, + 0xeb021383, + 0x42a103c3, + 0x443d685d, + 0xf8d6d044, + 0x462931e0, + 0x0018f104, + 0x46204798, + 0xfafaf65e, + 0x1080f8d4, + 0x3214f8d6, + 0x46204439, + 0xf8d64798, + 0x462a30a4, + 0x46204639, + 0xb9784798, + 0x3078f894, + 0x49216862, + 0xb2db3301, + 0x0201f042, + 0xf8846809, + 0x60623078, + 0x4293780a, + 0xd029d811, + 0x3b01bdf8, + 0x2b01b2db, + 0x308af880, + 0x2b02d912, + 0xd1c04e13, + 0x0063f890, + 0x31c0f8d6, + 0x47982100, + 0xf8d6e7b9, + 0xf8941164, + 0x4622006c, + 0x40f8e8bd, + 0xbb84f658, + 0x40f8e8bd, + 0xbb3af65e, + 0x62654b0c, + 0x30b5f893, + 0xd1ba2b00, + 0x681b4b0a, + 0x2b02781b, + 0xe7b4d1af, + 0xe8bd4620, + 0xf66540f8, + 0xbf00b8d1, + 0x00171a30, + 0x00175aa8, + 0x00178d3c, + 0x00173244, + 0x0017192c, + 0x00173270, + 0x4c5cb538, + 0x68224b5c, + 0xf002495c, + 0x701a020f, + 0xf66a2002, + 0x6823fcef, + 0xd02e0718, + 0x4a594958, + 0x2000680b, + 0x4300f023, + 0x6020600b, + 0x07596813, + 0x4b55d5fc, + 0x4a554952, + 0x60182004, + 0xf043680b, + 0x600b4300, + 0xf0436813, + 0x60136300, + 0x20024950, + 0xfcd0f66a, + 0x4a4f4b47, + 0x494f4847, + 0x601c2420, + 0x24016813, + 0x0310f043, + 0x70446013, + 0x781b680b, + 0xd0382b03, + 0xd0062b01, + 0x4a44bd38, + 0xf0236813, + 0x60136300, + 0x4a45e7e2, + 0x035b6813, + 0x4a44d5fc, + 0x311c6911, + 0x1acb6913, + 0xdafb2b00, + 0x681c4b41, + 
0x4b41b174, + 0xf9b3681b, + 0x2b003000, + 0x4a3fdb4e, + 0xf8b268e3, + 0x4a3a10b2, + 0x21041a5b, + 0x60916313, + 0x4a3c493b, + 0xf443680b, + 0x600b4300, + 0xf4436d13, + 0x65134300, + 0xf0236dd3, + 0xf0430303, + 0xf0434300, + 0x65d30301, + 0x4b2fbd38, + 0xb174681c, + 0x681b4b2e, + 0x3000f9b3, + 0xdb152b00, + 0x68e34a2c, + 0x10b2f8b2, + 0x1a5b4a27, + 0x63132104, + 0x4b2b6091, + 0xf042681a, + 0x601a0201, + 0x075a681b, + 0x4b28d5ae, + 0x7200f44f, + 0xbd38601a, + 0x4d214b1e, + 0x68e3691a, + 0x10b2f8b5, + 0x1a521a9a, + 0xdae32a00, + 0x48224921, + 0x32dbf240, + 0xfddef66a, + 0xf8b568e3, + 0xe7d910b2, + 0x69124d17, + 0xf8b568e3, + 0x1a9a10b2, + 0x2a001a52, + 0x4918daab, + 0xf2404818, + 0xf66a32e9, + 0x68e3fdcb, + 0x10b2f8b5, + 0xbf00e7a1, + 0x40320038, + 0x00172604, + 0x001c5844, + 0x40328074, + 0x4032806c, + 0x40328070, + 0x40328048, + 0x001c584c, + 0x40580010, + 0x00173274, + 0x40041020, + 0x40501000, + 0x00178bf0, + 0x00173250, + 0x0017192c, + 0x40240014, + 0x40506000, + 0x40044084, + 0x40044100, + 0x001c5a8c, + 0x001c5854, + 0xf890b380, + 0xb36b3064, + 0x47f0e92d, + 0x4a7b4b7a, + 0xf8df4e7b, + 0xf8df920c, + 0x4d7a8240, + 0x460c487a, + 0x2702497a, + 0xc470f8d1, + 0x7180f8c3, + 0x49786892, + 0xc044f8c2, + 0x601f2201, + 0xf8d97032, + 0x680b2010, + 0xf0236072, + 0x600b0302, + 0x1000f8d8, + 0xc0b2f8b5, + 0x68018f8b, + 0xebb34463, + 0xea4f1f41, + 0xd3021a41, + 0x87f0e8bd, + 0x496b4770, + 0xf66a4638, + 0xf8d9fbdf, + 0x68f32010, + 0x2b001a9b, + 0xf2804b67, + 0xf89380a2, + 0xf8932d23, + 0x2a012d24, + 0xf893d90a, + 0x68612d23, + 0xf0002a00, + 0xf893809e, + 0x3a012d23, + 0xaa02fb01, + 0x3d23f893, + 0x4652495d, + 0xf66a2002, + 0xf8d8fbbf, + 0xf8b53000, + 0x8f9a00b2, + 0x4b594955, + 0xc178f8df, + 0x680b691c, + 0xebaa4402, + 0xf0220202, + 0xf0030703, + 0x433b0303, + 0x600b4f53, + 0x60b2683b, + 0x0301f043, + 0xf8dc603b, + 0xf8dc3000, + 0xf3c31000, + 0xf4210309, + 0xf043717f, + 0xf0210301, + 0x430b0103, + 0x3000f8cc, + 0x4422683b, + 0x60f2075b, + 0x4b47d503, + 0x7200f44f, + 0x4a46601a, + 0x68134c46, + 0xf043493d, + 0x60130308, + 0xf0436823, + 0x60230302, + 0xf5a2680b, + 0x3a1022fe, + 0x0301f043, + 0x6911600b, + 0x7196f501, + 0x1a5b6913, + 0xdbfb2b00, + 0x681b4b3b, + 0x2b01781b, + 0x493ad188, + 0x6a4b4c3a, + 0xf0236824, + 0xf04303ff, + 0x624b03df, + 0xf4236a4b, + 0xf443437f, + 0x624b435f, + 0x4b34b194, + 0xf9b3681b, + 0x2b003000, + 0x68e2db31, + 0x48316861, + 0xfb04f66a, + 0x10b2f8b5, + 0x4a2568e3, + 0x21041a5b, + 0x60916313, + 0x4a28492c, + 0xf443680b, + 0x600b4300, + 0xf4436d13, + 0x65134300, + 0xf0236dd3, + 0xf0430303, + 0xf0434300, + 0x65d30301, + 0xf4436813, + 0x60135380, + 0x4922e74e, + 0x3d23f893, + 0x46524638, + 0xfb2ef66a, + 0xf893e76d, + 0x3a012d24, + 0xaa02fb01, + 0x6913e760, + 0x1ad368e2, + 0x28001a18, + 0x4919dac8, + 0xf2404819, + 0xf66a4231, + 0xe7c0fca1, + 0xe000e100, + 0xe000ed00, + 0x00173254, + 0x0017192c, + 0x40328040, + 0x00171a30, + 0x40320084, + 0x001c589c, + 0x001e4000, + 0x001c58b8, + 0x40501000, + 0x40044084, + 0x40044100, + 0x40580010, + 0x40580018, + 0x00173274, + 0x40506000, + 0x00178bf0, + 0x00173250, + 0x001c58c8, + 0x40240014, + 0x001c58a4, + 0x001c5aac, + 0x001c5854, + 0x00173244, + 0x4ff0e92d, + 0x4abd4bbc, + 0xf852681b, + 0xf9b34020, + 0x4abb3000, + 0x8b02ed2d, + 0x02c0eb02, + 0xee082b00, + 0xb08b2a10, + 0xea4f4683, + 0xf2c005c0, + 0x462082c4, + 0xf8d0f669, + 0xf8534bb2, + 0xf1b9903b, + 0xf0000f00, + 0x230080e5, + 0x3303e9cd, + 0xf5059302, + 0x9301629e, + 0xf10b469a, + 0x9208039e, + 0x464c9305, + 0xf898e079, + 0x06df3004, + 0x80d2f140, + 0xf4226932, + 0x9a010900, + 0x9010f8c6, 
+ 0x065d3201, + 0xf1009201, + 0x4fa180e0, + 0xf8d74620, + 0x47983418, + 0xf4036b63, + 0xf5b31360, + 0xbf081f20, + 0xf3ef46a2, + 0x07d98310, + 0xb672d403, + 0x22014b99, + 0x4d99601a, + 0xee18682b, + 0x33010a10, + 0xf669602b, + 0x6b63f951, + 0x1360f403, + 0x1f60f5b3, + 0x80a9f000, + 0xb133682b, + 0x3b014a8f, + 0x602b6812, + 0xb102b90b, + 0xf8dfb662, + 0xf8d88240, + 0x78193000, + 0xf419b391, + 0xd1050300, + 0x002ef9b4, + 0x28008de2, + 0x817ef2c0, + 0x302cf894, + 0x202af894, + 0xf8b44884, + 0xeb03c008, + 0x441303c3, + 0x530ef203, + 0xf8502901, + 0xeba22023, + 0xf840020c, + 0xf0002023, + 0x6ce080d5, + 0xf650b138, + 0xf8d8fd4f, + 0x781e3000, + 0xf0002e01, + 0x4621810f, + 0xf66a4658, + 0x6b63fc95, + 0x1360f403, + 0x1f60f5b3, + 0x80eaf000, + 0xf8534b6d, + 0x2c00403b, + 0xf8d4d05c, + 0x6d268048, + 0x0f00f1b8, + 0xaf7ff47f, + 0x8310f3ef, + 0xd40307d9, + 0x4b67b672, + 0x601a2201, + 0x682b4d66, + 0x0a10ee18, + 0x602b3301, + 0xf8ecf669, + 0xb133682b, + 0x3b014a60, + 0x602b6812, + 0xb102b90b, + 0x4f5cb662, + 0x9178f8df, + 0x817cf8df, + 0xf6764620, + 0xf8d7ff21, + 0x46203418, + 0xf8d74798, + 0x220033ac, + 0xf18bfa5f, + 0x47984620, + 0x302cf894, + 0x102af894, + 0xeb038922, + 0x440b03c3, + 0x530ef203, + 0x1023f859, + 0xf8491a8a, + 0xf8d82023, + 0x781b3000, + 0xd0642b01, + 0xd0532b02, + 0xd1082b03, + 0x3054f894, + 0xf0002b01, + 0x8de381db, + 0xf10007d9, + 0x4621814c, + 0xf66a4658, + 0x4b3ffc31, + 0x403bf853, + 0xd1a22c00, + 0xecbdb00b, + 0xe8bd8b02, + 0x4b388ff0, + 0xf9b3681b, + 0x2b003000, + 0x808bf2c0, + 0xe9dd4650, + 0xf6761201, + 0x4640fee7, + 0xfbeef659, + 0xe9cd2300, + 0x469a3301, + 0xf8d8e742, + 0xb1b220dc, + 0x8ce38811, + 0x1311eba3, + 0x030bf3c3, + 0x71fef240, + 0xd816428b, + 0xea4f2b3f, + 0xd8121113, + 0x0241eb02, + 0x030ff003, + 0xfa428852, + 0x07d8f303, + 0x9b02d509, + 0x93023301, + 0x0304f44f, + 0x0903ea49, + 0x9010f8c6, + 0xf44fe6fb, + 0xe7f72380, + 0x3054f894, + 0xf0002b01, + 0x8de38117, + 0xd5ae07da, + 0xf6682080, + 0xf8d8ff75, + 0x781b3000, + 0xf894e79c, + 0x2b013054, + 0x80f7f000, + 0x07db8de3, + 0x2080d59f, + 0xff66f668, + 0x3000f8d8, + 0xe78b781b, + 0x3054f894, + 0xf47f2b01, + 0xf890af26, + 0xf0433f20, + 0xf1060302, + 0xf8800110, + 0x22043f20, + 0xf6512012, + 0x8ba1fd2b, + 0x46224809, + 0xf91ef66a, + 0xbf00e713, + 0x00173250, + 0x0003fc40, + 0x00175980, + 0x00171a30, + 0x0017569c, + 0x00172600, + 0x00173278, + 0x001c5904, + 0x00173274, + 0x2b009b03, + 0x808bf040, + 0xf48bfa5f, + 0xf6682080, + 0x4620ff2f, + 0xfebef659, + 0x93032300, + 0xf1bae706, + 0xf47f0f00, + 0x49aaaf71, + 0xf24048aa, + 0xf66a42e7, + 0xe769fac7, + 0xe9d34ba8, + 0x69130201, + 0x46804798, + 0xf43f2800, + 0xf650aee8, + 0x4603fd49, + 0xf0002800, + 0x22008096, + 0x600249a1, + 0x605a6808, + 0x609a4440, + 0xf3ef6018, + 0x07d28210, + 0x812ef140, + 0x6829489c, + 0x31016802, + 0x0201f042, + 0x60026029, + 0x68124a98, + 0xd4fb0790, + 0x68124a97, + 0xf0002a00, + 0x4e968116, + 0x2a006872, + 0x8149f000, + 0x4a926053, + 0x681289b0, + 0x4b906073, + 0x30013201, + 0x601a81b0, + 0x68134a8c, + 0x0301f023, + 0x29006013, + 0xaeadf43f, + 0x39014b8b, + 0x6029681b, + 0xf47f2900, + 0x2b00aea6, + 0xaea3f43f, + 0xe6a0b662, + 0x49866d20, + 0x64a36503, + 0xf8946103, + 0x63e3502b, + 0xf3c29b05, + 0x20a4020e, + 0x0201f042, + 0x3005fb10, + 0x63a4f44f, + 0x1305fb03, + 0xeb0185e2, + 0x4a7c00c0, + 0x46219304, + 0xffe2f668, + 0xf4036b63, + 0xf5b31360, + 0xd0021f60, + 0x93032301, + 0x9804e686, + 0xfd60f656, + 0xf43f2800, + 0x4d73af6f, + 0x31fff895, + 0xf47f2b00, + 0x9b04af69, + 0xf8cd9a08, + 0xfa5fb00c, + 0xf8ddf48b, + 0x46d39014, + 0x4698189e, + 
0xe00946ba, + 0xff74f668, + 0x3424f8da, + 0x46214638, + 0xf8954798, + 0xb92331ff, + 0x7039f858, + 0x2f004630, + 0x46dad1f0, + 0xb00cf8dd, + 0x2080e74a, + 0xfe7af668, + 0x4640e6af, + 0xfb92f650, + 0xf899e647, + 0x22043f20, + 0x0302f043, + 0x0110f106, + 0xf8892012, + 0xf6513f20, + 0x8de3fc43, + 0xf53f07da, + 0xe6fdaefc, + 0xf6539306, + 0x9b06f95d, + 0x28009007, + 0x80c0f000, + 0x4b509309, + 0x2a00681a, + 0x80c2f000, + 0xf6684618, + 0x9b07ff39, + 0xf04f2204, + 0x21120e00, + 0x70994684, + 0xf883701a, + 0xf883e001, + 0xf106e003, + 0x18980110, + 0xc018f8cd, + 0xfdc4f678, + 0x8a194b42, + 0xf5b19b09, + 0xd85a7fc3, + 0xb29b1c4b, + 0x00ca9309, + 0x4b3e9806, + 0x68188181, + 0x4b3d9907, + 0x0c02eb00, + 0x0e01f04f, + 0x1004f8cc, + 0x400b5881, + 0x6380f043, + 0x0308f043, + 0xf8995083, + 0x4a333782, + 0x82119909, + 0xf8894473, + 0x9b063782, + 0x22082100, + 0xc004f8c3, + 0xe00ef883, + 0x609a6019, + 0x8310f3ef, + 0xd40307db, + 0x4b25b672, + 0xe000f8c3, + 0x482a682b, + 0x33019906, + 0xf668602b, + 0xf8d7fea5, + 0x47983444, + 0xb133682b, + 0x3b014a1d, + 0x602b6812, + 0xb102b90b, + 0x8de3b662, + 0xf53f07d8, + 0xe67cae7b, + 0x69324b1f, + 0x601a681b, + 0xffaef64e, + 0x4b1de61d, + 0x421c681b, + 0xad37f47f, + 0x481b490a, + 0x6293f44f, + 0xf988f66a, + 0x2200e52f, + 0x46119309, + 0x4a17e7a4, + 0xbb6a6812, + 0x4e094a15, + 0xe6e86013, + 0x4a08b672, + 0xe6cd6016, + 0x001c5ad0, + 0x001c58ec, + 0x001755b4, + 0x001719e4, + 0x40240060, + 0x40240064, + 0x0017559c, + 0x0017569c, + 0x00177738, + 0x001c4001, + 0x00175780, + 0x0017469c, + 0x001755d4, + 0x31ff0000, + 0x001746a4, + 0x00180000, + 0x0017a5a0, + 0x001c58d4, + 0x40240068, + 0x9306480b, + 0xff78f669, + 0x9b066829, + 0x4809e7ca, + 0xf6699306, + 0x6829ff71, + 0xe6b09b06, + 0xb00b4806, + 0x8b02ecbd, + 0x4ff0e8bd, + 0xbcdef64f, + 0xe7f64803, + 0x001c591c, + 0x001c5924, + 0x001c5938, + 0x001c594c, + 0xf240b530, + 0xb0834003, + 0x4619460d, + 0xf6682308, + 0xe9d5f98d, + 0x46043200, + 0x3200e9c0, + 0xe9cd4611, + 0x48042200, + 0xff48f669, + 0xf6684620, + 0x2000f9af, + 0xbd30b003, + 0x001c5970, + 0x460cb570, + 0x4619b084, + 0x4012f240, + 0xf6682308, + 0x6822f971, + 0x429a4b12, + 0xd0104605, + 0x686168a3, + 0xe9c54810, + 0xe9cd2300, + 0x92003301, + 0xf669461a, + 0x4628ff27, + 0xf98ef668, + 0xb0042000, + 0xe9d4bd70, + 0x68116301, + 0x404b4808, + 0x404b4033, + 0xe9d46013, + 0x680a1300, + 0x68a29200, + 0xff12f669, + 0xe7dd6822, + 0x40344058, + 0x001c59cc, + 0x001c59b0, + 0xbf002332, + 0xf0133b01, + 0xd1fa03ff, + 0xbf004770, + 0xbf0023c8, + 0xf0133b01, + 0xd1fa03ff, + 0xbf004770, + 0x49724a71, + 0xf8df6813, + 0xf023c208, + 0xb5f00302, + 0x680b6013, + 0x4c6f4a6e, + 0xf4234f6f, + 0x600b6300, + 0x496e6813, + 0xf4232800, + 0xbf1c7380, + 0x468c4627, + 0x20326013, + 0x3801bf00, + 0x00fff010, + 0xf8dfd1fa, + 0x4a63e18c, + 0x3000f8de, + 0xf4234e65, + 0xf8ce5380, + 0x68133000, + 0x7300f423, + 0xf8de6013, + 0xf4433000, + 0xf8ce6380, + 0xf8de3000, + 0xf4433000, + 0xf8ce6300, + 0x46723000, + 0x1cc125ff, + 0x4664b2c9, + 0xf0236813, + 0x430b03ff, + 0xf8546013, + 0x60333b04, + 0xf4436813, + 0x60137380, + 0xbf00bf00, + 0xbf00bf00, + 0x049b6813, + 0x3901d5fc, + 0x428db2c9, + 0x3004d1e8, + 0x28803504, + 0xf10cb2ed, + 0xd1de0c10, + 0x3000f8de, + 0x6380f423, + 0x3000f8ce, + 0xbf0023c8, + 0xf0133b01, + 0xd1fa03ff, + 0x493f4c3e, + 0x48436822, + 0x6200f422, + 0x680a6022, + 0x0280f042, + 0x680a600a, + 0x7280f442, + 0x600a3f04, + 0xf022680a, + 0x431a021f, + 0xf857600a, + 0x60022f04, + 0xf042680a, + 0x600a0220, + 0xbf00bf00, + 0xbf00bf00, + 0x0552680a, + 0x3301d5fc, + 0xd1e92b10, + 0xf023680b, + 0x600b0380, 
+ 0xbf0023c8, + 0xf0133b01, + 0xd1fa03ff, + 0x4a2d4927, + 0xf423680b, + 0x600b7380, + 0xf4436813, + 0x60131300, + 0xbf002332, + 0xf0133b01, + 0xd1fa03ff, + 0x4a1f4b1e, + 0x4d256819, + 0x48264c25, + 0xf4414e26, + 0x60195180, + 0xf4416811, + 0x60117100, + 0xf4416819, + 0x60196100, + 0x4b216811, + 0x7180f441, + 0x682a6011, + 0xf442491f, + 0x602a5280, + 0xf4226822, + 0x60222280, + 0xf0226802, + 0x60025200, + 0x60306818, + 0x45bbf5a5, + 0x685c3d78, + 0x689d602c, + 0x4a16600d, + 0x601568dd, + 0x691d4815, + 0xe9d36005, + 0x4c145005, + 0x602569db, + 0x61136108, + 0xbf00bdf0, + 0x40580018, + 0x40344060, + 0x4034406c, + 0x001718a4, + 0x00171824, + 0x00171624, + 0x40344064, + 0x40344070, + 0x40344058, + 0x40342014, + 0x40342018, + 0x4034201c, + 0x4033c218, + 0x001718e4, + 0x4033c220, + 0x4033c224, + 0x4033c228, + 0x4033c22c, + 0x00171324, + 0x6c616821, + 0x6e6f615f, + 0x656d6974, + 0x69745f72, + 0x705f656d, + 0x28747361, + 0x656d6974, + 0x743e2d72, + 0x20656d69, + 0x3035202b, + 0x00293030, + 0x616d786e, + 0x69745f63, + 0x6f5f656d, + 0x69615f6e, + 0x61765f72, + 0x5f64696c, + 0x66746567, + 0x21202928, + 0x0030203d, + 0x6d697464, + 0x2c64253a, + 0x252c6425, + 0x64252c64, + 0x00000a0d, + 0x6f76282a, + 0x6974616c, + 0x7520656c, + 0x38746e69, + 0x2a20745f, + 0x5f672629, + 0x5f6e6f61, + 0x72616873, + 0x642e6465, + 0x5f6d6974, + 0x5f746e63, + 0x736e6f61, + 0x65726168, + 0x203c2064, + 0x6f76282a, + 0x6974616c, + 0x7520656c, + 0x38746e69, + 0x2a20745f, + 0x5f672629, + 0x5f6e6f61, + 0x72616873, + 0x642e6465, + 0x5f6d6974, + 0x69726570, + 0x615f646f, + 0x68736e6f, + 0x64657261, + 0x00000000, + 0x00002c4c, + 0x654b849b, + 0x78646979, + 0x766e6920, + 0x64696c61, + 0x3230252c, + 0x00000a58, + 0x6e49849b, + 0x696c6176, + 0x656b2064, + 0x78646979, + 0x0000000a, + 0x6e49849b, + 0x696c6176, + 0x54532064, + 0x64253a41, + 0x0000000a, + 0x5f666976, + 0x20617473, + 0x76203d3d, + 0x00006669, + 0x64253d54, + 0x0d64252c, + 0x0000000a, + 0x3a4e4342, + 0x0a0d6425, + 0x00000000, + 0x206e6362, + 0x656e6f64, + 0x6d697420, + 0x74756f65, + 0x00000a0d, + 0x20736366, + 0x0a0d6b6f, + 0x00000000, + 0x20736366, + 0x20746f6e, + 0x0a0d6b6f, + 0x00000000, + 0x656c6469, + 0x72726520, + 0x00000a0d, + 0x656c6469, + 0x746e6920, + 0x72726520, + 0x00000a0d, + 0x2c642564, + 0x00000000, + 0x0a0d6564, + 0x00000000, + 0x6c616821, + 0x63616d5f, + 0x745f7768, + 0x5f656d69, + 0x74736170, + 0x6d697428, + 0x3e2d7265, + 0x656d6974, + 0x67202d20, + 0x6669775f, + 0x65735f69, + 0x6e697474, + 0x702e7367, + 0x6f5f7277, + 0x5f6e6570, + 0x64737973, + 0x79616c65, + 0x00000029, + 0x78253d74, + 0x00000a0d, + 0x20746f6e, + 0x74736170, + 0x6425203a, + 0x0d64252c, + 0x0000000a, + 0x73736170, + 0x6425203a, + 0x0d64252c, + 0x0000000a, + 0x3a706c73, + 0x252c7825, + 0x000a0d78, + 0x655f656b, + 0x675f7476, + 0x29287465, + 0x65202620, + 0x625f7476, + 0x00007469, + 0x65647874, + 0x665f6373, + 0x74737269, + 0x203d2120, + 0x4c4c554e, + 0x00000000, + 0x73212121, + 0x20646e65, + 0x206d6663, + 0x78253a31, + 0x0d78252c, + 0x0000000a, + 0x0d677562, + 0x0000000a, + 0x6f696473, + 0x69617420, + 0x7265206c, + 0x0d726f72, + 0x0000000a, + 0x3a727265, + 0x206f6e20, + 0x2067736d, + 0x21746b70, + 0x00000a0d, + 0x21727265, + 0x74202121, + 0x63206c78, + 0x6e206d66, + 0x7562206f, + 0x72656666, + 0x726f6620, + 0x62737520, + 0x00000a0d, + 0x4244819d, + 0x57203a47, + 0x69746972, + 0x6d20676e, + 0x726f6d65, + 0x69772079, + 0x30206874, + 0x38302578, + 0x202f2078, + 0x203a6425, + 0x2578305b, + 0x5d783830, + 0x30203d20, + 0x38302578, + 0x202f2078, + 0x000a6425, + 0x6b73616d, + 0x69727720, + 
0x253a6574, + 0x78252c78, + 0x2c78252c, + 0x0a0d7825, + 0x00000000, + 0x4244819d, + 0x57203a47, + 0x69746972, + 0x6d20676e, + 0x726f6d65, + 0x69772079, + 0x6d206874, + 0x3a6b7361, + 0x30257830, + 0x202c7838, + 0x61746164, + 0x2578303a, + 0x20783830, + 0x6425202f, + 0x305b203a, + 0x38302578, + 0x3d205d78, + 0x25783020, + 0x20783830, + 0x6425202f, + 0x0000000a, + 0x5f6c6168, + 0x6863616d, + 0x78725f77, + 0x6e63625f, + 0x7275645f, + 0x6f697461, + 0x0000006e, + 0x5f6c6168, + 0x6863616d, + 0x6c735f77, + 0x5f706565, + 0x63656863, + 0x61705f6b, + 0x00686374, + 0x745f6d6d, + 0x5f747462, + 0x706d6f63, + 0x5f657475, + 0x63746170, + 0x00000068, + 0x735f6d6d, + 0x7065656c, + 0x6f666e69, + 0x5f78725f, + 0x5f747665, + 0x63746170, + 0x00000068, + 0x786e7772, + 0x656c735f, + 0x635f7065, + 0x61676b6c, + 0x635f6574, + 0x69666e6f, + 0x61705f67, + 0x00686374, + 0x786e7772, + 0x656c735f, + 0x645f7065, + 0x73706565, + 0x7065656c, + 0x6e6f635f, + 0x5f676966, + 0x63746170, + 0x00000068, + 0x5f6c7874, + 0x5f6d6663, + 0x5f747665, + 0x63746170, + 0x00000068, + +}; + +struct aic_feature_t { + int hwinfo; + int fwlog_en; +}; + +#define CHIP_REV_U02 0x3 +#define CHIP_REV_U03 0x7 +#define CHIP_SUB_REV_U04 0x20 +u8 chip_id = 0; +u8 chip_sub_id = 0; // rom_id for 8800dc +u8 chip_mcu_id = 0; + +#ifdef CONFIG_PMIC_SETTING +u32 syscfg_tbl_pmic_u02[][2] = { + {0x40040000, 0x00001AC8}, // 1) fix panic + {0x40040084, 0x00011580}, + {0x40040080, 0x00000001}, + {0x40100058, 0x00000000}, +}; +#endif /* CONFIG_PMIC_SETTING */ + +u32 syscfg_tbl_u04[][2] = { + {0x40040000, 0x0000042C}, // protect usb replenish rxq / flush rxq, skip flush rxq before start_app + {0x40040004, 0x0000DD44}, + {0x40040008, 0x00000448}, + {0x4004000C, 0x0000044C}, + {0x0019B800, 0xB9F0F19B}, + {0x0019B804, 0x0019B81D}, + {0x0019B808, 0xBF00FA79}, + {0x0019B80C, 0xF007BF00}, + {0x0019B810, 0x4605B672}, // code + {0x0019B814, 0x21E0F04F}, + {0x0019B818, 0xBE0BF664}, + {0x0019B81C, 0xF665B510}, + {0x0019B820, 0x4804FC9D}, + {0x0019B824, 0xFA9EF66C}, + {0x0019B828, 0xFCA8F665}, + {0x0019B82C, 0x4010E8BD}, + {0x0019B830, 0xBAC6F66C}, + {0x0019B834, 0x0019A0C4}, + {0x40040084, 0x0019B800}, // out base + {0x40040080, 0x0000000F}, + {0x40100058, 0x00000000}, +}; + +u32 syscfg_tbl[][2] = { + {0x40500014, 0x00000101}, // 1) + {0x40500018, 0x0000010D}, // 2) + {0x40500004, 0x00000010}, // 3) the order should not be changed + #ifdef CONFIG_PMIC_SETTING + {0x50000000, 0x03220204}, // 2) pmic interface init + {0x50019150, 0x00000002}, // 3) for 26m xtal, set div1 + {0x50017008, 0x00000000}, // 4) stop wdg + #endif /* CONFIG_PMIC_SETTING */ +}; + +u32 syscfg_tbl_masked[][3] = { + {0x40506024, 0x000000FF, 0x000000DF}, // for clk gate lp_level + #ifdef CONFIG_PMIC_SETTING + //{0x50017008, 0x00000002, 0x00000000}, // stop wdg + #endif /* CONFIG_PMIC_SETTING */ +}; + + +u32 rf_tbl_masked[][3] = { + {0x40344058, 0x00800000, 0x00000000},// pll trx +}; + +u32 wdt_tbl_masked[][3] = { + {0x4010300c, 0x00000001, 0x00000001}, +}; + +static void system_config_8800(struct rwnx_hw *rwnx_hw){ + int syscfg_num; + int ret, cnt; + const u32 mem_addr = 0x40500000; + struct dbg_mem_read_cfm rd_mem_addr_cfm; + ret = rwnx_send_dbg_mem_read_req(rwnx_hw, mem_addr, &rd_mem_addr_cfm); + if (ret) { + AICWFDBG(LOGERROR, "%x rd fail: %d\n", mem_addr, ret); + return; + } + chip_id = (u8)(rd_mem_addr_cfm.memdata >> 16); + //printk("%x=%x\n", rd_mem_addr_cfm.memaddr, rd_mem_addr_cfm.memdata); + ret = rwnx_send_dbg_mem_read_req(rwnx_hw, 0x00000004, &rd_mem_addr_cfm); + if (ret) { + AICWFDBG(LOGERROR, 
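/*
 * Note: the syscfg_tbl* arrays above are flat tables of { register address,
 * value } pairs that system_config_8800() writes one-by-one through
 * rwnx_send_dbg_mem_write_req(). The row count is computed as
 * sizeof(tbl) / sizeof(u32) / 2 because each row holds two u32 entries; the
 * same count could equivalently be written with the kernel's ARRAY_SIZE()
 * helper, e.g.:
 *
 *     syscfg_num = ARRAY_SIZE(syscfg_tbl);   // one row per {addr, value} pair
 *
 * This is only an equivalent formulation, not the code shipped in this commit.
 */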
"[0x00000004] rd fail: %d\n", ret); + return; + } + chip_sub_id = (u8)(rd_mem_addr_cfm.memdata >> 4); + //printk("%x=%x\n", rd_mem_addr_cfm.memaddr, rd_mem_addr_cfm.memdata); + AICWFDBG(LOGINFO, "chip_id=%x, chip_sub_id=%x\n", chip_id, chip_sub_id); + + +#ifdef CONFIG_PMIC_SETTING + if(rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8801){ + if (chip_id == CHIP_REV_U02) { + syscfg_num = sizeof(syscfg_tbl_pmic_u02) / sizeof(u32) / 2; + for (cnt = 0; cnt < syscfg_num; cnt++) { + ret = rwnx_send_dbg_mem_write_req(rwnx_hw, syscfg_tbl_pmic_u02[cnt][0], syscfg_tbl_pmic_u02[cnt][1]); + if (ret) { + AICWFDBG(LOGERROR, "%x write fail: %d\n", syscfg_tbl_pmic_u02[cnt][0], ret); + return; + } + } + } + } +#endif + if(rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8801){ + if ((chip_id == CHIP_REV_U03) && (chip_sub_id == CHIP_SUB_REV_U04)) { + syscfg_num = sizeof(syscfg_tbl_u04) / sizeof(u32) / 2; + AICWFDBG(LOGINFO, "cfg u04\n"); + for (cnt = 0; cnt < syscfg_num; cnt++) { + ret = rwnx_send_dbg_mem_write_req(rwnx_hw, syscfg_tbl_u04[cnt][0], syscfg_tbl_u04[cnt][1]); + if (ret) { + AICWFDBG(LOGERROR, "%x write fail: %d\n", syscfg_tbl_u04[cnt][0], ret); + return; + } + } + } + } + + syscfg_num = sizeof(syscfg_tbl) / sizeof(u32) / 2; + + for (cnt = 0; cnt < syscfg_num; cnt++) { + ret = rwnx_send_dbg_mem_write_req(rwnx_hw, syscfg_tbl[cnt][0], syscfg_tbl[cnt][1]); + if (ret) { + AICWFDBG(LOGERROR, "%x write fail: %d\n", syscfg_tbl[cnt][0], ret); + return; + } + } + syscfg_num = sizeof(syscfg_tbl_masked) / sizeof(u32) / 3; + for (cnt = 0; cnt < syscfg_num; cnt++) { + if (syscfg_tbl_masked[cnt][0] == 0x00000000) { + break; + } + + ret = rwnx_send_dbg_mem_mask_write_req(rwnx_hw, + syscfg_tbl_masked[cnt][0], syscfg_tbl_masked[cnt][1], syscfg_tbl_masked[cnt][2]); + if (ret) { + AICWFDBG(LOGERROR, "%x mask write fail: %d\n", syscfg_tbl_masked[cnt][0], ret); + return; + } + } + + +} + + +static void system_config(struct rwnx_hw *rwnx_hw) +{ + if(rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8801){ + system_config_8800(rwnx_hw); + }else if(rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800DC || + rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800DW){ + system_config_8800dc(rwnx_hw); + } +} + +static int wdt_config(struct rwnx_hw *rwnx_hw) +{ + int ret = 0; + ret = rwnx_send_dbg_mem_mask_write_req(rwnx_hw, + wdt_tbl_masked[0][0], wdt_tbl_masked[0][1], wdt_tbl_masked[0][2]); + if (ret) { + printk("wdt config %x write fail: %d\n", wdt_tbl_masked[0][0], ret); + } + return ret; +} +#if 0 +static void rf_config(struct rwnx_hw *rwnx_hw) +{ + int ret; + ret = rwnx_send_dbg_mem_mask_write_req(rwnx_hw, + rf_tbl_masked[0][0], rf_tbl_masked[0][1], rf_tbl_masked[0][2]); + if (ret) { + printk("rf config %x write fail: %d\n", rf_tbl_masked[0][0], ret); + } +} +#endif +#if 0 +static int start_from_bootrom(struct rwnx_hw *rwnx_hw) +{ + int ret = 0; + + /* memory access */ +#ifdef CONFIG_ROM_PATCH_EN + const u32 rd_addr = ROM_FMAC_FW_ADDR; + const u32 fw_addr = ROM_FMAC_FW_ADDR; +#else + const u32 rd_addr = RAM_FMAC_FW_ADDR; + const u32 fw_addr = RAM_FMAC_FW_ADDR; +#endif + struct dbg_mem_read_cfm rd_cfm; + printk("Read FW mem: %08x\n", rd_addr); + if ((ret = rwnx_send_dbg_mem_read_req(rwnx_hw, rd_addr, &rd_cfm))) { + return -1; + } + printk("cfm: [%08x] = %08x\n", rd_cfm.memaddr, rd_cfm.memdata); + + /* fw start */ + printk("Start app: %08x\n", fw_addr); + if ((ret = rwnx_send_dbg_start_app_req(rwnx_hw, fw_addr, HOST_START_APP_AUTO))) { + return -1; + } + return 0; +} +#endif +/** + * + */ + + + #if defined CONFIG_GPIO_WAKEUP || defined CONFIG_WOWLAN +static 
const struct wiphy_wowlan_support aic_wowlan_support = { + .flags = WIPHY_WOWLAN_ANY | WIPHY_WOWLAN_MAGIC_PKT, +}; +#endif + +extern int get_hardware_info(void); + +#ifdef AICWF_USB_SUPPORT +u32 usbcfg_tbl[][2] = { + {0x40200028, 0x0021047e}, + {0x40200024, 0x0000011d}, +}; + +static void aicwf_usb_config(struct rwnx_hw *rwnx_hw) +{ + int usbcfg_num = 0; + int ret = 0, cnt = 0; + struct dbg_mem_read_cfm rd_mem_addr_cfm; + const u32 mem_addr = 0x40200024; + + ret = rwnx_send_dbg_mem_read_req(rwnx_hw, mem_addr, &rd_mem_addr_cfm); + if (ret) { + AICWFDBG(LOGERROR, "%x rd fail: %d\n", mem_addr, ret); + return; + } + AICWFDBG(LOGINFO, "usb config read %x\n", rd_mem_addr_cfm.memdata); + if ((rd_mem_addr_cfm.memdata & 0xffff) == 0x119) { + cnt = 0; + usbcfg_num = sizeof(usbcfg_tbl) / sizeof(u32) / 2; + for (cnt = 0; cnt < usbcfg_num; cnt++) { + ret = rwnx_send_dbg_mem_write_req(rwnx_hw, usbcfg_tbl[cnt][0], usbcfg_tbl[cnt][1]); + if (ret) { + AICWFDBG(LOGERROR, "%x write fail: %d\n", usbcfg_tbl[cnt][0], ret); + return; + } + } + } +} +#endif // (AICWF_USB_SUPPORT) + +static int start_from_bootrom(struct rwnx_hw *rwnx_hw) +{ + int ret = 0; + + /* memory access */ +#ifdef CONFIG_ROM_PATCH_EN + const u32 rd_addr = RAM_LMAC_FW_ADDR; + const u32 fw_addr = RAM_LMAC_FW_ADDR; +#else + const u32 rd_addr = RAM_FMAC_FW_ADDR; + const u32 fw_addr = RAM_FMAC_FW_ADDR; +#endif + u32 boot_type; + struct dbg_mem_read_cfm rd_cfm; + AICWFDBG(LOGINFO, "Read FW mem: %08x\n", rd_addr); + if ((ret = rwnx_send_dbg_mem_read_req(rwnx_hw, rd_addr, &rd_cfm))) { + return -1; + } + AICWFDBG(LOGINFO, "cfm: [%08x] = %08x\n", rd_cfm.memaddr, rd_cfm.memdata); + + if (testmode == 0) { + boot_type = HOST_START_APP_DUMMY; + } else { + boot_type = HOST_START_APP_AUTO; + } + /* fw start */ + AICWFDBG(LOGINFO, "Start app: %08x, %d\n", fw_addr, boot_type); + if ((ret = rwnx_send_dbg_start_app_req(rwnx_hw, fw_addr, boot_type))) { + return -1; + } + return 0; +} + + +int rwnx_ic_system_init(struct rwnx_hw *rwnx_hw){ + + if(rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8801){ + system_config(rwnx_hw); + }else if(rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800DC || + rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800DW){ +#ifdef AICWF_USB_SUPPORT + aicwf_usb_config(rwnx_hw); +#endif + system_config(rwnx_hw); + if (rwnx_platform_on(rwnx_hw, NULL)) + return -1; + +#if defined(CONFIG_START_FROM_BOOTROM) + if (chip_sub_id < 2) { + if (wdt_config(rwnx_hw)) { + return -1; + } + } + if (start_from_bootrom(rwnx_hw)) + return -1; +#endif + }else if(rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800D81){ + rwnx_plat_userconfig_load_8800d80(rwnx_hw); + #ifdef CONFIG_POWER_LIMIT + rwnx_plat_powerlimit_load_8800d80(rwnx_hw); + #endif + }else if(rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800D81X2 || + rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800D89X2){ + rwnx_plat_userconfig_load_8800d80x2(rwnx_hw); + #ifdef CONFIG_POWER_LIMIT + rwnx_plat_powerlimit_load_8800d80x2(rwnx_hw); + #endif + } + + return 0; +} + + +int rwnx_ic_rf_init(struct rwnx_hw *rwnx_hw){ + struct mm_set_rf_calib_cfm cfm; + int ret = 0; +#ifdef CONFIG_5M10M + uint32_t hwconfig_id = 4; + int32_t param[1]; + param[0] = BWMODE10M; +#endif + if(rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8801){ + if ((ret = rwnx_send_txpwr_idx_req(rwnx_hw))) { + return -1; + } + + if ((ret = rwnx_send_txpwr_ofst_req(rwnx_hw))) { + return -1; + } + + if (testmode == 0) { + if ((ret = rwnx_send_rf_calib_req(rwnx_hw, &cfm))) + return -1; + } + + }else if(rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800DC || + rwnx_hw->usbdev->chipid == 
PRODUCT_ID_AIC8800DW){ + if ((ret = aicwf_set_rf_config_8800dc(rwnx_hw, &cfm))) + return -1; + }else if(rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800D81){ + if ((ret = aicwf_set_rf_config_8800d80(rwnx_hw, &cfm))) + return -1; + }else if(rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800D81X2 || + rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800D89X2){ + if ((ret = aicwf_set_rf_config_8800d80x2(rwnx_hw, &cfm))) + return -1; + } +#ifdef CONFIG_5M10M + rwnx_send_vendor_hwconfig_req(rwnx_hw, hwconfig_id, param, NULL); +#endif + return 0; +} +void aic_ipc_setting(struct rwnx_vif *rwnx_vif){ + struct rwnx_hw *rwnx_hw = rwnx_vif->rwnx_hw; + uint32_t hw_edca = 1; + uint32_t hw_cca = 3; + int32_t param[14]; + int32_t cca[5]= {0x10, 0, 0, 0, 0}; + + param[0] = 0xFA522; param[1] = 0xFA522; param[2] = 0xFA522; param[3] = 0xFA522; + param[4] = rwnx_vif->vif_index; + param[5] = 0x1e; param[6] = 0; param[7] = 0; param[8] =0;param[9] = 0x2;param[10] = 0x2;param[11] = 0x7;param[12] = 0;;param[13] = 1; + rwnx_send_vendor_hwconfig_req(rwnx_hw, hw_edca, param, NULL); + rwnx_send_vendor_hwconfig_req(rwnx_hw, hw_cca, cca, NULL); +} +extern int get_adap_test(void); + +extern void *aicwf_prealloc_txq_alloc(size_t size); +extern int aicwf_vendor_init(struct wiphy *wiphy); +extern char default_ccode[]; +int rwnx_cfg80211_init(struct rwnx_plat *rwnx_plat, void **platform_data) +{ + struct rwnx_hw *rwnx_hw; + struct rwnx_conf_file init_conf; + int ret = 0; + struct wiphy *wiphy; + struct rwnx_vif *vif; + int i; + u8 dflt_mac[ETH_ALEN] = { 0x88, 0x00, 0x33, 0x77, 0x10, 0x99}; + struct mm_get_fw_version_cfm fw_version; + u8_l mac_addr_efuse[ETH_ALEN]; + u8 mac_addr[ETH_ALEN]; +#ifndef USE_5G + struct aic_feature_t feature; +#endif + struct mm_set_stack_start_cfm set_start_cfm; +#ifdef CONFIG_TEMP_COMP + struct mm_set_vendor_swconfig_cfm swconfig_cfm; +#endif + + int nx_remote_sta_max = NX_REMOTE_STA_MAX; + RWNX_DBG(RWNX_FN_ENTRY_STR); + + +if((g_rwnx_plat->usbdev->chipid == PRODUCT_ID_AIC8801) || + ((g_rwnx_plat->usbdev->chipid == PRODUCT_ID_AIC8800DC || + g_rwnx_plat->usbdev->chipid == PRODUCT_ID_AIC8800DW) && chip_id < 3)){ + nx_remote_sta_max = NX_REMOTE_STA_MAX_FOR_OLD_IC; +} + + +//#ifndef CONFIG_RFTEST +#ifdef CONFIG_MAC_RANDOM_IF_NO_MAC_IN_EFUSE + get_random_bytes(&dflt_mac[4], 2); +#endif +//#endif +#ifdef CONFIG_POWER_LIMIT + memcpy(country_code, default_ccode, 4); +#endif + /* create a new wiphy for use with cfg80211 */ + AICWFDBG(LOGINFO, "%s sizeof(struct rwnx_hw):%d \r\n", __func__, (int)sizeof(struct rwnx_hw)); + wiphy = wiphy_new(&rwnx_cfg80211_ops, sizeof(struct rwnx_hw)); + //dev_set_name(&wiphy->dev,"aicphy%d",0); + + if (!wiphy) { + dev_err(rwnx_platform_get_dev(rwnx_plat), "Failed to create new wiphy\n"); + ret = -ENOMEM; + goto err_out; + } + + rwnx_hw = wiphy_priv(wiphy); + rwnx_hw->wiphy = wiphy; + rwnx_hw->plat = rwnx_plat; + rwnx_hw->dev = rwnx_platform_get_dev(rwnx_plat); +#ifdef AICWF_SDIO_SUPPORT + rwnx_hw->sdiodev = rwnx_plat->sdiodev; + rwnx_plat->sdiodev->rwnx_hw = rwnx_hw; + rwnx_hw->cmd_mgr = &rwnx_plat->sdiodev->cmd_mgr; +#else + rwnx_hw->usbdev = rwnx_plat->usbdev; + rwnx_plat->usbdev->rwnx_hw = rwnx_hw; + rwnx_hw->cmd_mgr = &rwnx_plat->usbdev->cmd_mgr; +#endif + rwnx_hw->mod_params = &rwnx_mod_params; + rwnx_hw->tcp_pacing_shift = 7; + +#ifdef CONFIG_SCHED_SCAN + rwnx_hw->is_sched_scan = false; +#endif//CONFIG_SCHED_SCAN + + aicwf_wakeup_lock_init(rwnx_hw); + + rwnx_init_aic(rwnx_hw); + /* set device pointer for wiphy */ + set_wiphy_dev(wiphy, rwnx_hw->dev); + + /* Create cache to allocate 
sw_txhdr */ + rwnx_hw->sw_txhdr_cache = KMEM_CACHE(rwnx_sw_txhdr, 0); + if (!rwnx_hw->sw_txhdr_cache) { + wiphy_err(wiphy, "Cannot allocate cache for sw TX header\n"); + ret = -ENOMEM; + goto err_cache; + } + + +#ifdef CONFIG_FILTER_TCP_ACK + AICWFDBG(LOGINFO, "%s: FILTER_TCP_ACK\n", __func__); + tcp_ack_init(rwnx_hw); +#endif + +#if 0 + if ((ret = rwnx_parse_configfile(rwnx_hw, RWNX_CONFIG_FW_NAME, &init_conf))) { + wiphy_err(wiphy, "rwnx_parse_configfile failed\n"); + goto err_config; + } +#else + memcpy(init_conf.mac_addr, dflt_mac, ETH_ALEN); + + if(wifi_mac_addr != 0){ + sscanf(wifi_mac_addr, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx",&mac_addr[0],&mac_addr[1],&mac_addr[2],&mac_addr[3],&mac_addr[4],&mac_addr[5]); + if (mac_addr[0] | mac_addr[1] | mac_addr[2] | mac_addr[3]) + memcpy(init_conf.mac_addr, mac_addr, ETH_ALEN); + } +#endif + + rwnx_hw->vif_started = 0; + rwnx_hw->monitor_vif = RWNX_INVALID_VIF; + rwnx_hw->adding_sta = false; + + rwnx_hw->scan_ie.addr = NULL; + + for (i = 0; i < NX_VIRT_DEV_MAX + nx_remote_sta_max; i++){ + rwnx_hw->avail_idx_map |= BIT(i); + } + + rwnx_hwq_init(rwnx_hw); + +#ifdef CONFIG_PREALLOC_TXQ + rwnx_hw->txq = (struct rwnx_txq*)aicwf_prealloc_txq_alloc(sizeof(struct rwnx_txq)*NX_NB_TXQ); +#endif + + for (i = 0; i < NX_NB_TXQ; i++) { + rwnx_hw->txq[i].idx = TXQ_INACTIVE; + } + + rwnx_mu_group_init(rwnx_hw); + + /* Initialize RoC element pointer to NULL, indicate that RoC can be started */ + rwnx_hw->roc_elem = NULL; + /* Cookie can not be 0 */ + rwnx_hw->roc_cookie_cnt = 1; + + INIT_LIST_HEAD(&rwnx_hw->vifs); + INIT_LIST_HEAD(&rwnx_hw->defrag_list); + spin_lock_init(&rwnx_hw->defrag_lock); + mutex_init(&rwnx_hw->mutex); + mutex_init(&rwnx_hw->dbgdump_elem.mutex); + spin_lock_init(&rwnx_hw->tx_lock); + spin_lock_init(&rwnx_hw->cb_lock); + + INIT_WORK(&rwnx_hw->apmStalossWork, apm_staloss_work_process); + rwnx_hw->apmStaloss_wq = create_singlethread_workqueue("apmStaloss_wq"); + if (!rwnx_hw->apmStaloss_wq) { + txrx_err("insufficient memory to create apmStaloss workqueue.\n"); + goto err_cache; + } + + wiphy->mgmt_stypes = rwnx_default_mgmt_stypes; + +#ifdef CONFIG_FWLOG_EN + rwnx_hw->fwlog_en = true; +#else + rwnx_hw->fwlog_en = false; +#endif + rwnx_hw->scanning = false; + rwnx_hw->p2p_working = false; + //init ic system + if((ret = rwnx_ic_system_init(rwnx_hw))){ + goto err_lmac_reqs; + } + +#ifdef USE_5G + if(rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800DC || + rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800DW){ + ret = rwnx_send_set_stack_start_req(rwnx_hw, 1, 0, 0, rwnx_hw->fwlog_en, &set_start_cfm); + } else { + ret = rwnx_send_set_stack_start_req(rwnx_hw, 1, 0, CO_BIT(5), rwnx_hw->fwlog_en, &set_start_cfm); + } +#else + if(rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800D81 || + rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800D81X2 || + rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800D89X2){ + ret = rwnx_send_set_stack_start_req(rwnx_hw, 1, 0, CO_BIT(5), rwnx_hw->fwlog_en, &set_start_cfm); + } else { + ret = rwnx_send_set_stack_start_req(rwnx_hw, 1, get_hardware_info(), feature.hwinfo, rwnx_hw->fwlog_en, &set_start_cfm); + } +#endif + + if (ret){ + goto err_lmac_reqs; + } + + AICWFDBG(LOGINFO, "is 5g support = %d, vendor_info = 0x%02X\n", set_start_cfm.is_5g_support, set_start_cfm.vendor_info); + rwnx_hw->band_5g_support = set_start_cfm.is_5g_support; + + ret = rwnx_send_get_fw_version_req(rwnx_hw, &fw_version); + memcpy(wiphy->fw_version, fw_version.fw_version, fw_version.fw_version_len>32? 
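/*
 * Note on the surrounding ternary: both arms evaluate the comparison
 * "fw_version_len > 32", so when the reported length is 32 or less the
 * expression yields 0 and nothing is copied into wiphy->fw_version. The
 * intent is presumably to clamp the copy to the 32-byte wiphy buffer; a
 * minimal sketch of that intent (assuming fw_version_len is the string
 * length reported by the firmware) could be:
 *
 *     size_t n = min_t(size_t, fw_version.fw_version_len,
 *                      sizeof(wiphy->fw_version) - 1);
 *     memcpy(wiphy->fw_version, fw_version.fw_version, n);
 *     wiphy->fw_version[n] = '\0';
 *
 * This is an illustrative sketch, not the code shipped in this commit.
 */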
32 : fw_version.fw_version_len>32); + AICWFDBG(LOGINFO, "Firmware Version: %s\r\n", fw_version.fw_version); + + wiphy->bands[NL80211_BAND_2GHZ] = &rwnx_band_2GHz; +//#ifdef USE_5G + if(rwnx_hw->band_5g_support){ + wiphy->bands[NL80211_BAND_5GHZ] = &rwnx_band_5GHz; + } +//#endif + wiphy->interface_modes = + BIT(NL80211_IFTYPE_STATION) | + BIT(NL80211_IFTYPE_AP) | + BIT(NL80211_IFTYPE_AP_VLAN) | + BIT(NL80211_IFTYPE_P2P_CLIENT) | + BIT(NL80211_IFTYPE_P2P_GO) | + #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0) + #ifndef CONFIG_USE_P2P0 + BIT(NL80211_IFTYPE_P2P_DEVICE) | + #endif + #endif + BIT(NL80211_IFTYPE_MONITOR); + +#if defined CONFIG_GPIO_WAKEUP || defined CONFIG_WOWLAN + /* Set WoWLAN flags */ + printk("%s Wowlan support\r\n", __func__); +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0) + wiphy->wowlan = &aic_wowlan_support; +#else + wiphy->wowlan.flags = aic_wowlan_support.flags; +#endif +#endif + + wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL | + #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0)) + WIPHY_FLAG_HAS_CHANNEL_SWITCH | + #endif + WIPHY_FLAG_4ADDR_STATION | + WIPHY_FLAG_4ADDR_AP; + + #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0) + wiphy->max_num_csa_counters = BCN_MAX_CSA_CPT; + #endif + + wiphy->max_remain_on_channel_duration = rwnx_hw->mod_params->roc_dur_max; + + wiphy->features |= NL80211_FEATURE_NEED_OBSS_SCAN | + NL80211_FEATURE_SK_TX_STATUS | + NL80211_FEATURE_VIF_TXPOWER | + #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0) + NL80211_FEATURE_ACTIVE_MONITOR | + #endif + #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0) + NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE | + #endif + 0; + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 17, 0) || defined(CONFIG_WPA3_FOR_OLD_KERNEL) + wiphy->features |= NL80211_FEATURE_SAE; +#endif + + if (rwnx_mod_params.tdls) + /* TDLS support */ + wiphy->features |= NL80211_FEATURE_TDLS_CHANNEL_SWITCH; + + wiphy->iface_combinations = rwnx_combinations; + /* -1 not to include combination with radar detection, will be re-added in + rwnx_handle_dynparams if supported */ + wiphy->n_iface_combinations = ARRAY_SIZE(rwnx_combinations) - 1; + wiphy->reg_notifier = rwnx_reg_notifier; + + wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM; + + rwnx_enable_wapi(rwnx_hw); + + wiphy->cipher_suites = cipher_suites; + wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites) - NB_RESERVED_CIPHER; + + rwnx_hw->ext_capa[0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING; + rwnx_hw->ext_capa[7] = WLAN_EXT_CAPA8_OPMODE_NOTIF; +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0) + wiphy->extended_capabilities = rwnx_hw->ext_capa; + wiphy->extended_capabilities_mask = rwnx_hw->ext_capa; + wiphy->extended_capabilities_len = ARRAY_SIZE(rwnx_hw->ext_capa); +#endif +#ifdef CONFIG_SCHED_SCAN +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) + wiphy->max_sched_scan_reqs = 1; +#endif + wiphy->max_sched_scan_ssids = SCAN_SSID_MAX;//16; + wiphy->max_match_sets = SCAN_SSID_MAX;//16; + wiphy->max_sched_scan_ie_len = 2048; +#endif//CONFIG_SCHED_SCAN + + tasklet_init(&rwnx_hw->task, rwnx_task, (unsigned long)rwnx_hw); + + //init ic rf + if((ret = rwnx_ic_rf_init(rwnx_hw))){ + goto err_lmac_reqs; + } + + if ((ret = rwnx_send_get_macaddr_req(rwnx_hw, (struct mm_get_mac_addr_cfm *)mac_addr_efuse))) + goto err_lmac_reqs; + if (mac_addr_efuse[0] | mac_addr_efuse[1] | mac_addr_efuse[2] | mac_addr_efuse[3]) + { + memcpy(init_conf.mac_addr, mac_addr_efuse, ETH_ALEN); + }else{ + AICWFDBG(LOGERROR, "no mac address in efuse!"); + } + + AICWFDBG(LOGINFO, "get macaddr:%x,%x\r\n", 
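/*
 * Note: the efuse MAC is treated as present when any of its first four bytes
 * is non-zero (mac_addr_efuse[0] | [1] | [2] | [3]); the wifi_mac_addr module
 * parameter above uses the same partial test. A stricter check could use the
 * kernel helper is_valid_ether_addr() from <linux/etherdevice.h>, which also
 * rejects all-zero and multicast addresses, e.g.:
 *
 *     if (is_valid_ether_addr(mac_addr_efuse))
 *             memcpy(init_conf.mac_addr, mac_addr_efuse, ETH_ALEN);
 *
 * This is only a suggested alternative, not the behaviour of this driver.
 */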
mac_addr_efuse[0], mac_addr_efuse[5]); + + + memcpy(wiphy->perm_addr, init_conf.mac_addr, ETH_ALEN); + + /* Reset FW */ + if ((ret = rwnx_send_reset(rwnx_hw))) + goto err_lmac_reqs; + +#ifdef CONFIG_TEMP_COMP + rwnx_send_set_temp_comp_req(rwnx_hw, &swconfig_cfm); +#endif + + if ((ret = rwnx_send_version_req(rwnx_hw, &rwnx_hw->version_cfm))) + goto err_lmac_reqs; + rwnx_set_vers(rwnx_hw); + + if ((ret = rwnx_handle_dynparams(rwnx_hw, rwnx_hw->wiphy))) + goto err_lmac_reqs; + + rwnx_enable_mesh(rwnx_hw); + rwnx_radar_detection_init(&rwnx_hw->radar); + +#ifdef CONFIG_RADAR_OR_IR_DETECT + rwnx_radar_set_domain(&rwnx_hw->radar, NL80211_DFS_FCC); +#endif + + /* Set parameters to firmware */ + + if (rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8801 || + ((rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800DC || + rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800DW || + rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800D81 || + rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800D81X2 || + rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800D89X2) && testmode == 0)) { + rwnx_send_me_config_req(rwnx_hw); + } + + /* Only monitor mode supported when custom channels are enabled */ + if (rwnx_mod_params.custchan) { + rwnx_limits[0].types = BIT(NL80211_IFTYPE_MONITOR); + rwnx_limits_dfs[0].types = BIT(NL80211_IFTYPE_MONITOR); + } + + aicwf_vendor_init(wiphy); + + if ((ret = wiphy_register(wiphy))) { + wiphy_err(wiphy, "Could not register wiphy device\n"); + goto err_register_wiphy; + } + + /* Update regulatory (if needed) and set channel parameters to firmware + (must be done after WiPHY registration) */ + rwnx_custregd(rwnx_hw, wiphy); + if (rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8801 || + ((rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800DC || + rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800DW || + rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800D81 || + rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800D81X2 || + rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800D89X2) && testmode == 0)) { + rwnx_send_me_chan_config_req(rwnx_hw, default_ccode); + } + *platform_data = rwnx_hw; + +#ifdef CONFIG_DEBUG_FS + if ((ret = rwnx_dbgfs_register(rwnx_hw, "rwnx"))) { + wiphy_err(wiphy, "Failed to register debugfs entries"); + goto err_debugfs; + } +#endif + rtnl_lock(); + + /* Add an initial station interface */ + vif = rwnx_interface_add(rwnx_hw, "wlan%d", NET_NAME_UNKNOWN, + NL80211_IFTYPE_STATION, NULL); + + #ifdef CONFIG_RWNX_MON_DATA + /* Add an initial station interface */ + vif = rwnx_interface_add(rwnx_hw, "wlan%d", 1, + NL80211_IFTYPE_MONITOR, NULL); + #endif + + rtnl_unlock(); + + if (!vif) { + wiphy_err(wiphy, "Failed to instantiate a network device\n"); + ret = -ENOMEM; + goto err_add_interface; + } + +#if 0 + wiphy_info(wiphy, "New interface create %s", vif->ndev->name); +#endif + + AICWFDBG(LOGINFO, "New interface create %s\r\n", vif->ndev->name); + +#ifdef CONFIG_USE_P2P0 + + rtnl_lock(); + /* Add an initial p2p0 interface */ + vif = rwnx_interface_add(rwnx_hw, "p2p%d", NET_NAME_UNKNOWN, + NL80211_IFTYPE_STATION, NULL); + vif->is_p2p_vif = 1; + rtnl_unlock(); + + if (!vif) { + wiphy_err(wiphy, "Failed to instantiate a network device\n"); + ret = -ENOMEM; + goto err_add_interface; + } + + //wiphy_info(wiphy, "New interface create %s", vif->ndev->name); + AICWFDBG(LOGINFO, "New interface create %s \r\n", vif->ndev->name); + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0) + init_timer(&rwnx_hw->p2p_alive_timer); + rwnx_hw->p2p_alive_timer.data = (unsigned long)vif; + rwnx_hw->p2p_alive_timer.function = aicwf_p2p_alive_timeout; +#else + 
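/*
 * Note: the version split above follows the kernel 4.15 timer API change.
 * Before 4.15 a timer carried a .data cookie and a callback taking an
 * unsigned long; from 4.15 on, timer_setup(timer, callback, flags) is used
 * and the callback receives a struct timer_list *, from which the enclosing
 * object is recovered with from_timer(). A minimal sketch of a new-style
 * callback (the field name p2p_alive_timer comes from this driver, the body
 * is illustrative only):
 *
 *     static void aicwf_p2p_alive_timeout(struct timer_list *t)
 *     {
 *             struct rwnx_hw *hw = from_timer(hw, t, p2p_alive_timer);
 *             // ... handle the keep-alive timeout ...
 *     }
 *
 * The actual callback is defined elsewhere in this driver and may differ.
 */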
timer_setup(&rwnx_hw->p2p_alive_timer, aicwf_p2p_alive_timeout, 0); +#endif + rwnx_hw->is_p2p_alive = 0; + rwnx_hw->is_p2p_connected = 0; + atomic_set(&rwnx_hw->p2p_alive_timer_count, 0); +#endif + +#ifdef CONFIG_DYNAMIC_PWR + rwnx_hw->pwrloss_lvl = 0; + rwnx_hw->sta_rssi_idx = 0; + rwnx_hw->read_rssi_vif = vif; +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0) + init_timer(&rwnx_hw->pwrloss_timer); + rwnx_hw->pwrloss_timer.data = (ulong) vif; + rwnx_hw->pwrloss_timer.function = aicwf_pwrloss_timer; +#else + timer_setup(&rwnx_hw->pwrloss_timer, aicwf_pwrloss_timer, 0); +#endif + INIT_WORK(&rwnx_hw->pwrloss_work, aicwf_pwrloss_worker); + mod_timer(&rwnx_hw->pwrloss_timer, jiffies + msecs_to_jiffies(RSSI_GET_INTERVAL)); +#endif + +#ifdef CONFIG_FOR_IPCAM + if(!testmode && !get_adap_test()) { + aic_ipc_setting(vif); + } +#endif +#ifdef CONFIG_WOWLAN + rwnx_send_set_pkt_filter_req(rwnx_hw, wowlan_param1); +#endif + +#ifdef CONFIG_BAND_STEERING + rwnx_hw->iface_idx = CONFIG_IFACE_NUMBER - 1; + aicwf_nl_init(); +#endif + + return 0; + +err_add_interface: +#ifdef CONFIG_DEBUG_FS + rwnx_dbgfs_unregister(rwnx_hw); +err_debugfs: +#endif +if(rwnx_hw->wiphy){ + wiphy_unregister(rwnx_hw->wiphy); +} +err_register_wiphy: +err_lmac_reqs: + AICWFDBG(LOGERROR, "err_lmac_reqs\n"); + flush_workqueue(rwnx_hw->apmStaloss_wq); + destroy_workqueue(rwnx_hw->apmStaloss_wq); + //rwnx_fw_trace_dump(rwnx_hw); + rwnx_platform_off(rwnx_hw, NULL); + kmem_cache_destroy(rwnx_hw->sw_txhdr_cache); +//err_platon: +//err_config: +err_cache: + aicwf_wakeup_lock_deinit(rwnx_hw); + wiphy_free(wiphy); +err_out: + return ret; +} + +/** + * + */ + +void rwnx_cfg80211_deinit(struct rwnx_hw *rwnx_hw) +{ + struct mm_set_stack_start_cfm set_start_cfm; + struct defrag_ctrl_info *defrag_ctrl = NULL; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + rwnx_send_set_stack_start_req(rwnx_hw, 0, 0, 0, 0, &set_start_cfm); + + rwnx_hw->fwlog_en = 0; + rwnx_hw->scanning = 0; + rwnx_hw->p2p_working = 0; + +#ifdef CONFIG_BAND_STEERING + aicwf_nl_deinit(); +#endif + +#ifdef CONFIG_DYNAMIC_PWR + if(timer_pending(&rwnx_hw->pwrloss_timer)){ + del_timer_sync(&rwnx_hw->pwrloss_timer);} + cancel_work_sync(&rwnx_hw->pwrloss_work); +#endif + + spin_lock_bh(&rwnx_hw->defrag_lock); + if (!list_empty(&rwnx_hw->defrag_list)) { + list_for_each_entry(defrag_ctrl, &rwnx_hw->defrag_list, list) { + list_del_init(&defrag_ctrl->list); + if (timer_pending(&defrag_ctrl->defrag_timer)) + del_timer_sync(&defrag_ctrl->defrag_timer); + dev_kfree_skb(defrag_ctrl->skb); + kfree(defrag_ctrl); + } + } + spin_unlock_bh(&rwnx_hw->defrag_lock); + +#ifdef CONFIG_DEBUG_FS + rwnx_dbgfs_unregister(rwnx_hw); +#endif + flush_workqueue(rwnx_hw->apmStaloss_wq); + destroy_workqueue(rwnx_hw->apmStaloss_wq); + + rwnx_wdev_unregister(rwnx_hw); + if(rwnx_hw->wiphy){ + AICWFDBG(LOGINFO, "%s wiphy_unregister \r\n", __func__); + wiphy_unregister(rwnx_hw->wiphy); + } + rwnx_radar_detection_deinit(&rwnx_hw->radar); + rwnx_platform_off(rwnx_hw, NULL); + kmem_cache_destroy(rwnx_hw->sw_txhdr_cache); +#ifdef CONFIG_FILTER_TCP_ACK + tcp_ack_deinit(rwnx_hw); +#endif + aicwf_wakeup_lock_deinit(rwnx_hw); + + if(rwnx_hw->wiphy){ + wiphy_free(rwnx_hw->wiphy); + } +} + +static void aicsmac_driver_register(void) +{ +#ifdef AICWF_SDIO_SUPPORT + aicwf_sdio_register(); +#endif +#ifdef AICWF_USB_SUPPORT + aicwf_usb_register(); +#endif +#ifdef AICWF_PCIE_SUPPORT + aicwf_pcie_register(); +#endif +} + +//static DECLARE_WORK(aicsmac_driver_work, aicsmac_driver_register); + +struct completion hostif_register_done; + +#define 
REGISTRATION_TIMEOUT 9000 + +void aicwf_hostif_ready(void) +{ + g_rwnx_plat->enabled = true; + complete(&hostif_register_done); +} + +static int __init rwnx_mod_init(void) +{ + + RWNX_DBG(RWNX_FN_ENTRY_STR); + rwnx_print_version(); + AICWFDBG(LOGINFO, "RELEASE DATE:%s \r\n", RELEASE_DATE); + rwnx_init_cmd_array(); + + sema_init(&aicwf_deinit_sem, 1); + atomic_set(&aicwf_deinit_atomic, 1); + + init_completion(&hostif_register_done); + + aicsmac_driver_register(); + +#ifdef AICWF_SDIO_SUPPORT + if ((wait_for_completion_timeout(&hostif_register_done, msecs_to_jiffies(REGISTRATION_TIMEOUT)) == 0)) { + AICWFDBG(LOGERROR, "register_driver timeout or error\n"); + aicwf_sdio_exit(); + return -ENODEV; +} + +#endif /* AICWF_SDIO_SUPPORT */ +#ifdef AICWF_USB_SUPPORT + //aicwf_usb_exit(); +#endif /*AICWF_USB_SUPPORT */ + + +#ifdef AICWF_PCIE_SUPPORT + return rwnx_platform_register_drv(); +#else + return 0; +#endif +} + +/** + * + */ +static void __exit rwnx_mod_exit(void) +{ + RWNX_DBG(RWNX_FN_ENTRY_STR); + +#ifdef AICWF_PCIE_SUPPORT + rwnx_platform_unregister_drv(); +#endif + +#ifdef AICWF_SDIO_SUPPORT + aicwf_sdio_exit(); +#endif + +#ifdef AICWF_USB_SUPPORT + aicwf_usb_exit(); +#endif + rwnx_free_cmd_array(); + AICWFDBG(LOGINFO, "%s exit\r\n", __func__); +} +module_param(wifi_mac_addr,charp, 0); +MODULE_PARM_DESC(wifi_mac_addr, "Configures mac addr."); +module_init(rwnx_mod_init); +module_exit(rwnx_mod_exit); +#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 13, 0) +MODULE_IMPORT_NS("VFS_internal_I_am_really_a_filesystem_and_am_NOT_a_driver"); +#elif LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0) +MODULE_IMPORT_NS(VFS_internal_I_am_really_a_filesystem_and_am_NOT_a_driver); +#endif +MODULE_FIRMWARE(RWNX_CONFIG_FW_NAME); + +MODULE_DESCRIPTION(RW_DRV_DESCRIPTION); +MODULE_VERSION(RWNX_VERS_MOD); +MODULE_AUTHOR(RW_DRV_COPYRIGHT " " RW_DRV_AUTHOR); +MODULE_LICENSE("GPL"); + diff --git a/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_main.h b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_main.h new file mode 100644 index 0000000..ad4e2fc --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_main.h @@ -0,0 +1,65 @@ +/** + ****************************************************************************** + * + * @file rwnx_main.h + * + * Copyright (C) RivieraWaves 2012-2019 + * + ****************************************************************************** + */ + +#ifndef _RWNX_MAIN_H_ +#define _RWNX_MAIN_H_ + +#include "rwnx_defs.h" + +int rwnx_cfg80211_init(struct rwnx_plat *rwnx_plat, void **platform_data); +void rwnx_cfg80211_deinit(struct rwnx_hw *rwnx_hw); +int rwnx_fill_station_info(struct rwnx_sta *sta, struct rwnx_vif *vif, + struct station_info *sinfo, u8 *phymode, u32 *tx_phyrate, u32 *rx_phyrate); + +extern int testmode; +extern u8 chip_id; +extern u8 chip_sub_id; +extern u8 chip_mcu_id; + +#define CHIP_ID_H_MASK 0xC0 +#define IS_CHIP_ID_H() ((chip_id & CHIP_ID_H_MASK) == CHIP_ID_H_MASK) + +#define RSSI_GET_INTERVAL (10 * 1000) //time interval +#define RSSI_THD_0 (-20) //rssi 0 (dBm) +#define RSSI_THD_1 (-30) //rssi 1 (dBm) +#define RSSI_THD_2 (-75) //rssi 2 (dBm) + +#define PWR_LOSS_LVL0 (-10) //RSSI > RSSI_THD_0 +#define PWR_LOSS_LVL1 (-5 ) //RSSI_THD_1 < RSSI ap.proxy_list, list) { + if (p_cur_proxy->local != local) { + continue; + } + + if (!memcmp(&p_cur_proxy->ext_sta_addr, p_sta_addr, ETH_ALEN)) { + p_mesh_proxy = p_cur_proxy; + break; + } + } + + /* Return the found information */ + return p_mesh_proxy; +} diff --git 
a/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_mesh.h b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_mesh.h new file mode 100644 index 0000000..db8950f --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_mesh.h @@ -0,0 +1,45 @@ +/** + **************************************************************************************** + * + * @file rwnx_mesh.h + * + * @brief VHT Beamformer function declarations + * + * Copyright (C) RivieraWaves 2016-2019 + * + **************************************************************************************** + */ + +#ifndef _RWNX_MESH_H_ +#define _RWNX_MESH_H_ + +/** + * INCLUDE FILES + **************************************************************************************** + */ + +#include "rwnx_defs.h" + +/** + * DEFINES + **************************************************************************************** + */ + +/** + * TYPE DEFINITIONS + **************************************************************************************** + */ + +/** + * FUNCTION DECLARATIONS + **************************************************************************************** + */ + +/** + **************************************************************************************** + * @brief TODO [LT] + **************************************************************************************** + */ +struct rwnx_mesh_proxy *rwnx_get_mesh_proxy_info(struct rwnx_vif *p_rwnx_vif, u8 *p_sta_addr, bool local); + +#endif /* _RWNX_MESH_H_ */ diff --git a/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_mod_params.c b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_mod_params.c new file mode 100644 index 0000000..132b8ef --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_mod_params.c @@ -0,0 +1,1863 @@ +/** +****************************************************************************** +* +* @file rwnx_mod_params.c +* +* @brief Set configuration according to modules parameters +* +* Copyright (C) RivieraWaves 2012-2019 +* +****************************************************************************** +*/ +#include +#include + +#include "rwnx_defs.h" +#include "rwnx_tx.h" +#include "hal_desc.h" +#include "rwnx_cfgfile.h" +#include "rwnx_dini.h" +#include "reg_access.h" +#include "rwnx_compat.h" + +#ifdef CONFIG_RWNX_FULLMAC +#define COMMON_PARAM(name, default_softmac, default_fullmac) \ + .name = default_fullmac, +#define SOFTMAC_PARAM(name, default) +#define FULLMAC_PARAM(name, default) .name = default, +#endif /* CONFIG_RWNX_FULLMAC */ + +extern int reg_regdb_size; + +struct rwnx_mod_params rwnx_mod_params = { + /* common parameters */ + COMMON_PARAM(ht_on, true, true) + COMMON_PARAM(vht_on, true, true) + COMMON_PARAM(he_on, true, true) + COMMON_PARAM(mcs_map, IEEE80211_VHT_MCS_SUPPORT_0_9, IEEE80211_VHT_MCS_SUPPORT_0_9) + COMMON_PARAM(he_mcs_map, IEEE80211_HE_MCS_SUPPORT_0_11, IEEE80211_HE_MCS_SUPPORT_0_11) + COMMON_PARAM(he_ul_on, false, false) + COMMON_PARAM(ldpc_on, true, true) + COMMON_PARAM(stbc_on, true, true) + COMMON_PARAM(gf_rx_on, false, false) + COMMON_PARAM(phy_cfg, 2, 2) + COMMON_PARAM(uapsd_timeout, 300, 300) + COMMON_PARAM(ap_uapsd_on, true, true) + COMMON_PARAM(sgi, true, true) + COMMON_PARAM(sgi80, true, true) + COMMON_PARAM(use_2040, 1, 1) + COMMON_PARAM(nss, 2, 2) + COMMON_PARAM(amsdu_rx_max, 1, 1) + COMMON_PARAM(bfmee, true, true) + COMMON_PARAM(bfmer, false, false) + COMMON_PARAM(mesh, true, true) + COMMON_PARAM(murx, true, true) + COMMON_PARAM(mutx, true, true) + COMMON_PARAM(mutx_on, true, true) + COMMON_PARAM(use_80, 
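/*
 * Note: with CONFIG_RWNX_FULLMAC defined, COMMON_PARAM(name, softmac, fullmac)
 * expands to ".name = fullmac,", SOFTMAC_PARAM() expands to nothing and
 * FULLMAC_PARAM(name, def) expands to ".name = def,", so this initializer
 * only ever uses the fullmac defaults. For example:
 *
 *     COMMON_PARAM(ht_on, true, true)   // becomes  .ht_on = true,
 *     SOFTMAC_PARAM(mfp_on, false)      // becomes  (nothing)
 *
 * These expansions follow directly from the #define block at the top of this
 * file.
 */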
true, true) + COMMON_PARAM(custregd, true, true) + COMMON_PARAM(custchan, false, false) + COMMON_PARAM(roc_dur_max, 500, 500) + COMMON_PARAM(listen_itv, 0, 0) + COMMON_PARAM(listen_bcmc, true, true) + COMMON_PARAM(lp_clk_ppm, 20, 20) + COMMON_PARAM(ps_on, true, true) + COMMON_PARAM(tx_lft, RWNX_TX_LIFETIME_MS, RWNX_TX_LIFETIME_MS) + COMMON_PARAM(amsdu_maxnb, NX_TX_PAYLOAD_MAX, NX_TX_PAYLOAD_MAX) + // By default, only enable UAPSD for Voice queue (see IEEE80211_DEFAULT_UAPSD_QUEUE comment) + COMMON_PARAM(uapsd_queues, IEEE80211_WMM_IE_STA_QOSINFO_AC_VO, IEEE80211_WMM_IE_STA_QOSINFO_AC_VO) + COMMON_PARAM(tdls, false, false) + COMMON_PARAM(uf, false, false) + COMMON_PARAM(auto_reply, false, false) + COMMON_PARAM(ftl, "", "") + COMMON_PARAM(dpsm, false, false) + + /* SOFTMAC only parameters */ + SOFTMAC_PARAM(mfp_on, false) + SOFTMAC_PARAM(gf_on, false) + SOFTMAC_PARAM(bwsig_on, true) + SOFTMAC_PARAM(dynbw_on, true) + SOFTMAC_PARAM(agg_tx, true) + SOFTMAC_PARAM(amsdu_force, 2) + SOFTMAC_PARAM(rc_probes_on, false) + SOFTMAC_PARAM(cmon, true) + SOFTMAC_PARAM(hwscan, true) + SOFTMAC_PARAM(autobcn, true) + SOFTMAC_PARAM(dpsm, true) + + /* FULLMAC only parameters */ + FULLMAC_PARAM(ant_div, true) +}; + +#ifdef CONFIG_RWNX_FULLMAC +/* FULLMAC specific parameters*/ +module_param_named(ant_div, rwnx_mod_params.ant_div, bool, S_IRUGO); +MODULE_PARM_DESC(ant_div, "Enable Antenna Diversity (Default: 1)"); +#endif /* CONFIG_RWNX_FULLMAC */ + +module_param_named(ht_on, rwnx_mod_params.ht_on, bool, S_IRUGO); +MODULE_PARM_DESC(ht_on, "Enable HT (Default: 1)"); + +module_param_named(vht_on, rwnx_mod_params.vht_on, bool, S_IRUGO); +MODULE_PARM_DESC(vht_on, "Enable VHT (Default: 1)"); + +module_param_named(he_on, rwnx_mod_params.he_on, bool, S_IRUGO); +MODULE_PARM_DESC(he_on, "Enable HE (Default: 1)"); + +module_param_named(mcs_map, rwnx_mod_params.mcs_map, int, S_IRUGO); +MODULE_PARM_DESC(mcs_map, "VHT MCS map value 0: MCS0_7, 1: MCS0_8, 2: MCS0_9" + " (Default: 2)"); + +module_param_named(he_mcs_map, rwnx_mod_params.he_mcs_map, int, S_IRUGO); +MODULE_PARM_DESC(he_mcs_map, "HE MCS map value 0: MCS0_7, 1: MCS0_9, 2: MCS0_11" + " (Default: 2)"); + +module_param_named(he_ul_on, rwnx_mod_params.he_ul_on, bool, S_IRUGO); +MODULE_PARM_DESC(he_ul_on, "Enable HE OFDMA UL (Default: 0)"); + +module_param_named(amsdu_maxnb, rwnx_mod_params.amsdu_maxnb, int, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(amsdu_maxnb, "Maximum number of MSDUs inside an A-MSDU in TX: (Default: NX_TX_PAYLOAD_MAX)"); + +module_param_named(ps_on, rwnx_mod_params.ps_on, bool, S_IRUGO); +MODULE_PARM_DESC(ps_on, "Enable PowerSaving (Default: 1-Enabled)"); + +module_param_named(tx_lft, rwnx_mod_params.tx_lft, int, 0644); +MODULE_PARM_DESC(tx_lft, "Tx lifetime (ms) - setting it to 0 disables retries " + "(Default: "__stringify(RWNX_TX_LIFETIME_MS)")"); + +module_param_named(ldpc_on, rwnx_mod_params.ldpc_on, bool, S_IRUGO); +MODULE_PARM_DESC(ldpc_on, "Enable LDPC (Default: 1)"); + +module_param_named(stbc_on, rwnx_mod_params.stbc_on, bool, S_IRUGO); +MODULE_PARM_DESC(stbc_on, "Enable STBC in RX (Default: 1)"); + +module_param_named(gf_rx_on, rwnx_mod_params.gf_rx_on, bool, S_IRUGO); +MODULE_PARM_DESC(gf_rx_on, "Enable HT greenfield in reception (Default: 1)"); + +module_param_named(phycfg, rwnx_mod_params.phy_cfg, int, S_IRUGO); +MODULE_PARM_DESC(phycfg, + "0 <= phycfg <= 5 : RF Channel Conf (Default: 2(C0-A1-B2))"); + +module_param_named(uapsd_timeout, rwnx_mod_params.uapsd_timeout, int, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(uapsd_timeout, + "UAPSD Timer 
timeout, in ms (Default: 300). If 0, UAPSD is disabled"); + +module_param_named(uapsd_queues, rwnx_mod_params.uapsd_queues, int, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(uapsd_queues, "UAPSD Queues, integer value, must be seen as a bitfield\n" + " Bit 0 = VO\n" + " Bit 1 = VI\n" + " Bit 2 = BK\n" + " Bit 3 = BE\n" + " -> uapsd_queues=7 will enable uapsd for VO, VI and BK queues"); + +module_param_named(ap_uapsd_on, rwnx_mod_params.ap_uapsd_on, bool, S_IRUGO); +MODULE_PARM_DESC(ap_uapsd_on, "Enable UAPSD in AP mode (Default: 1)"); + +module_param_named(sgi, rwnx_mod_params.sgi, bool, S_IRUGO); +MODULE_PARM_DESC(sgi, "Advertise Short Guard Interval support (Default: 1)"); + +module_param_named(sgi80, rwnx_mod_params.sgi80, bool, S_IRUGO); +MODULE_PARM_DESC(sgi80, "Advertise Short Guard Interval support for 80MHz (Default: 1)"); + +module_param_named(use_2040, rwnx_mod_params.use_2040, bool, S_IRUGO); +MODULE_PARM_DESC(use_2040, "Use tweaked 20-40MHz mode (Default: 1)"); + +module_param_named(use_80, rwnx_mod_params.use_80, bool, S_IRUGO); +MODULE_PARM_DESC(use_80, "Enable 80MHz (Default: 1)"); + +module_param_named(custregd, rwnx_mod_params.custregd, bool, S_IRUGO); +MODULE_PARM_DESC(custregd, + "Use permissive custom regulatory rules (for testing ONLY) (Default: 0)"); + +module_param_named(custchan, rwnx_mod_params.custchan, bool, S_IRUGO); +MODULE_PARM_DESC(custchan, + "Extend channel set to non-standard channels (for testing ONLY) (Default: 0)"); + +module_param_named(nss, rwnx_mod_params.nss, int, S_IRUGO); +MODULE_PARM_DESC(nss, "1 <= nss <= 2 : Supported number of Spatial Streams (Default: 1)"); + +module_param_named(amsdu_rx_max, rwnx_mod_params.amsdu_rx_max, int, S_IRUGO); +MODULE_PARM_DESC(amsdu_rx_max, "0 <= amsdu_rx_max <= 2 : Maximum A-MSDU size supported in RX\n" + " 0: 3895 bytes\n" + " 1: 7991 bytes\n" + " 2: 11454 bytes\n" + " This value might be reduced according to the FW capabilities.\n" + " Default: 2"); + +module_param_named(bfmee, rwnx_mod_params.bfmee, bool, S_IRUGO); +MODULE_PARM_DESC(bfmee, "Enable Beamformee Capability (Default: 1-Enabled)"); + +module_param_named(bfmer, rwnx_mod_params.bfmer, bool, S_IRUGO); +MODULE_PARM_DESC(bfmer, "Enable Beamformer Capability (Default: 0-Disabled)"); + +module_param_named(mesh, rwnx_mod_params.mesh, bool, S_IRUGO); +MODULE_PARM_DESC(mesh, "Enable Meshing Capability (Default: 1-Enabled)"); + +module_param_named(murx, rwnx_mod_params.murx, bool, S_IRUGO); +MODULE_PARM_DESC(murx, "Enable MU-MIMO RX Capability (Default: 1-Enabled)"); + +module_param_named(mutx, rwnx_mod_params.mutx, bool, S_IRUGO); +MODULE_PARM_DESC(mutx, "Enable MU-MIMO TX Capability (Default: 1-Enabled)"); + +module_param_named(mutx_on, rwnx_mod_params.mutx_on, bool, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(mutx_on, "Enable MU-MIMO transmissions (Default: 1-Enabled)"); + +module_param_named(roc_dur_max, rwnx_mod_params.roc_dur_max, int, S_IRUGO); +MODULE_PARM_DESC(roc_dur_max, "Maximum Remain on Channel duration"); + +module_param_named(listen_itv, rwnx_mod_params.listen_itv, int, S_IRUGO); +MODULE_PARM_DESC(listen_itv, "Maximum listen interval"); + +module_param_named(listen_bcmc, rwnx_mod_params.listen_bcmc, bool, S_IRUGO); +MODULE_PARM_DESC(listen_bcmc, "Wait for BC/MC traffic following DTIM beacon"); + +module_param_named(lp_clk_ppm, rwnx_mod_params.lp_clk_ppm, int, S_IRUGO); +MODULE_PARM_DESC(lp_clk_ppm, "Low Power Clock accuracy of the local device"); + +module_param_named(tdls, rwnx_mod_params.tdls, bool, S_IRUGO); +MODULE_PARM_DESC(tdls, "Enable TDLS (Default: 
1-Enabled)"); + +module_param_named(uf, rwnx_mod_params.uf, bool, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(uf, "Enable Unsupported HT Frame Logging (Default: 0-Disabled)"); + +module_param_named(auto_reply, rwnx_mod_params.auto_reply, bool, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(auto_reply, "Enable Monitor MacAddr Auto-Reply (Default: 0-Disabled)"); + +module_param_named(ftl, rwnx_mod_params.ftl, charp, S_IRUGO); +MODULE_PARM_DESC(ftl, "Firmware trace level (Default: \"\")"); + +module_param_named(dpsm, rwnx_mod_params.dpsm, bool, S_IRUGO); +MODULE_PARM_DESC(dpsm, "Enable Dynamic PowerSaving (Default: 1-Enabled)"); + +#ifdef DEFAULT_COUNTRY_CODE +char default_ccode[4] = DEFAULT_COUNTRY_CODE; +#else +char default_ccode[4] = "00"; +#endif + +char country_code[4]; +module_param_string(country_code, country_code, 4, 0600); + +#define RWNX_REG_RULE(start, end, bw, reg_flags) REG_RULE(start, end, bw, 0, 0, reg_flags) +struct rwnx_regdomain { + char country_code[4]; + struct ieee80211_regdomain *prRegdRules; +}; + +static const int mcs_map_to_rate[4][3] = { + [PHY_CHNL_BW_20][IEEE80211_VHT_MCS_SUPPORT_0_7] = 65, + [PHY_CHNL_BW_20][IEEE80211_VHT_MCS_SUPPORT_0_8] = 78, + [PHY_CHNL_BW_20][IEEE80211_VHT_MCS_SUPPORT_0_9] = 78, + [PHY_CHNL_BW_40][IEEE80211_VHT_MCS_SUPPORT_0_7] = 135, + [PHY_CHNL_BW_40][IEEE80211_VHT_MCS_SUPPORT_0_8] = 162, + [PHY_CHNL_BW_40][IEEE80211_VHT_MCS_SUPPORT_0_9] = 180, + [PHY_CHNL_BW_80][IEEE80211_VHT_MCS_SUPPORT_0_7] = 292, + [PHY_CHNL_BW_80][IEEE80211_VHT_MCS_SUPPORT_0_8] = 351, + [PHY_CHNL_BW_80][IEEE80211_VHT_MCS_SUPPORT_0_9] = 390, + [PHY_CHNL_BW_160][IEEE80211_VHT_MCS_SUPPORT_0_7] = 585, + [PHY_CHNL_BW_160][IEEE80211_VHT_MCS_SUPPORT_0_8] = 702, + [PHY_CHNL_BW_160][IEEE80211_VHT_MCS_SUPPORT_0_9] = 780, +}; + +#define MAX_VHT_RATE(map, nss, bw) (mcs_map_to_rate[bw][map] * (nss)) + +extern struct ieee80211_regdomain *reg_regdb[]; + +char ccode_channels[200]; +int index_for_channel_list = 0; +module_param_string(ccode_channels, ccode_channels, 200, 0600); + +void rwnx_get_countrycode_channels(struct wiphy *wiphy, + struct ieee80211_regdomain *regdomain){ + enum nl80211_band band; + struct ieee80211_supported_band *sband; + int channel_index; + int rule_index; + int band_num = 0; + int rule_num = regdomain->n_reg_rules; + int start_freq = 0; + int end_freq = 0; + int center_freq = 0; + char channel[4]; +#ifdef CONFIG_USE_WIRELESS_EXT + struct rwnx_hw *rwnx_hw = wiphy_priv(wiphy); + int support_freqs_counter = 0; +#endif + + band_num = NUM_NL80211_BANDS; + + memset(ccode_channels, 0, 200); + + index_for_channel_list = 0; + + for (band = 0; band < band_num; band++) { + sband = wiphy->bands[band];// bands: 0:2.4G 1:5G 2:60G + if (!sband) + continue; + + for (channel_index = 0; channel_index < sband->n_channels; channel_index++) { + for(rule_index = 0; rule_index < rule_num; rule_index++){ + start_freq = regdomain->reg_rules[rule_index].freq_range.start_freq_khz/1000; + end_freq = regdomain->reg_rules[rule_index].freq_range.end_freq_khz/1000; + center_freq = sband->channels[channel_index].center_freq; + if((center_freq - 10) >= start_freq && (center_freq + 10) <= end_freq){ +#ifdef CONFIG_USE_WIRELESS_EXT + rwnx_hw->support_freqs[support_freqs_counter++] = center_freq; +#endif + sprintf(channel, "%d",ieee80211_frequency_to_channel(center_freq)); + + memcpy(ccode_channels + index_for_channel_list, channel, strlen(channel)); + + index_for_channel_list += strlen(channel); + + memcpy(ccode_channels + index_for_channel_list, " ", 1); + + index_for_channel_list += 1; + break; + + } + } + 
} + } +#ifdef CONFIG_USE_WIRELESS_EXT + rwnx_hw->support_freqs_number = support_freqs_counter; +#endif + AICWFDBG(LOGINFO, "%s support channel:%s\r\n", __func__, ccode_channels); +} + + +struct ieee80211_regdomain *getRegdomainFromRwnxDBIndex(struct wiphy *wiphy, + int index) +{ + u8 idx; + + idx = index; + + memset(country_code, 0, 4); + country_code[0] = reg_regdb[idx]->alpha2[0]; + country_code[1] = reg_regdb[idx]->alpha2[1]; + + AICWFDBG(LOGINFO, "%s set ccode:%s \r\n", __func__, country_code); + + rwnx_get_countrycode_channels(wiphy, reg_regdb[idx]); + + return reg_regdb[idx]; +} + +extern int reg_regdb_size; + +struct ieee80211_regdomain *getRegdomainFromRwnxDB(struct wiphy *wiphy, + char *alpha2) +{ + u8 idx; + + memset(country_code, 0, 4); + + AICWFDBG(LOGINFO, "%s set ccode:%s \r\n", __func__, alpha2); + + idx = 0; + + while (reg_regdb[idx] && idx < reg_regdb_size){ + if((reg_regdb[idx]->alpha2[0] == alpha2[0]) && + (reg_regdb[idx]->alpha2[1] == alpha2[1])){ + memcpy(country_code, alpha2, 2); + rwnx_get_countrycode_channels(wiphy, reg_regdb[idx]); + return reg_regdb[idx]; + } + idx++; + } + + AICWFDBG(LOGERROR, "%s(): Error, wrong country = %s\n", + __func__, alpha2); + AICWFDBG(LOGINFO, "Set as default 00\n"); + + memcpy(country_code, default_ccode, sizeof(default_ccode)); + rwnx_get_countrycode_channels(wiphy, reg_regdb[0]); + + return reg_regdb[0]; +} + + +/** + * Do some sanity check + * + */ +#if 0 +static int rwnx_check_fw_hw_feature(struct rwnx_hw *rwnx_hw, + struct wiphy *wiphy) +{ + u32_l sys_feat = rwnx_hw->version_cfm.features; + u32_l mac_feat = rwnx_hw->version_cfm.version_machw_1; + u32_l phy_feat = rwnx_hw->version_cfm.version_phy_1; + u32_l phy_vers = rwnx_hw->version_cfm.version_phy_2; + u16_l max_sta_nb = rwnx_hw->version_cfm.max_sta_nb; + u8_l max_vif_nb = rwnx_hw->version_cfm.max_vif_nb; + int bw, res = 0; + int amsdu_rx; + + if (!rwnx_hw->mod_params->custregd) + rwnx_hw->mod_params->custchan = false; + + if (rwnx_hw->mod_params->custchan) { + rwnx_hw->mod_params->mesh = false; + rwnx_hw->mod_params->tdls = false; + } + +#ifdef CONFIG_RWNX_FULLMAC + + if (!(sys_feat & BIT(MM_FEAT_UMAC_BIT))) { + wiphy_err(wiphy, + "Loading softmac firmware with fullmac driver\n"); + res = -1; + } + + if (!(sys_feat & BIT(MM_FEAT_ANT_DIV_BIT))) { + rwnx_hw->mod_params->ant_div = false; + } + +#endif /* CONFIG_RWNX_FULLMAC */ + + if (!(sys_feat & BIT(MM_FEAT_VHT_BIT))) { + rwnx_hw->mod_params->vht_on = false; + } + + // Check if HE is supported + if (!(sys_feat & BIT(MM_FEAT_HE_BIT))) { + rwnx_hw->mod_params->he_on = false; + rwnx_hw->mod_params->he_ul_on = false; + } + + if (!(sys_feat & BIT(MM_FEAT_PS_BIT))) { + rwnx_hw->mod_params->ps_on = false; + } + + /* AMSDU (non)support implies different shared structure definition + so insure that fw and drv have consistent compilation option */ + if (sys_feat & BIT(MM_FEAT_AMSDU_BIT)) { +#ifndef CONFIG_RWNX_SPLIT_TX_BUF + wiphy_err(wiphy, + "AMSDU enabled in firmware but support not compiled in driver\n"); + res = -1; +#else + if (rwnx_hw->mod_params->amsdu_maxnb > NX_TX_PAYLOAD_MAX) + rwnx_hw->mod_params->amsdu_maxnb = NX_TX_PAYLOAD_MAX; +#endif /* CONFIG_RWNX_SPLIT_TX_BUF */ + } else { +#ifdef CONFIG_RWNX_SPLIT_TX_BUF + wiphy_err(wiphy, + "AMSDU disabled in firmware but support compiled in driver\n"); + res = -1; +#endif /* CONFIG_RWNX_SPLIT_TX_BUF */ + } + + if (!(sys_feat & BIT(MM_FEAT_UAPSD_BIT))) { + rwnx_hw->mod_params->uapsd_timeout = 0; + } + + if (!(sys_feat & BIT(MM_FEAT_BFMEE_BIT))) { + rwnx_hw->mod_params->bfmee = false; + 
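/*
 * Note: this (compiled-out) sanity check reconciles firmware capabilities
 * with the module parameters: any feature the firmware does not advertise in
 * version_cfm.features, tested as sys_feat & BIT(MM_FEAT_xxx_BIT), is
 * force-disabled in rwnx_mod_params before the corresponding capability would
 * be exposed through cfg80211, while mismatches that require matching
 * compile-time support (AMSDU, BFMER, MON_DATA, MU-MIMO TX) make the check
 * fail with res = -1.
 */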
} + + if ((sys_feat & BIT(MM_FEAT_BFMER_BIT))) { +#ifndef CONFIG_RWNX_BFMER + wiphy_err(wiphy, + "BFMER enabled in firmware but support not compiled in driver\n"); + res = -1; +#endif /* CONFIG_RWNX_BFMER */ + // Check PHY and MAC HW BFMER support and update parameter accordingly + if (!(phy_feat & MDM_BFMER_BIT) || !(mac_feat & NXMAC_BFMER_BIT)) { + rwnx_hw->mod_params->bfmer = false; + // Disable the feature in the bitfield so that it won't be displayed + sys_feat &= ~BIT(MM_FEAT_BFMER_BIT); + } + } else { +#ifdef CONFIG_RWNX_BFMER + wiphy_err(wiphy, + "BFMER disabled in firmware but support compiled in driver\n"); + res = -1; +#else + rwnx_hw->mod_params->bfmer = false; +#endif /* CONFIG_RWNX_BFMER */ + } + + if (!(sys_feat & BIT(MM_FEAT_MESH_BIT))) { + rwnx_hw->mod_params->mesh = false; + } + + if (!(sys_feat & BIT(MM_FEAT_TDLS_BIT))) { + rwnx_hw->mod_params->tdls = false; + } + + if (!(sys_feat & BIT(MM_FEAT_UF_BIT))) { + rwnx_hw->mod_params->uf = false; + } + +#ifdef CONFIG_RWNX_FULLMAC + if ((sys_feat & BIT(MM_FEAT_MON_DATA_BIT))) { +#ifndef CONFIG_RWNX_MON_DATA + wiphy_err(wiphy, + "Monitor+Data interface support (MON_DATA) is enabled in firmware but support not compiled in driver\n"); + res = -1; +#endif /* CONFIG_RWNX_MON_DATA */ + } else { +#ifdef CONFIG_RWNX_MON_DATA + wiphy_err(wiphy, + "Monitor+Data interface support (MON_DATA) disabled in firmware but support compiled in driver\n"); + res = -1; +#endif /* CONFIG_RWNX_MON_DATA */ + } +#endif + + // Check supported AMSDU RX size + amsdu_rx = (sys_feat >> MM_AMSDU_MAX_SIZE_BIT0) & 0x03; + if (amsdu_rx < rwnx_hw->mod_params->amsdu_rx_max) { + rwnx_hw->mod_params->amsdu_rx_max = amsdu_rx; + } + + // Check supported BW + bw = (phy_feat & MDM_CHBW_MASK) >> MDM_CHBW_LSB; + // Check if 80MHz BW is supported + if (bw < 2) { + rwnx_hw->mod_params->use_80 = false; + } + // Check if 40MHz BW is supported + if (bw < 1) + rwnx_hw->mod_params->use_2040 = false; + + // 80MHz BW shall be disabled if 40MHz is not enabled + if (!rwnx_hw->mod_params->use_2040) + rwnx_hw->mod_params->use_80 = false; + + // Check if HT is supposed to be supported. 
If not, disable VHT/HE too + if (!rwnx_hw->mod_params->ht_on) + { + rwnx_hw->mod_params->vht_on = false; + rwnx_hw->mod_params->he_on = false; + rwnx_hw->mod_params->he_ul_on = false; + rwnx_hw->mod_params->use_80 = false; + rwnx_hw->mod_params->use_2040 = false; + } + + // LDPC is mandatory for HE40 and above, so if LDPC is not supported, then disable + // HE to use HT/VHT only + if (rwnx_hw->mod_params->use_2040 && !rwnx_hw->mod_params->ldpc_on) + { + rwnx_hw->mod_params->he_on = false; + rwnx_hw->mod_params->he_ul_on = false; + } + + // HT greenfield is not supported in modem >= 3.0 + if (__MDM_MAJOR_VERSION(phy_vers) > 0) { + rwnx_hw->mod_params->gf_rx_on = false; + } + + if (!(sys_feat & BIT(MM_FEAT_MU_MIMO_RX_BIT)) || + !rwnx_hw->mod_params->bfmee) { + rwnx_hw->mod_params->murx = false; + } + + if ((sys_feat & BIT(MM_FEAT_MU_MIMO_TX_BIT))) { +#ifndef CONFIG_RWNX_MUMIMO_TX + wiphy_err(wiphy, + "MU-MIMO TX enabled in firmware but support not compiled in driver\n"); + res = -1; +#endif /* CONFIG_RWNX_MUMIMO_TX */ + if (!rwnx_hw->mod_params->bfmer) + rwnx_hw->mod_params->mutx = false; + // Check PHY and MAC HW MU-MIMO TX support and update parameter accordingly + else if (!(phy_feat & MDM_MUMIMOTX_BIT) || !(mac_feat & NXMAC_MU_MIMO_TX_BIT)) { + rwnx_hw->mod_params->mutx = false; + // Disable the feature in the bitfield so that it won't be displayed + sys_feat &= ~BIT(MM_FEAT_MU_MIMO_TX_BIT); + } + } else { +#ifdef CONFIG_RWNX_MUMIMO_TX + wiphy_err(wiphy, + "MU-MIMO TX disabled in firmware but support compiled in driver\n"); + res = -1; +#else + rwnx_hw->mod_params->mutx = false; +#endif /* CONFIG_RWNX_MUMIMO_TX */ + } + + if (sys_feat & BIT(MM_FEAT_WAPI_BIT)) { + rwnx_enable_wapi(rwnx_hw); + } + +#ifdef CONFIG_RWNX_FULLMAC + if (sys_feat & BIT(MM_FEAT_MFP_BIT)) { + rwnx_enable_mfp(rwnx_hw); + } +#endif + +#ifdef CONFIG_RWNX_FULLMAC +#define QUEUE_NAME "Broadcast/Multicast queue " +#endif /* CONFIG_RWNX_FULLMAC */ + + if (sys_feat & BIT(MM_FEAT_BCN_BIT)) { +#if NX_TXQ_CNT == 4 + wiphy_err(wiphy, QUEUE_NAME + "enabled in firmware but support not compiled in driver\n"); + res = -1; +#endif /* NX_TXQ_CNT == 4 */ + } else { +#if NX_TXQ_CNT == 5 + wiphy_err(wiphy, QUEUE_NAME + "disabled in firmware but support compiled in driver\n"); + res = -1; +#endif /* NX_TXQ_CNT == 5 */ + } +#undef QUEUE_NAME + +#ifdef CONFIG_RWNX_RADAR + if (sys_feat & BIT(MM_FEAT_RADAR_BIT)) { + /* Enable combination with radar detection */ + wiphy->n_iface_combinations++; + } +#endif /* CONFIG_RWNX_RADAR */ + +#ifndef CONFIG_RWNX_SDM + switch (__MDM_PHYCFG_FROM_VERS(phy_feat)) { + case MDM_PHY_CONFIG_TRIDENT: + case MDM_PHY_CONFIG_ELMA: + rwnx_hw->mod_params->nss = 1; + break; + case MDM_PHY_CONFIG_KARST: + { + int nss_supp = (phy_feat & MDM_NSS_MASK) >> MDM_NSS_LSB; + if (rwnx_hw->mod_params->nss > nss_supp) + rwnx_hw->mod_params->nss = nss_supp; + } + break; + default: + WARN_ON(1); + break; + } +#endif /* CONFIG_RWNX_SDM */ + + if (rwnx_hw->mod_params->nss < 1 || rwnx_hw->mod_params->nss > 2) + rwnx_hw->mod_params->nss = 1; + + if (rwnx_hw->mod_params->phy_cfg < 0 || rwnx_hw->mod_params->phy_cfg > 5) + rwnx_hw->mod_params->phy_cfg = 2; + + if (rwnx_hw->mod_params->mcs_map < 0 || rwnx_hw->mod_params->mcs_map > 2) + rwnx_hw->mod_params->mcs_map = 0; + + wiphy_info(wiphy, "PHY features: [NSS=%d][CHBW=%d]%s%s\n", + rwnx_hw->mod_params->nss, + 20 * (1 << ((phy_feat & MDM_CHBW_MASK) >> MDM_CHBW_LSB)), + rwnx_hw->mod_params->ldpc_on ? "[LDPC]" : "", + rwnx_hw->mod_params->he_on ? 
"[HE]" : ""); + +#define PRINT_RWNX_FEAT(feat) \ + (sys_feat & BIT(MM_FEAT_##feat##_BIT) ? "["#feat"]" : "") + + wiphy_info(wiphy, "FW features: %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n", + PRINT_RWNX_FEAT(BCN), + PRINT_RWNX_FEAT(AUTOBCN), + PRINT_RWNX_FEAT(HWSCAN), + PRINT_RWNX_FEAT(CMON), + PRINT_RWNX_FEAT(MROLE), + PRINT_RWNX_FEAT(RADAR), + PRINT_RWNX_FEAT(PS), + PRINT_RWNX_FEAT(UAPSD), + PRINT_RWNX_FEAT(DPSM), + PRINT_RWNX_FEAT(AMPDU), + PRINT_RWNX_FEAT(AMSDU), + PRINT_RWNX_FEAT(CHNL_CTXT), + PRINT_RWNX_FEAT(REORD), + PRINT_RWNX_FEAT(P2P), + PRINT_RWNX_FEAT(P2P_GO), + PRINT_RWNX_FEAT(UMAC), + PRINT_RWNX_FEAT(VHT), + PRINT_RWNX_FEAT(HE), + PRINT_RWNX_FEAT(BFMEE), + PRINT_RWNX_FEAT(BFMER), + PRINT_RWNX_FEAT(WAPI), + PRINT_RWNX_FEAT(MFP), + PRINT_RWNX_FEAT(MU_MIMO_RX), + PRINT_RWNX_FEAT(MU_MIMO_TX), + PRINT_RWNX_FEAT(MESH), + PRINT_RWNX_FEAT(TDLS), + PRINT_RWNX_FEAT(ANT_DIV)); +#undef PRINT_RWNX_FEAT + + if(max_sta_nb != NX_REMOTE_STA_MAX) + { + wiphy_err(wiphy, "Different number of supported stations between driver and FW (%d != %d)\n", + NX_REMOTE_STA_MAX, max_sta_nb); + res = -1; + } + + if(max_vif_nb != NX_VIRT_DEV_MAX) + { + wiphy_err(wiphy, "Different number of supported virtual interfaces between driver and FW (%d != %d)\n", + NX_VIRT_DEV_MAX, max_vif_nb); + res = -1; + } + + return res; +} +#endif + + +static void rwnx_set_vht_capa(struct rwnx_hw *rwnx_hw, struct wiphy *wiphy) +{ +#ifdef CONFIG_VHT_FOR_OLD_KERNEL + #ifdef USE_5G + struct ieee80211_supported_band *band_5GHz = wiphy->bands[NL80211_BAND_5GHZ]; + #endif + struct ieee80211_supported_band *band_2GHz = wiphy->bands[NL80211_BAND_2GHZ]; + + int i; + int nss = rwnx_hw->mod_params->nss; + int mcs_map; + int mcs_map_max; + int bw_max; + + if (!rwnx_hw->mod_params->vht_on) { + return; + } + + if(rwnx_hw->usbdev->chipid <= PRODUCT_ID_AIC8800D81) + nss = 1; + + rwnx_hw->vht_cap_2G.vht_supported = true; + if (rwnx_hw->mod_params->sgi80) + rwnx_hw->vht_cap_2G.cap |= IEEE80211_VHT_CAP_SHORT_GI_80; + if (rwnx_hw->mod_params->stbc_on) + rwnx_hw->vht_cap_2G.cap |= IEEE80211_VHT_CAP_RXSTBC_1; + if (rwnx_hw->mod_params->ldpc_on) + rwnx_hw->vht_cap_2G.cap |= IEEE80211_VHT_CAP_RXLDPC; + if (rwnx_hw->mod_params->bfmee) { + rwnx_hw->vht_cap_2G.cap |= IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE; + #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0) + rwnx_hw->vht_cap_2G.cap |= 3 << IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT; + #else + rwnx_hw->vht_cap_2G.cap |= 3 << 13; + #endif + } + if((rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800D81X2 || + rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800D89X2) && + rwnx_hw->mod_params->stbc_on == true){ //if (nss > 1) + rwnx_hw->vht_cap_2G.cap |= IEEE80211_VHT_CAP_TXSTBC; + } + // Update the AMSDU max RX size (not shifted as located at offset 0 of the VHT cap) + rwnx_hw->vht_cap_2G.cap |= rwnx_hw->mod_params->amsdu_rx_max; + + if (rwnx_hw->mod_params->bfmer) { + rwnx_hw->vht_cap_2G.cap |= IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE; + /* Set number of sounding dimensions */ + #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0) + rwnx_hw->vht_cap_2G.cap |= (nss - 1) << IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT; + #else + rwnx_hw->vht_cap_2G.cap |= (nss - 1) << 16; + #endif + } + if (rwnx_hw->mod_params->murx) + rwnx_hw->vht_cap_2G.cap |= IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE; + if (rwnx_hw->mod_params->mutx) + rwnx_hw->vht_cap_2G.cap |= IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE; + + /* + * MCS map: + * This capabilities are filled according to the mcs_map module parameter. 
+ * However currently we have some limitations due to FPGA clock constraints + * that prevent always using the range of MCS that is defined by the + * parameter: + * - in RX, 2SS, we support up to MCS7 + * - in TX, 2SS, we support up to MCS8 + */ + // Get max supported BW + if (rwnx_hw->mod_params->use_80) + bw_max = PHY_CHNL_BW_80; + else if (rwnx_hw->mod_params->use_2040) + bw_max = PHY_CHNL_BW_40; + else + bw_max = PHY_CHNL_BW_20; + + // Check if MCS map should be limited to MCS0_8 due to the standard. Indeed in BW20, + // MCS9 is not supported in 1 and 2 SS + if (rwnx_hw->mod_params->use_2040) + mcs_map_max = IEEE80211_VHT_MCS_SUPPORT_0_9; + else + mcs_map_max = IEEE80211_VHT_MCS_SUPPORT_0_8; + + mcs_map = min_t(int, rwnx_hw->mod_params->mcs_map, mcs_map_max); + rwnx_hw->vht_cap_2G.vht_mcs.rx_mcs_map = cpu_to_le16(0); + for (i = 0; i < nss; i++) { + rwnx_hw->vht_cap_2G.vht_mcs.rx_mcs_map |= cpu_to_le16(mcs_map << (i*2)); + rwnx_hw->vht_cap_2G.vht_mcs.rx_highest = MAX_VHT_RATE(mcs_map, nss, bw_max); + //mcs_map = IEEE80211_VHT_MCS_SUPPORT_0_9; + } + for (; i < 8; i++) { + rwnx_hw->vht_cap_2G.vht_mcs.rx_mcs_map |= cpu_to_le16( + IEEE80211_VHT_MCS_NOT_SUPPORTED << (i*2)); + } + + mcs_map = min_t(int, rwnx_hw->mod_params->mcs_map, mcs_map_max); + rwnx_hw->vht_cap_2G.vht_mcs.tx_mcs_map = cpu_to_le16(0); + for (i = 0; i < nss; i++) { + rwnx_hw->vht_cap_2G.vht_mcs.tx_mcs_map |= cpu_to_le16(mcs_map << (i*2)); + rwnx_hw->vht_cap_2G.vht_mcs.tx_highest = MAX_VHT_RATE(mcs_map, nss, bw_max); + //mcs_map = min_t(int, rwnx_hw->mod_params->mcs_map, + // IEEE80211_VHT_MCS_SUPPORT_0_9); + } + for (; i < 8; i++) { + rwnx_hw->vht_cap_2G.vht_mcs.tx_mcs_map |= cpu_to_le16( + IEEE80211_VHT_MCS_NOT_SUPPORTED << (i*2)); + } + + if (!rwnx_hw->mod_params->use_80) { +#ifdef CONFIG_VENDOR_RWNX_VHT_NO80 + rwnx_hw->vht_cap_2G.cap |= IEEE80211_VHT_CAP_NOT_SUP_WIDTH_80; +#endif + rwnx_hw->vht_cap_2G.cap &= ~IEEE80211_VHT_CAP_SHORT_GI_80; + } + + rwnx_hw->vht_cap_2G.cap |= IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK; + printk("%s, vht_capa_info=0x%x\n", __func__, rwnx_hw->vht_cap_2G.cap); +#ifdef USE_5G + if (rwnx_hw->band_5g_support) { + rwnx_hw->vht_cap_5G.vht_supported = true; + if (rwnx_hw->mod_params->sgi80) + rwnx_hw->vht_cap_5G.cap |= IEEE80211_VHT_CAP_SHORT_GI_80; + if (rwnx_hw->mod_params->stbc_on) + rwnx_hw->vht_cap_5G.cap |= IEEE80211_VHT_CAP_RXSTBC_1; + if (rwnx_hw->mod_params->ldpc_on) + rwnx_hw->vht_cap_5G.cap |= IEEE80211_VHT_CAP_RXLDPC; + if (rwnx_hw->mod_params->bfmee) { + rwnx_hw->vht_cap_5G.cap |= IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE; + #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0) + rwnx_hw->vht_cap_5G.cap |= 3 << IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT; + #else + rwnx_hw->vht_cap_5G.cap |= 3 << 13; + #endif + } + if((rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800D81X2 || + rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800D89X2) && + rwnx_hw->mod_params->stbc_on == true){ //if (nss > 1) + rwnx_hw->vht_cap_5G.cap |= IEEE80211_VHT_CAP_TXSTBC; + } + + // Update the AMSDU max RX size (not shifted as located at offset 0 of the VHT cap) + rwnx_hw->vht_cap_5G.cap |= rwnx_hw->mod_params->amsdu_rx_max; + + if (rwnx_hw->mod_params->bfmer) { + rwnx_hw->vht_cap_5G.cap |= IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE; + /* Set number of sounding dimensions */ + #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0) + rwnx_hw->vht_cap_5G.cap |= (nss - 1) << IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT; + #else + rwnx_hw->vht_cap_5G.cap |= (nss - 1) << 16; + #endif + } + if (rwnx_hw->mod_params->murx) + 
rwnx_hw->vht_cap_5G.cap |= IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE; + if (rwnx_hw->mod_params->mutx) + rwnx_hw->vht_cap_5G.cap |= IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE; + + /* + * MCS map: + * This capabilities are filled according to the mcs_map module parameter. + * However currently we have some limitations due to FPGA clock constraints + * that prevent always using the range of MCS that is defined by the + * parameter: + * - in RX, 2SS, we support up to MCS7 + * - in TX, 2SS, we support up to MCS8 + */ + // Get max supported BW + if (rwnx_hw->mod_params->use_80) + bw_max = PHY_CHNL_BW_80; + else if (rwnx_hw->mod_params->use_2040) + bw_max = PHY_CHNL_BW_40; + else + bw_max = PHY_CHNL_BW_20; + + // Check if MCS map should be limited to MCS0_8 due to the standard. Indeed in BW20, + // MCS9 is not supported in 1 and 2 SS + if (rwnx_hw->mod_params->use_2040) + mcs_map_max = IEEE80211_VHT_MCS_SUPPORT_0_9; + else + mcs_map_max = IEEE80211_VHT_MCS_SUPPORT_0_8; + + mcs_map = min_t(int, rwnx_hw->mod_params->mcs_map, mcs_map_max); + rwnx_hw->vht_cap_5G.vht_mcs.rx_mcs_map = cpu_to_le16(0); + for (i = 0; i < nss; i++) { + rwnx_hw->vht_cap_5G.vht_mcs.rx_mcs_map |= cpu_to_le16(mcs_map << (i*2)); + rwnx_hw->vht_cap_5G.vht_mcs.rx_highest = MAX_VHT_RATE(mcs_map, nss, bw_max); + //mcs_map = IEEE80211_VHT_MCS_SUPPORT_0_9; + } + for (; i < 8; i++) { + rwnx_hw->vht_cap_5G.vht_mcs.rx_mcs_map |= cpu_to_le16( + IEEE80211_VHT_MCS_NOT_SUPPORTED << (i*2)); + } + + mcs_map = min_t(int, rwnx_hw->mod_params->mcs_map, mcs_map_max); + rwnx_hw->vht_cap_5G.vht_mcs.tx_mcs_map = cpu_to_le16(0); + for (i = 0; i < nss; i++) { + rwnx_hw->vht_cap_5G.vht_mcs.tx_mcs_map |= cpu_to_le16(mcs_map << (i*2)); + rwnx_hw->vht_cap_5G.vht_mcs.tx_highest = MAX_VHT_RATE(mcs_map, nss, bw_max); + //mcs_map = min_t(int, rwnx_hw->mod_params->mcs_map, + // IEEE80211_VHT_MCS_SUPPORT_0_9); + } + for (; i < 8; i++) { + rwnx_hw->vht_cap_5G.vht_mcs.tx_mcs_map |= cpu_to_le16( + IEEE80211_VHT_MCS_NOT_SUPPORTED << (i*2)); + } + + if (!rwnx_hw->mod_params->use_80) { +#ifdef CONFIG_VENDOR_RWNX_VHT_NO80 + rwnx_hw->vht_cap_5G.cap |= IEEE80211_VHT_CAP_NOT_SUP_WIDTH_80; +#endif + rwnx_hw->vht_cap_5G.cap &= ~IEEE80211_VHT_CAP_SHORT_GI_80; + } + + rwnx_hw->vht_cap_5G.cap |= IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK; + } +#endif + return; +#endif + + //#ifdef USE_5G + struct ieee80211_supported_band *band_5GHz = wiphy->bands[NL80211_BAND_5GHZ]; + //#endif + struct ieee80211_supported_band *band_2GHz = wiphy->bands[NL80211_BAND_2GHZ]; + + int i; + int nss = rwnx_hw->mod_params->nss; + int mcs_map; + int mcs_map_max; + int bw_max; + + if (!rwnx_hw->mod_params->vht_on) { + return; + } + + if(rwnx_hw->usbdev->chipid <= PRODUCT_ID_AIC8800D81){ + nss = 1; + } + + band_2GHz->vht_cap.vht_supported = true; + if (rwnx_hw->mod_params->sgi80) + band_2GHz->vht_cap.cap |= IEEE80211_VHT_CAP_SHORT_GI_80; + if (rwnx_hw->mod_params->stbc_on) + band_2GHz->vht_cap.cap |= IEEE80211_VHT_CAP_RXSTBC_1; + if (rwnx_hw->mod_params->ldpc_on) + band_2GHz->vht_cap.cap |= IEEE80211_VHT_CAP_RXLDPC; + if (rwnx_hw->mod_params->bfmee) { + band_2GHz->vht_cap.cap |= IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE; + #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0) + band_2GHz->vht_cap.cap |= 3 << IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT; + #else + band_2GHz->vht_cap.cap |= 3 << 13; + #endif + } + if((rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800D81X2 || + rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800D89X2) && + rwnx_hw->mod_params->stbc_on == true){ //if (nss > 1) + band_2GHz->vht_cap.cap |= 
IEEE80211_VHT_CAP_TXSTBC; + } + // Update the AMSDU max RX size (not shifted as located at offset 0 of the VHT cap) + band_2GHz->vht_cap.cap |= rwnx_hw->mod_params->amsdu_rx_max; + + if (rwnx_hw->mod_params->bfmer) { + band_2GHz->vht_cap.cap |= IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE; + /* Set number of sounding dimensions */ + #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0) + band_2GHz->vht_cap.cap |= (nss - 1) << IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT; + #else + band_2GHz->vht_cap.cap |= (nss - 1) << 16; + #endif + } + if (rwnx_hw->mod_params->murx) + band_2GHz->vht_cap.cap |= IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE; + if (rwnx_hw->mod_params->mutx) + band_2GHz->vht_cap.cap |= IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE; + + /* + * MCS map: + * This capabilities are filled according to the mcs_map module parameter. + * However currently we have some limitations due to FPGA clock constraints + * that prevent always using the range of MCS that is defined by the + * parameter: + * - in RX, 2SS, we support up to MCS7 + * - in TX, 2SS, we support up to MCS8 + */ + // Get max supported BW + if (rwnx_hw->mod_params->use_80) + bw_max = PHY_CHNL_BW_80; + else if (rwnx_hw->mod_params->use_2040) + bw_max = PHY_CHNL_BW_40; + else + bw_max = PHY_CHNL_BW_20; + + // Check if MCS map should be limited to MCS0_8 due to the standard. Indeed in BW20, + // MCS9 is not supported in 1 and 2 SS + if (rwnx_hw->mod_params->use_2040) + mcs_map_max = IEEE80211_VHT_MCS_SUPPORT_0_9; + else + mcs_map_max = IEEE80211_VHT_MCS_SUPPORT_0_8; + + mcs_map = min_t(int, rwnx_hw->mod_params->mcs_map, mcs_map_max); + band_2GHz->vht_cap.vht_mcs.rx_mcs_map = cpu_to_le16(0); + for (i = 0; i < nss; i++) { + band_2GHz->vht_cap.vht_mcs.rx_mcs_map |= cpu_to_le16(mcs_map << (i*2)); + band_2GHz->vht_cap.vht_mcs.rx_highest = MAX_VHT_RATE(mcs_map, nss, bw_max); + //mcs_map = IEEE80211_VHT_MCS_SUPPORT_0_9; + } + for (; i < 8; i++) { + band_2GHz->vht_cap.vht_mcs.rx_mcs_map |= cpu_to_le16( + IEEE80211_VHT_MCS_NOT_SUPPORTED << (i*2)); + } + + mcs_map = min_t(int, rwnx_hw->mod_params->mcs_map, mcs_map_max); + band_2GHz->vht_cap.vht_mcs.tx_mcs_map = cpu_to_le16(0); + for (i = 0; i < nss; i++) { + band_2GHz->vht_cap.vht_mcs.tx_mcs_map |= cpu_to_le16(mcs_map << (i*2)); + band_2GHz->vht_cap.vht_mcs.tx_highest = MAX_VHT_RATE(mcs_map, nss, bw_max); + //mcs_map = min_t(int, rwnx_hw->mod_params->mcs_map, + // IEEE80211_VHT_MCS_SUPPORT_0_9); + } + for (; i < 8; i++) { + band_2GHz->vht_cap.vht_mcs.tx_mcs_map |= cpu_to_le16( + IEEE80211_VHT_MCS_NOT_SUPPORTED << (i*2)); + } + + if (!rwnx_hw->mod_params->use_80) { +#ifdef CONFIG_VENDOR_RWNX_VHT_NO80 + band_2GHz->vht_cap.cap |= IEEE80211_VHT_CAP_NOT_SUP_WIDTH_80; +#endif + band_2GHz->vht_cap.cap &= ~IEEE80211_VHT_CAP_SHORT_GI_80; + } + + +//#ifdef USE_5G + if (rwnx_hw->band_5g_support) { + band_5GHz->vht_cap.vht_supported = true; + if (rwnx_hw->mod_params->sgi80) + band_5GHz->vht_cap.cap |= IEEE80211_VHT_CAP_SHORT_GI_80; + if (rwnx_hw->mod_params->stbc_on) + band_5GHz->vht_cap.cap |= IEEE80211_VHT_CAP_RXSTBC_1; + if (rwnx_hw->mod_params->ldpc_on) + band_5GHz->vht_cap.cap |= IEEE80211_VHT_CAP_RXLDPC; + if (rwnx_hw->mod_params->bfmee) { + band_5GHz->vht_cap.cap |= IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE; + #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0) + band_5GHz->vht_cap.cap |= 3 << IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT; + #else + band_5GHz->vht_cap.cap |= 3 << 13; + #endif + } + + if((rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800D81X2 || + rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800D89X2) && + 
rwnx_hw->mod_params->stbc_on == true){ //if (nss > 1) + band_5GHz->vht_cap.cap |= IEEE80211_VHT_CAP_TXSTBC; + } + + // Update the AMSDU max RX size (not shifted as located at offset 0 of the VHT cap) + band_5GHz->vht_cap.cap |= rwnx_hw->mod_params->amsdu_rx_max; + + if (rwnx_hw->mod_params->bfmer) { + band_5GHz->vht_cap.cap |= IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE; + /* Set number of sounding dimensions */ + #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0) + band_5GHz->vht_cap.cap |= (nss - 1) << IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT; + #else + band_5GHz->vht_cap.cap |= (nss - 1) << 16; + #endif + } + if (rwnx_hw->mod_params->murx) + band_5GHz->vht_cap.cap |= IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE; + if (rwnx_hw->mod_params->mutx) + band_5GHz->vht_cap.cap |= IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE; + + /* + * MCS map: + * This capabilities are filled according to the mcs_map module parameter. + * However currently we have some limitations due to FPGA clock constraints + * that prevent always using the range of MCS that is defined by the + * parameter: + * - in RX, 2SS, we support up to MCS7 + * - in TX, 2SS, we support up to MCS8 + */ + // Get max supported BW + if (rwnx_hw->mod_params->use_80) + bw_max = PHY_CHNL_BW_80; + else if (rwnx_hw->mod_params->use_2040) + bw_max = PHY_CHNL_BW_40; + else + bw_max = PHY_CHNL_BW_20; + + // Check if MCS map should be limited to MCS0_8 due to the standard. Indeed in BW20, + // MCS9 is not supported in 1 and 2 SS + if (rwnx_hw->mod_params->use_2040) + mcs_map_max = IEEE80211_VHT_MCS_SUPPORT_0_9; + else + mcs_map_max = IEEE80211_VHT_MCS_SUPPORT_0_8; + + mcs_map = min_t(int, rwnx_hw->mod_params->mcs_map, mcs_map_max); + band_5GHz->vht_cap.vht_mcs.rx_mcs_map = cpu_to_le16(0); + for (i = 0; i < nss; i++) { + band_5GHz->vht_cap.vht_mcs.rx_mcs_map |= cpu_to_le16(mcs_map << (i*2)); + band_5GHz->vht_cap.vht_mcs.rx_highest = MAX_VHT_RATE(mcs_map, nss, bw_max); + //mcs_map = IEEE80211_VHT_MCS_SUPPORT_0_9; + } + for (; i < 8; i++) { + band_5GHz->vht_cap.vht_mcs.rx_mcs_map |= cpu_to_le16( + IEEE80211_VHT_MCS_NOT_SUPPORTED << (i*2)); + } + + mcs_map = min_t(int, rwnx_hw->mod_params->mcs_map, mcs_map_max); + band_5GHz->vht_cap.vht_mcs.tx_mcs_map = cpu_to_le16(0); + for (i = 0; i < nss; i++) { + band_5GHz->vht_cap.vht_mcs.tx_mcs_map |= cpu_to_le16(mcs_map << (i*2)); + band_5GHz->vht_cap.vht_mcs.tx_highest = MAX_VHT_RATE(mcs_map, nss, bw_max); + //mcs_map = min_t(int, rwnx_hw->mod_params->mcs_map, + // IEEE80211_VHT_MCS_SUPPORT_0_9); + } + for (; i < 8; i++) { + band_5GHz->vht_cap.vht_mcs.tx_mcs_map |= cpu_to_le16( + IEEE80211_VHT_MCS_NOT_SUPPORTED << (i*2)); + } + + if (!rwnx_hw->mod_params->use_80) { +#ifdef CONFIG_VENDOR_RWNX_VHT_NO80 + band_5GHz->vht_cap.cap |= IEEE80211_VHT_CAP_NOT_SUP_WIDTH_80; +#endif + band_5GHz->vht_cap.cap &= ~IEEE80211_VHT_CAP_SHORT_GI_80; + } +//#endif + } +} + +static void rwnx_set_ht_capa(struct rwnx_hw *rwnx_hw, struct wiphy *wiphy) +{ + //#ifdef USE_5G + struct ieee80211_supported_band *band_5GHz = wiphy->bands[NL80211_BAND_5GHZ]; + //#endif + + struct ieee80211_supported_band *band_2GHz = wiphy->bands[NL80211_BAND_2GHZ]; + int i; + int nss = rwnx_hw->mod_params->nss; + + if (!rwnx_hw->mod_params->ht_on) { + band_2GHz->ht_cap.ht_supported = false; + //#ifdef USE_5G + if (rwnx_hw->band_5g_support){ + band_5GHz->ht_cap.ht_supported = false; + } + //#endif + return; + } + + if(rwnx_hw->usbdev->chipid <= PRODUCT_ID_AIC8800D81) + nss = 1; + + if (rwnx_hw->mod_params->stbc_on) + band_2GHz->ht_cap.cap |= 1 << 
IEEE80211_HT_CAP_RX_STBC_SHIFT; + if (rwnx_hw->mod_params->ldpc_on) + band_2GHz->ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING; + if (rwnx_hw->mod_params->use_2040) { + band_2GHz->ht_cap.mcs.rx_mask[4] = 0x1; /* MCS32 */ + band_2GHz->ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40; + band_2GHz->ht_cap.mcs.rx_highest = cpu_to_le16(135 * nss); + } else { + band_2GHz->ht_cap.mcs.rx_highest = cpu_to_le16(65 * nss); + } + + if((rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800D81X2 || + rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800D89X2) && + rwnx_hw->mod_params->stbc_on == true){ //if (nss > 1) + band_2GHz->ht_cap.cap |= IEEE80211_HT_CAP_TX_STBC; + } + + // Update the AMSDU max RX size + if (rwnx_hw->mod_params->amsdu_rx_max) + band_2GHz->ht_cap.cap |= IEEE80211_HT_CAP_MAX_AMSDU; + + if (rwnx_hw->mod_params->sgi) { + band_2GHz->ht_cap.cap |= IEEE80211_HT_CAP_SGI_20; + if (rwnx_hw->mod_params->use_2040) { + band_2GHz->ht_cap.cap |= IEEE80211_HT_CAP_SGI_40; + band_2GHz->ht_cap.mcs.rx_highest = cpu_to_le16(150 * nss); + } else + band_2GHz->ht_cap.mcs.rx_highest = cpu_to_le16(72 * nss); + } + if (rwnx_hw->mod_params->gf_rx_on) + band_2GHz->ht_cap.cap |= IEEE80211_HT_CAP_GRN_FLD; + + for (i = 0; i < nss; i++) { + band_2GHz->ht_cap.mcs.rx_mask[i] = 0xFF; + } + + //#ifdef USE_5G + if (rwnx_hw->band_5g_support){ + band_5GHz->ht_cap = band_2GHz->ht_cap; + } + //#endif +} + +#ifdef CONFIG_HE_FOR_OLD_KERNEL +extern struct ieee80211_sband_iftype_data rwnx_he_capa; +#endif +static void rwnx_set_he_capa(struct rwnx_hw *rwnx_hw, struct wiphy *wiphy) +{ + #ifdef CONFIG_HE_FOR_OLD_KERNEL + struct ieee80211_sta_he_cap *he_cap; + int i; + int nss = rwnx_hw->mod_params->nss; + int mcs_map; + + if(rwnx_hw->usbdev->chipid <= PRODUCT_ID_AIC8800D81) + nss = 1; + + he_cap = (struct ieee80211_sta_he_cap *) &rwnx_he_capa.he_cap; + he_cap->has_he = true; + he_cap->he_cap_elem.mac_cap_info[2] |= IEEE80211_HE_MAC_CAP2_ALL_ACK; + if (rwnx_hw->mod_params->use_2040) { + he_cap->he_cap_elem.phy_cap_info[0] |= + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G; + he_cap->ppe_thres[0] |= 0x10; + } + if (rwnx_hw->mod_params->use_80) { + he_cap->ppe_thres[0] |= 0x20; + he_cap->ppe_thres[2] |= 0xc0; + he_cap->ppe_thres[3] |= 0x01; + } + if (nss == 2) { + he_cap->ppe_thres[0] |= 0x01; + he_cap->ppe_thres[3] |= 0x70; + he_cap->ppe_thres[4] |= 0x1c; + he_cap->ppe_thres[5] |= 0x07; + } + //if (rwnx_hw->mod_params->use_80) + { + he_cap->he_cap_elem.phy_cap_info[0] |= + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G; + } + if (rwnx_hw->mod_params->ldpc_on) { + he_cap->he_cap_elem.phy_cap_info[1] |= IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD; + } else { + // If no LDPC is supported, we have to limit to MCS0_9, as LDPC is mandatory + // for MCS 10 and 11 + rwnx_hw->mod_params->he_mcs_map = min_t(int, rwnx_hw->mod_params->mcs_map, + IEEE80211_HE_MCS_SUPPORT_0_9); + } + #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) + he_cap->he_cap_elem.phy_cap_info[1] |= IEEE80211_HE_PHY_CAP1_HE_LTF_AND_GI_FOR_HE_PPDUS_0_8US + | IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_TX_MAX_NSTS; + + he_cap->he_cap_elem.phy_cap_info[2] |= IEEE80211_HE_PHY_CAP2_MIDAMBLE_RX_TX_MAX_NSTS | + IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US | + IEEE80211_HE_PHY_CAP2_DOPPLER_RX; + #else + he_cap->he_cap_elem.phy_cap_info[1] |= IEEE80211_HE_PHY_CAP1_HE_LTF_AND_GI_FOR_HE_PPDUS_0_8US; + he_cap->he_cap_elem.phy_cap_info[2] |= IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US | + IEEE80211_HE_PHY_CAP2_DOPPLER_RX; + #endif + if (rwnx_hw->mod_params->stbc_on) + 
he_cap->he_cap_elem.phy_cap_info[2] |= IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ; + + if((rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800D81X2 || + rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800D89X2) && + rwnx_hw->mod_params->stbc_on == true){ //if (nss > 1) + he_cap->he_cap_elem.phy_cap_info[2] |= IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ; + } + + #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 13, 0) + he_cap->he_cap_elem.phy_cap_info[3] |= IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_16_QAM | + IEEE80211_HE_PHY_CAP3_DCM_MAX_RX_NSS_1 | + IEEE80211_HE_PHY_CAP3_RX_PARTIAL_BW_SU_IN_20MHZ_MU; + #else + he_cap->he_cap_elem.phy_cap_info[3] |= IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_16_QAM | + IEEE80211_HE_PHY_CAP3_DCM_MAX_RX_NSS_1 | + IEEE80211_HE_PHY_CAP3_RX_HE_MU_PPDU_FROM_NON_AP_STA; + #endif + if (rwnx_hw->mod_params->bfmee) { + he_cap->he_cap_elem.phy_cap_info[4] |= IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE; + he_cap->he_cap_elem.phy_cap_info[4] |= + IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_4; + } + he_cap->he_cap_elem.phy_cap_info[5] |= IEEE80211_HE_PHY_CAP5_NG16_SU_FEEDBACK | + IEEE80211_HE_PHY_CAP5_NG16_MU_FEEDBACK; +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 13, 0) + he_cap->he_cap_elem.phy_cap_info[6] |= IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_42_SU | + IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_75_MU | + IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMING_FB | + IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMING_PARTIAL_BW_FB | + IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT | + IEEE80211_HE_PHY_CAP6_PARTIAL_BANDWIDTH_DL_MUMIMO; +#else + he_cap->he_cap_elem.phy_cap_info[6] |= IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_42_SU | + IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_75_MU | + IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMER_FB | + IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMER_FB | + IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT | + IEEE80211_HE_PHY_CAP6_PARTIAL_BANDWIDTH_DL_MUMIMO; +#endif + //he_cap->he_cap_elem.phy_cap_info[7] |= IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI; + he_cap->he_cap_elem.phy_cap_info[8] |= IEEE80211_HE_PHY_CAP8_20MHZ_IN_40MHZ_HE_PPDU_IN_2G; + he_cap->he_cap_elem.phy_cap_info[9] |= IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB | + IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB; + mcs_map = rwnx_hw->mod_params->he_mcs_map; + //mcs_map = min_t(int, rwnx_hw->mod_params->he_mcs_map, IEEE80211_HE_MCS_SUPPORT_0_9); + memset(&he_cap->he_mcs_nss_supp, 0, sizeof(he_cap->he_mcs_nss_supp)); + for (i = 0; i < nss; i++) { + __le16 unsup_for_ss = cpu_to_le16(IEEE80211_HE_MCS_NOT_SUPPORTED << (i*2)); + he_cap->he_mcs_nss_supp.rx_mcs_80 |= cpu_to_le16(mcs_map << (i*2)); + he_cap->he_mcs_nss_supp.rx_mcs_160 |= unsup_for_ss; + he_cap->he_mcs_nss_supp.rx_mcs_80p80 |= unsup_for_ss; + //mcs_map = IEEE80211_HE_MCS_SUPPORT_0_7; + } + for (; i < 8; i++) { + __le16 unsup_for_ss = cpu_to_le16(IEEE80211_HE_MCS_NOT_SUPPORTED << (i*2)); + he_cap->he_mcs_nss_supp.rx_mcs_80 |= unsup_for_ss; + he_cap->he_mcs_nss_supp.rx_mcs_160 |= unsup_for_ss; + he_cap->he_mcs_nss_supp.rx_mcs_80p80 |= unsup_for_ss; + } + mcs_map = rwnx_hw->mod_params->he_mcs_map; + for (i = 0; i < nss; i++) { + __le16 unsup_for_ss = cpu_to_le16(IEEE80211_HE_MCS_NOT_SUPPORTED << (i*2)); + he_cap->he_mcs_nss_supp.tx_mcs_80 |= cpu_to_le16(mcs_map << (i*2)); + he_cap->he_mcs_nss_supp.tx_mcs_160 |= unsup_for_ss; + he_cap->he_mcs_nss_supp.tx_mcs_80p80 |= unsup_for_ss; + //mcs_map = min_t(int, rwnx_hw->mod_params->he_mcs_map, + // IEEE80211_HE_MCS_SUPPORT_0_7); + } + for (; i < 8; i++) { + __le16 unsup_for_ss = 
cpu_to_le16(IEEE80211_HE_MCS_NOT_SUPPORTED << (i*2)); + he_cap->he_mcs_nss_supp.tx_mcs_80 |= unsup_for_ss; + he_cap->he_mcs_nss_supp.tx_mcs_160 |= unsup_for_ss; + he_cap->he_mcs_nss_supp.tx_mcs_80p80 |= unsup_for_ss; + } + + return ; + #endif + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0) + //#ifdef USE_5G + struct ieee80211_supported_band *band_5GHz = wiphy->bands[NL80211_BAND_5GHZ]; + //#endif + struct ieee80211_supported_band *band_2GHz = wiphy->bands[NL80211_BAND_2GHZ]; + int i; + int nss = rwnx_hw->mod_params->nss; + struct ieee80211_sta_he_cap *he_cap; + int mcs_map; + if (!rwnx_hw->mod_params->he_on) { + band_2GHz->iftype_data = NULL; + band_2GHz->n_iftype_data = 0; + //#ifdef USE_5G + if (rwnx_hw->band_5g_support) { + band_5GHz->iftype_data = NULL; + band_5GHz->n_iftype_data = 0; + } + //#endif + return; + } + + if(rwnx_hw->usbdev->chipid <= PRODUCT_ID_AIC8800D81) + nss = 1; + + he_cap = (struct ieee80211_sta_he_cap *) &band_2GHz->iftype_data->he_cap; + he_cap->has_he = true; + he_cap->he_cap_elem.mac_cap_info[2] |= IEEE80211_HE_MAC_CAP2_ALL_ACK; + if (rwnx_hw->mod_params->use_2040) { + he_cap->he_cap_elem.phy_cap_info[0] |= + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G; + he_cap->ppe_thres[0] |= 0x10; + } + if (rwnx_hw->mod_params->use_80) { + he_cap->ppe_thres[0] |= 0x20; + he_cap->ppe_thres[2] |= 0xc0; + he_cap->ppe_thres[3] |= 0x01; + } + if (nss == 2) { + he_cap->ppe_thres[0] |= 0x01; + he_cap->ppe_thres[3] |= 0x70; + he_cap->ppe_thres[4] |= 0x1c; + he_cap->ppe_thres[5] |= 0x07; + } + //if (rwnx_hw->mod_params->use_80) + { + he_cap->he_cap_elem.phy_cap_info[0] |= + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G; + } + if (rwnx_hw->mod_params->ldpc_on) { + he_cap->he_cap_elem.phy_cap_info[1] |= IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD; + } else { + // If no LDPC is supported, we have to limit to MCS0_9, as LDPC is mandatory + // for MCS 10 and 11 + rwnx_hw->mod_params->he_mcs_map = min_t(int, rwnx_hw->mod_params->mcs_map, + IEEE80211_HE_MCS_SUPPORT_0_9); + } + #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) + he_cap->he_cap_elem.phy_cap_info[1] |= IEEE80211_HE_PHY_CAP1_HE_LTF_AND_GI_FOR_HE_PPDUS_0_8US + | IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_TX_MAX_NSTS; + + he_cap->he_cap_elem.phy_cap_info[2] |= IEEE80211_HE_PHY_CAP2_MIDAMBLE_RX_TX_MAX_NSTS | + IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US | + IEEE80211_HE_PHY_CAP2_DOPPLER_RX; + #else + he_cap->he_cap_elem.phy_cap_info[1] |= IEEE80211_HE_PHY_CAP1_HE_LTF_AND_GI_FOR_HE_PPDUS_0_8US; + he_cap->he_cap_elem.phy_cap_info[2] |= IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US | + IEEE80211_HE_PHY_CAP2_DOPPLER_RX; + #endif + if (rwnx_hw->mod_params->stbc_on) + he_cap->he_cap_elem.phy_cap_info[2] |= IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ; + + if((rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800D81X2 || + rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800D89X2) && + rwnx_hw->mod_params->stbc_on == true){ //if (nss > 1) + he_cap->he_cap_elem.phy_cap_info[2] |= IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ; + } + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 13, 0) + he_cap->he_cap_elem.phy_cap_info[3] |= IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_16_QAM | + IEEE80211_HE_PHY_CAP3_DCM_MAX_RX_NSS_1 | + IEEE80211_HE_PHY_CAP3_RX_PARTIAL_BW_SU_IN_20MHZ_MU; +#else + he_cap->he_cap_elem.phy_cap_info[3] |= IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_16_QAM | + IEEE80211_HE_PHY_CAP3_DCM_MAX_RX_NSS_1 | + IEEE80211_HE_PHY_CAP3_RX_HE_MU_PPDU_FROM_NON_AP_STA; +#endif + if (rwnx_hw->mod_params->bfmee) { + he_cap->he_cap_elem.phy_cap_info[4] |= 
IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE; + he_cap->he_cap_elem.phy_cap_info[4] |= + IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_4; + } + he_cap->he_cap_elem.phy_cap_info[5] |= IEEE80211_HE_PHY_CAP5_NG16_SU_FEEDBACK | + IEEE80211_HE_PHY_CAP5_NG16_MU_FEEDBACK; + #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 13, 0) + he_cap->he_cap_elem.phy_cap_info[6] |= IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_42_SU | + IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_75_MU | + IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMING_FB | + IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMING_PARTIAL_BW_FB | + IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT | + IEEE80211_HE_PHY_CAP6_PARTIAL_BANDWIDTH_DL_MUMIMO; + #else + he_cap->he_cap_elem.phy_cap_info[6] |= IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_42_SU | + IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_75_MU | + IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMER_FB | + IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMER_FB | + IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT | + IEEE80211_HE_PHY_CAP6_PARTIAL_BANDWIDTH_DL_MUMIMO; + #endif + //he_cap->he_cap_elem.phy_cap_info[7] |= IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI; + he_cap->he_cap_elem.phy_cap_info[8] |= IEEE80211_HE_PHY_CAP8_20MHZ_IN_40MHZ_HE_PPDU_IN_2G; + #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) + he_cap->he_cap_elem.phy_cap_info[9] |= IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB | + IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB; + #endif + if (rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8801 || rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800DC || + rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800DW) + mcs_map = min_t(int, rwnx_hw->mod_params->he_mcs_map, IEEE80211_HE_MCS_SUPPORT_0_9); + else + mcs_map = rwnx_hw->mod_params->he_mcs_map; + //mcs_map = min_t(int, rwnx_hw->mod_params->he_mcs_map, IEEE80211_HE_MCS_SUPPORT_0_9); + memset(&he_cap->he_mcs_nss_supp, 0, sizeof(he_cap->he_mcs_nss_supp)); + for (i = 0; i < nss; i++) { + __le16 unsup_for_ss = cpu_to_le16(IEEE80211_HE_MCS_NOT_SUPPORTED << (i*2)); + he_cap->he_mcs_nss_supp.rx_mcs_80 |= cpu_to_le16(mcs_map << (i*2)); + he_cap->he_mcs_nss_supp.rx_mcs_160 |= unsup_for_ss; + he_cap->he_mcs_nss_supp.rx_mcs_80p80 |= unsup_for_ss; + //mcs_map = IEEE80211_HE_MCS_SUPPORT_0_7; + } + for (; i < 8; i++) { + __le16 unsup_for_ss = cpu_to_le16(IEEE80211_HE_MCS_NOT_SUPPORTED << (i*2)); + he_cap->he_mcs_nss_supp.rx_mcs_80 |= unsup_for_ss; + he_cap->he_mcs_nss_supp.rx_mcs_160 |= unsup_for_ss; + he_cap->he_mcs_nss_supp.rx_mcs_80p80 |= unsup_for_ss; + } + mcs_map = rwnx_hw->mod_params->he_mcs_map; + for (i = 0; i < nss; i++) { + __le16 unsup_for_ss = cpu_to_le16(IEEE80211_HE_MCS_NOT_SUPPORTED << (i*2)); + he_cap->he_mcs_nss_supp.tx_mcs_80 |= cpu_to_le16(mcs_map << (i*2)); + he_cap->he_mcs_nss_supp.tx_mcs_160 |= unsup_for_ss; + he_cap->he_mcs_nss_supp.tx_mcs_80p80 |= unsup_for_ss; + //mcs_map = min_t(int, rwnx_hw->mod_params->he_mcs_map, + // IEEE80211_HE_MCS_SUPPORT_0_7); + } + for (; i < 8; i++) { + __le16 unsup_for_ss = cpu_to_le16(IEEE80211_HE_MCS_NOT_SUPPORTED << (i*2)); + he_cap->he_mcs_nss_supp.tx_mcs_80 |= unsup_for_ss; + he_cap->he_mcs_nss_supp.tx_mcs_160 |= unsup_for_ss; + he_cap->he_mcs_nss_supp.tx_mcs_80p80 |= unsup_for_ss; + } + +//#ifdef USE_5G + if(rwnx_hw->band_5g_support){ + he_cap = (struct ieee80211_sta_he_cap *) &band_5GHz->iftype_data->he_cap; + he_cap->has_he = true; + he_cap->he_cap_elem.mac_cap_info[2] |= IEEE80211_HE_MAC_CAP2_ALL_ACK; + if (rwnx_hw->mod_params->use_2040) { + he_cap->he_cap_elem.phy_cap_info[0] |= + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G; + 
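    /* Editorial note: the ppe_thres[] bytes set here and below hand-encode
     * the HE PPE Thresholds field (NSTS, RU index bitmask and the per-NSS/RU
     * PPET16/PPET8 values); the constants mirror those used for the 2.4 GHz
     * band earlier in this function. */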
he_cap->ppe_thres[0] |= 0x10; + } + if (rwnx_hw->mod_params->use_80) { + he_cap->ppe_thres[0] |= 0x20; + he_cap->ppe_thres[2] |= 0xc0; + he_cap->ppe_thres[3] |= 0x01; + } + if (nss == 2) { + he_cap->ppe_thres[0] |= 0x01; + he_cap->ppe_thres[3] |= 0x70; + he_cap->ppe_thres[4] |= 0x1c; + he_cap->ppe_thres[5] |= 0x07; + } + //if (rwnx_hw->mod_params->use_80) + { + he_cap->he_cap_elem.phy_cap_info[0] |= + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G; + } + if (rwnx_hw->mod_params->ldpc_on) { + he_cap->he_cap_elem.phy_cap_info[1] |= IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD; + } else { + // If no LDPC is supported, we have to limit to MCS0_9, as LDPC is mandatory + // for MCS 10 and 11 + rwnx_hw->mod_params->he_mcs_map = min_t(int, rwnx_hw->mod_params->mcs_map, + IEEE80211_HE_MCS_SUPPORT_0_9); + } + #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) + he_cap->he_cap_elem.phy_cap_info[1] |= IEEE80211_HE_PHY_CAP1_HE_LTF_AND_GI_FOR_HE_PPDUS_0_8US | + IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_TX_MAX_NSTS; + he_cap->he_cap_elem.phy_cap_info[2] |= IEEE80211_HE_PHY_CAP2_MIDAMBLE_RX_TX_MAX_NSTS | + IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US | + IEEE80211_HE_PHY_CAP2_DOPPLER_RX; + #else + he_cap->he_cap_elem.phy_cap_info[1] |= IEEE80211_HE_PHY_CAP1_HE_LTF_AND_GI_FOR_HE_PPDUS_0_8US; + he_cap->he_cap_elem.phy_cap_info[2] |= IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US | + IEEE80211_HE_PHY_CAP2_DOPPLER_RX; + #endif + if (rwnx_hw->mod_params->stbc_on) + he_cap->he_cap_elem.phy_cap_info[2] |= IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ; + + if((rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800D81X2 || + rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800D89X2) && + rwnx_hw->mod_params->stbc_on == true){ //if (nss > 1) + he_cap->he_cap_elem.phy_cap_info[2] |= IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ; + } + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 13, 0) + he_cap->he_cap_elem.phy_cap_info[3] |= IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_16_QAM | + IEEE80211_HE_PHY_CAP3_DCM_MAX_RX_NSS_1 | + IEEE80211_HE_PHY_CAP3_RX_PARTIAL_BW_SU_IN_20MHZ_MU; +#else + he_cap->he_cap_elem.phy_cap_info[3] |= IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_16_QAM | + IEEE80211_HE_PHY_CAP3_DCM_MAX_RX_NSS_1 | + IEEE80211_HE_PHY_CAP3_RX_HE_MU_PPDU_FROM_NON_AP_STA; +#endif + if (rwnx_hw->mod_params->bfmee) { + he_cap->he_cap_elem.phy_cap_info[4] |= IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE; + he_cap->he_cap_elem.phy_cap_info[4] |= + IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_4; + } + he_cap->he_cap_elem.phy_cap_info[5] |= IEEE80211_HE_PHY_CAP5_NG16_SU_FEEDBACK | + IEEE80211_HE_PHY_CAP5_NG16_MU_FEEDBACK; +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 13, 0) + he_cap->he_cap_elem.phy_cap_info[6] |= IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_42_SU | + IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_75_MU | + IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMING_FB | + IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMING_PARTIAL_BW_FB | + IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT | + IEEE80211_HE_PHY_CAP6_PARTIAL_BANDWIDTH_DL_MUMIMO; +#else + he_cap->he_cap_elem.phy_cap_info[6] |= IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_42_SU | + IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_75_MU | + IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMER_FB | + IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMER_FB | + IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT | + IEEE80211_HE_PHY_CAP6_PARTIAL_BANDWIDTH_DL_MUMIMO; +#endif + //he_cap->he_cap_elem.phy_cap_info[7] |= IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI; + he_cap->he_cap_elem.phy_cap_info[8] |= IEEE80211_HE_PHY_CAP8_20MHZ_IN_40MHZ_HE_PPDU_IN_2G; + #if LINUX_VERSION_CODE >= 
KERNEL_VERSION(4, 20, 0) + he_cap->he_cap_elem.phy_cap_info[9] |= IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB | + IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB; + #endif + if (rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8801 || rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800DC || + rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800DW) + mcs_map = min_t(int, rwnx_hw->mod_params->he_mcs_map, IEEE80211_HE_MCS_SUPPORT_0_9); + else + mcs_map = rwnx_hw->mod_params->he_mcs_map; + //mcs_map = min_t(int, rwnx_hw->mod_params->he_mcs_map, IEEE80211_HE_MCS_SUPPORT_0_9); + memset(&he_cap->he_mcs_nss_supp, 0, sizeof(he_cap->he_mcs_nss_supp)); + for (i = 0; i < nss; i++) { + __le16 unsup_for_ss = cpu_to_le16(IEEE80211_HE_MCS_NOT_SUPPORTED << (i*2)); + he_cap->he_mcs_nss_supp.rx_mcs_80 |= cpu_to_le16(mcs_map << (i*2)); + he_cap->he_mcs_nss_supp.rx_mcs_160 |= unsup_for_ss; + he_cap->he_mcs_nss_supp.rx_mcs_80p80 |= unsup_for_ss; + //mcs_map = IEEE80211_HE_MCS_SUPPORT_0_7; + } + for (; i < 8; i++) { + __le16 unsup_for_ss = cpu_to_le16(IEEE80211_HE_MCS_NOT_SUPPORTED << (i*2)); + he_cap->he_mcs_nss_supp.rx_mcs_80 |= unsup_for_ss; + he_cap->he_mcs_nss_supp.rx_mcs_160 |= unsup_for_ss; + he_cap->he_mcs_nss_supp.rx_mcs_80p80 |= unsup_for_ss; + } + mcs_map = rwnx_hw->mod_params->he_mcs_map; + for (i = 0; i < nss; i++) { + __le16 unsup_for_ss = cpu_to_le16(IEEE80211_HE_MCS_NOT_SUPPORTED << (i*2)); + he_cap->he_mcs_nss_supp.tx_mcs_80 |= cpu_to_le16(mcs_map << (i*2)); + he_cap->he_mcs_nss_supp.tx_mcs_160 |= unsup_for_ss; + he_cap->he_mcs_nss_supp.tx_mcs_80p80 |= unsup_for_ss; + //mcs_map = min_t(int, rwnx_hw->mod_params->he_mcs_map, + // IEEE80211_HE_MCS_SUPPORT_0_7); + } + for (; i < 8; i++) { + __le16 unsup_for_ss = cpu_to_le16(IEEE80211_HE_MCS_NOT_SUPPORTED << (i*2)); + he_cap->he_mcs_nss_supp.tx_mcs_80 |= unsup_for_ss; + he_cap->he_mcs_nss_supp.tx_mcs_160 |= unsup_for_ss; + he_cap->he_mcs_nss_supp.tx_mcs_80p80 |= unsup_for_ss; + } + } +//#endif +#endif +} + +static void rwnx_set_wiphy_params(struct rwnx_hw *rwnx_hw, struct wiphy *wiphy) +{ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0)) + struct ieee80211_regdomain *regdomain; +#endif + +#ifdef CONFIG_RWNX_FULLMAC + /* FULLMAC specific parameters */ + wiphy->flags |= WIPHY_FLAG_REPORTS_OBSS; + wiphy->max_scan_ssids = SCAN_SSID_MAX; + wiphy->max_scan_ie_len = SCANU_MAX_IE_LEN; +#endif /* CONFIG_RWNX_FULLMAC */ + + if (rwnx_hw->mod_params->tdls) { + /* TDLS support */ + wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS; +#ifdef CONFIG_RWNX_FULLMAC + /* TDLS external setup support */ + wiphy->flags |= WIPHY_FLAG_TDLS_EXTERNAL_SETUP; +#endif + } + + if (rwnx_hw->mod_params->ap_uapsd_on) + wiphy->flags |= WIPHY_FLAG_AP_UAPSD; + +#ifdef CONFIG_RWNX_FULLMAC + if (rwnx_hw->mod_params->ps_on) + wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT; + else + wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; +#endif + + if (rwnx_hw->mod_params->custregd) { + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0)) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)) + // Apply custom regulatory. 
Note that for recent kernel versions we use instead the + // REGULATORY_WIPHY_SELF_MANAGED flag, along with the regulatory_set_wiphy_regd() + // function, that needs to be called after wiphy registration + memcpy(country_code, default_ccode, sizeof(default_ccode)); + regdomain = getRegdomainFromRwnxDB(wiphy, default_ccode); + printk(KERN_CRIT + "\n\n%s: CAUTION: USING PERMISSIVE CUSTOM REGULATORY RULES\n\n", + __func__); + wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG; + wiphy->regulatory_flags |= REGULATORY_IGNORE_STALE_KICKOFF; + wiphy_apply_custom_regulatory(wiphy, regdomain); +#elif (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0)) + memcpy(country_code, default_ccode, sizeof(default_ccode)); + regdomain = getRegdomainFromRwnxDB(wiphy, default_ccode); + printk(KERN_CRIT"%s: Registering custom regulatory\n", __func__); + wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY; + wiphy_apply_custom_regulatory(wiphy, regdomain); +#endif + // Check if custom channel set shall be enabled. In such case only monitor mode is + // supported + if (rwnx_hw->mod_params->custchan) { + wiphy->interface_modes = BIT(NL80211_IFTYPE_MONITOR); + + // Enable "extra" channels + wiphy->bands[NL80211_BAND_2GHZ]->n_channels += 13; + //#ifdef USE_5G + if(rwnx_hw->band_5g_support){ + wiphy->bands[NL80211_BAND_5GHZ]->n_channels += 59; + } + //#endif + } + } +} + +#if 0 +static void rwnx_set_rf_params(struct rwnx_hw *rwnx_hw, struct wiphy *wiphy) +{ +#ifndef CONFIG_RWNX_SDM + #ifdef USE_5G + struct ieee80211_supported_band *band_5GHz = wiphy->bands[NL80211_BAND_5GHZ]; + #endif + struct ieee80211_supported_band *band_2GHz = wiphy->bands[NL80211_BAND_2GHZ]; + u32 mdm_phy_cfg = __MDM_PHYCFG_FROM_VERS(rwnx_hw->version_cfm.version_phy_1); + + /* + * Get configuration file depending on the RF + */ + if (mdm_phy_cfg == MDM_PHY_CONFIG_TRIDENT) { + struct rwnx_phy_conf_file phy_conf; + // Retrieve the Trident configuration + rwnx_parse_phy_configfile(rwnx_hw, RWNX_PHY_CONFIG_TRD_NAME, + &phy_conf, rwnx_hw->mod_params->phy_cfg); + memcpy(&rwnx_hw->phy.cfg, &phy_conf.trd, sizeof(phy_conf.trd)); + } else if (mdm_phy_cfg == MDM_PHY_CONFIG_ELMA) { + } else if (mdm_phy_cfg == MDM_PHY_CONFIG_KARST) { + struct rwnx_phy_conf_file phy_conf; + // We use the NSS parameter as is + // Retrieve the Karst configuration + rwnx_parse_phy_configfile(rwnx_hw, RWNX_PHY_CONFIG_KARST_NAME, + &phy_conf, rwnx_hw->mod_params->phy_cfg); + + memcpy(&rwnx_hw->phy.cfg, &phy_conf.karst, sizeof(phy_conf.karst)); + } else { + WARN_ON(1); + } + + /* + * adjust caps depending on the RF + */ + switch (mdm_phy_cfg) { + case MDM_PHY_CONFIG_TRIDENT: + { + wiphy_dbg(wiphy, "found Trident phy .. limit BW to 40MHz\n"); + rwnx_hw->phy.limit_bw = true; + #ifdef USE_5G +#ifdef CONFIG_VENDOR_RWNX_VHT_NO80 + band_5GHz->vht_cap.cap |= IEEE80211_VHT_CAP_NOT_SUP_WIDTH_80; +#endif + band_5GHz->vht_cap.cap &= ~(IEEE80211_VHT_CAP_SHORT_GI_80 | + IEEE80211_VHT_CAP_RXSTBC_MASK); + #endif + break; + } + case MDM_PHY_CONFIG_ELMA: + wiphy_dbg(wiphy, "found ELMA phy .. 
disabling 2.4GHz and greenfield rx\n"); + wiphy->bands[NL80211_BAND_2GHZ] = NULL; + band_2GHz->ht_cap.cap &= ~IEEE80211_HT_CAP_GRN_FLD; + #ifdef USE_5G + band_5GHz->ht_cap.cap &= ~IEEE80211_HT_CAP_GRN_FLD; + band_5GHz->vht_cap.cap &= ~IEEE80211_VHT_CAP_RXSTBC_MASK; + #endif + break; + case MDM_PHY_CONFIG_KARST: + { + wiphy_dbg(wiphy, "found KARST phy\n"); + break; + } + default: + WARN_ON(1); + break; + } +#endif /* CONFIG_RWNX_SDM */ +} +#endif + +int rwnx_handle_dynparams(struct rwnx_hw *rwnx_hw, struct wiphy *wiphy) +{ +#if 0 + /* Check compatibility between requested parameters and HW/SW features */ + int ret; + + ret = rwnx_check_fw_hw_feature(rwnx_hw, wiphy); + if (ret) + return ret; + + /* Allocate the RX buffers according to the maximum AMSDU RX size */ + ret = rwnx_ipc_rxbuf_init(rwnx_hw, + (4 * (rwnx_hw->mod_params->amsdu_rx_max + 1) + 1) * 1024); + if (ret) { + wiphy_err(wiphy, "Cannot allocate the RX buffers\n"); + return ret; + } +#endif + + //check he_mcs max + if(rwnx_hw->usbdev->chipid != PRODUCT_ID_AIC8800D81 && + rwnx_hw->usbdev->chipid != PRODUCT_ID_AIC8800D81X2 && + rwnx_hw->usbdev->chipid != PRODUCT_ID_AIC8800D89X2 && + rwnx_hw->mod_params->he_mcs_map > IEEE80211_HE_MCS_SUPPORT_0_9){ + rwnx_hw->mod_params->he_mcs_map = IEEE80211_HE_MCS_SUPPORT_0_9; + } else { + rwnx_hw->mod_params->he_mcs_map = IEEE80211_HE_MCS_SUPPORT_0_11; + } + + //check use_80 support + if(rwnx_hw->usbdev->chipid != PRODUCT_ID_AIC8800D81 && + rwnx_hw->usbdev->chipid != PRODUCT_ID_AIC8800D81X2 && + rwnx_hw->usbdev->chipid != PRODUCT_ID_AIC8800D89X2 && + rwnx_hw->mod_params->use_80 == true){ + rwnx_hw->mod_params->use_80 = false; + } else { + rwnx_hw->mod_params->use_80 = true; + } + + //check sgi80 support + if(rwnx_hw->usbdev->chipid != PRODUCT_ID_AIC8800D81 && + rwnx_hw->usbdev->chipid != PRODUCT_ID_AIC8800D81X2 && + rwnx_hw->usbdev->chipid != PRODUCT_ID_AIC8800D89X2 && + rwnx_hw->mod_params->sgi80 == true){ + rwnx_hw->mod_params->sgi80 = false; + } else { + rwnx_hw->mod_params->sgi80 = true; + } +#ifdef CONFIG_5M10M + rwnx_hw->mod_params->he_mcs_map = IEEE80211_VHT_MCS_SUPPORT_0_7; + rwnx_hw->mod_params->he_mcs_map = IEEE80211_HE_MCS_SUPPORT_0_7; + rwnx_hw->mod_params->use_2040 = false; + rwnx_hw->mod_params->use_80 = false; + rwnx_hw->mod_params->sgi80 = false; +#endif + + /* Set wiphy parameters */ + rwnx_set_wiphy_params(rwnx_hw, wiphy); + /* Set VHT capabilities */ + rwnx_set_vht_capa(rwnx_hw, wiphy); + /* Set HE capabilities */ + rwnx_set_he_capa(rwnx_hw, wiphy); + /* Set HT capabilities */ + rwnx_set_ht_capa(rwnx_hw, wiphy); + /* Set RF specific parameters (shall be done last as it might change some + capabilities previously set) */ +#if 0 + rwnx_set_rf_params(rwnx_hw, wiphy); +#endif + return 0; +} + +void rwnx_custregd(struct rwnx_hw *rwnx_hw, struct wiphy *wiphy) +{ +// For older kernel version, the custom regulatory is applied before the wiphy +// registration (in rwnx_set_wiphy_params()), so nothing has to be done here + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 0, 0) + wiphy->regulatory_flags |= REGULATORY_IGNORE_STALE_KICKOFF; + wiphy->regulatory_flags |= REGULATORY_WIPHY_SELF_MANAGED; + + if (!rwnx_hw->mod_params->custregd) + return; + + rtnl_lock(); +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 12, 0) + if (regulatory_set_wiphy_regd_sync(wiphy, getRegdomainFromRwnxDB(wiphy, default_ccode))){ + wiphy_err(wiphy, "Failed to set custom regdomain\n"); + } +#else + if (regulatory_set_wiphy_regd_sync_rtnl(wiphy, getRegdomainFromRwnxDB(wiphy, default_ccode))){ + wiphy_err(wiphy, "Failed 
to set custom regdomain\n"); + } +#endif + else{ + wiphy_err(wiphy,"\n" + "*******************************************************\n" + "** CAUTION: USING PERMISSIVE CUSTOM REGULATORY RULES **\n" + "*******************************************************\n"); + } + rtnl_unlock(); +#endif +} diff --git a/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_mod_params.h b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_mod_params.h new file mode 100644 index 0000000..64aa62d --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_mod_params.h @@ -0,0 +1,76 @@ +/** + ****************************************************************************** + * + * @file rwnx_mod_params.h + * + * @brief Declaration of module parameters + * + * Copyright (C) RivieraWaves 2012-2019 + * + ****************************************************************************** + */ + +#ifndef _RWNX_MOD_PARAM_H_ +#define _RWNX_MOD_PARAM_H_ + +struct rwnx_mod_params { + bool ht_on; + bool vht_on; + bool he_on; + int mcs_map; + int he_mcs_map; + bool he_ul_on; + bool ldpc_on; + bool stbc_on; + bool gf_rx_on; + int phy_cfg; + int uapsd_timeout; + bool ap_uapsd_on; + bool sgi; + bool sgi80; + bool use_2040; + bool use_80; + bool custregd; + bool custchan; + int nss; + int amsdu_rx_max; + bool bfmee; + bool bfmer; + bool mesh; + bool murx; + bool mutx; + bool mutx_on; + unsigned int roc_dur_max; + int listen_itv; + bool listen_bcmc; + int lp_clk_ppm; + bool ps_on; + int tx_lft; + int amsdu_maxnb; + int uapsd_queues; + bool tdls; + bool uf; + bool auto_reply; + char *ftl; + bool dpsm; +#ifdef CONFIG_RWNX_FULLMAC + bool ant_div; +#endif /* CONFIG_RWNX_FULLMAC */ +}; + +extern struct rwnx_mod_params rwnx_mod_params; + +struct rwnx_hw; +struct wiphy; +int rwnx_handle_dynparams(struct rwnx_hw *rwnx_hw, struct wiphy *wiphy); +void rwnx_custregd(struct rwnx_hw *rwnx_hw, struct wiphy *wiphy); +void rwnx_enable_wapi(struct rwnx_hw *rwnx_hw); +void rwnx_enable_mfp(struct rwnx_hw *rwnx_hw); +struct ieee80211_regdomain *getRegdomainFromRwnxDB(struct wiphy *wiphy, + char *alpha2); + +struct ieee80211_regdomain *getRegdomainFromRwnxDBIndex(struct wiphy *wiphy, + int index); + + +#endif /* _RWNX_MOD_PARAM_H_ */ diff --git a/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_msg_rx.c b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_msg_rx.c new file mode 100755 index 0000000..6b1c7be --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_msg_rx.c @@ -0,0 +1,1712 @@ +/** + **************************************************************************************** + * + * @file rwnx_msg_rx.c + * + * @brief RX function definitions + * + * Copyright (C) RivieraWaves 2012-2019 + * + **************************************************************************************** + */ + +#include +#include "rwnx_defs.h" +#include "rwnx_prof.h" +#include "rwnx_tx.h" +#ifdef CONFIG_RWNX_BFMER +#include "rwnx_bfmer.h" +#endif //(CONFIG_RWNX_BFMER) +#ifdef CONFIG_RWNX_FULLMAC +#include "rwnx_debugfs.h" +#include "rwnx_msg_tx.h" +#include "rwnx_tdls.h" +#endif /* CONFIG_RWNX_FULLMAC */ +#include "rwnx_events.h" +#include "rwnx_compat.h" +#include "aicwf_txrxif.h" +#ifdef CONFIG_USE_WIRELESS_EXT +#include "aicwf_wext_linux.h" +#endif +void rwnx_cfg80211_unlink_bss(struct rwnx_hw *rwnx_hw, struct rwnx_vif *rwnx_vif); + +static int rwnx_freq_to_idx(struct rwnx_hw *rwnx_hw, int freq) +{ + struct ieee80211_supported_band *sband; + int band, ch, idx = 0; + + for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) { +#ifdef 
CONFIG_RWNX_FULLMAC + sband = rwnx_hw->wiphy->bands[band]; +#endif /* CONFIG_RWNX_FULLMAC */ + if (!sband) { + continue; + } + + for (ch = 0; ch < sband->n_channels; ch++, idx++) { + if (sband->channels[ch].center_freq == freq) { + goto exit; + } + } + } + + BUG_ON(1); + +exit: + // Channel has been found, return the index + return idx; +} + +/*************************************************************************** + * Messages from MM task + **************************************************************************/ +static inline int rwnx_rx_chan_pre_switch_ind(struct rwnx_hw *rwnx_hw, + struct rwnx_cmd *cmd, + struct ipc_e2a_msg *msg) +{ + struct rwnx_vif *rwnx_vif; + int chan_idx = ((struct mm_channel_pre_switch_ind *)msg->param)->chan_index; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + REG_SW_SET_PROFILING_CHAN(rwnx_hw, SW_PROF_CHAN_CTXT_PSWTCH_BIT); + +#ifdef CONFIG_RWNX_FULLMAC + list_for_each_entry(rwnx_vif, &rwnx_hw->vifs, list) { + if (rwnx_vif->up && rwnx_vif->ch_index == chan_idx) { + AICWFDBG(LOGDEBUG, "rwnx_txq_vif_stop\r\n"); + rwnx_txq_vif_stop(rwnx_vif, RWNX_TXQ_STOP_CHAN, rwnx_hw); + } + } +#endif /* CONFIG_RWNX_FULLMAC */ + + REG_SW_CLEAR_PROFILING_CHAN(rwnx_hw, SW_PROF_CHAN_CTXT_PSWTCH_BIT); + + return 0; +} + +static inline int rwnx_rx_chan_switch_ind(struct rwnx_hw *rwnx_hw, + struct rwnx_cmd *cmd, + struct ipc_e2a_msg *msg) +{ + struct rwnx_vif *rwnx_vif; + int chan_idx = ((struct mm_channel_switch_ind *)msg->param)->chan_index; + bool roc = ((struct mm_channel_switch_ind *)msg->param)->roc; + bool roc_tdls = ((struct mm_channel_switch_ind *)msg->param)->roc_tdls; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + REG_SW_SET_PROFILING_CHAN(rwnx_hw, SW_PROF_CHAN_CTXT_SWTCH_BIT); + +#ifdef CONFIG_RWNX_FULLMAC + if (roc_tdls) { + u8 vif_index = ((struct mm_channel_switch_ind *)msg->param)->vif_index; + list_for_each_entry(rwnx_vif, &rwnx_hw->vifs, list) { + if (rwnx_vif->vif_index == vif_index) { + rwnx_vif->roc_tdls = true; + rwnx_txq_tdls_sta_start(rwnx_vif, RWNX_TXQ_STOP_CHAN, rwnx_hw); + } + } + } else if (!roc) { + list_for_each_entry(rwnx_vif, &rwnx_hw->vifs, list) { + if (rwnx_vif->up && rwnx_vif->ch_index == chan_idx) { + rwnx_txq_vif_start(rwnx_vif, RWNX_TXQ_STOP_CHAN, rwnx_hw); + } + } + } else { + /* Retrieve the allocated RoC element */ + struct rwnx_roc_elem *roc_elem = rwnx_hw->roc_elem; + if (roc_elem) { + /* If mgmt_roc is true, remain on channel has been started by ourself */ + if (!roc_elem->mgmt_roc) { + /* Inform the host that we have switch on the indicated off-channel */ + #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0) + cfg80211_ready_on_channel(roc_elem->wdev->netdev, (u64)(rwnx_hw->roc_cookie_cnt), + roc_elem->chan, NL80211_CHAN_HT20, roc_elem->duration, GFP_ATOMIC); + #elif LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0) + cfg80211_ready_on_channel(roc_elem->wdev, (u64)(rwnx_hw->roc_cookie_cnt), + roc_elem->chan, NL80211_CHAN_HT20, roc_elem->duration, GFP_ATOMIC); + #else + cfg80211_ready_on_channel(roc_elem->wdev, (u64)(rwnx_hw->roc_cookie_cnt), + roc_elem->chan, roc_elem->duration, GFP_ATOMIC); + #endif + } + + /* Keep in mind that we have switched on the channel */ + roc_elem->on_chan = true; + } else { + printk("roc_elem == null\n"); + } + + // Enable traffic on OFF channel queue + rwnx_txq_offchan_start(rwnx_hw); + } + + tasklet_schedule(&rwnx_hw->task); + + rwnx_hw->cur_chanctx = chan_idx; + rwnx_radar_detection_enable_on_cur_channel(rwnx_hw); + +#endif /* CONFIG_RWNX_FULLMAC */ + + REG_SW_CLEAR_PROFILING_CHAN(rwnx_hw, SW_PROF_CHAN_CTXT_SWTCH_BIT); + + 
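    /* Editorial note: at this point the channel-switch indication has been
     * fully handled: per-VIF TX queues were restarted (or the off-channel
     * queue enabled for a remain-on-channel request), cur_chanctx was
     * updated and radar detection re-evaluated for the new channel, and the
     * profiling bit was cleared above. */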
return 0; +} + +static inline int rwnx_rx_tdls_chan_switch_cfm(struct rwnx_hw *rwnx_hw, + struct rwnx_cmd *cmd, + struct ipc_e2a_msg *msg) +{ + return 0; +} + +static inline int rwnx_rx_tdls_chan_switch_ind(struct rwnx_hw *rwnx_hw, + struct rwnx_cmd *cmd, + struct ipc_e2a_msg *msg) +{ +#ifdef CONFIG_RWNX_FULLMAC + // Enable traffic on OFF channel queue + rwnx_txq_offchan_start(rwnx_hw); + + return 0; +#endif +} + +static inline int rwnx_rx_tdls_chan_switch_base_ind(struct rwnx_hw *rwnx_hw, + struct rwnx_cmd *cmd, + struct ipc_e2a_msg *msg) +{ + struct rwnx_vif *rwnx_vif; + u8 vif_index = ((struct tdls_chan_switch_base_ind *)msg->param)->vif_index; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + +#ifdef CONFIG_RWNX_FULLMAC + list_for_each_entry(rwnx_vif, &rwnx_hw->vifs, list) { + if (rwnx_vif->vif_index == vif_index) { + rwnx_vif->roc_tdls = false; + rwnx_txq_tdls_sta_stop(rwnx_vif, RWNX_TXQ_STOP_CHAN, rwnx_hw); + } + } + return 0; +#endif +} + +static inline int rwnx_rx_tdls_peer_ps_ind(struct rwnx_hw *rwnx_hw, + struct rwnx_cmd *cmd, + struct ipc_e2a_msg *msg) +{ + struct rwnx_vif *rwnx_vif; + u8 vif_index = ((struct tdls_peer_ps_ind *)msg->param)->vif_index; + bool ps_on = ((struct tdls_peer_ps_ind *)msg->param)->ps_on; + +#ifdef CONFIG_RWNX_FULLMAC + list_for_each_entry(rwnx_vif, &rwnx_hw->vifs, list) { + if (rwnx_vif->vif_index == vif_index) { + rwnx_vif->sta.tdls_sta->tdls.ps_on = ps_on; + // Update PS status for the TDLS station + rwnx_ps_bh_enable(rwnx_hw, rwnx_vif->sta.tdls_sta, ps_on); + } + } + + return 0; +#endif +} + +static inline int rwnx_rx_remain_on_channel_exp_ind(struct rwnx_hw *rwnx_hw, + struct rwnx_cmd *cmd, + struct ipc_e2a_msg *msg) +{ +#ifdef CONFIG_RWNX_FULLMAC + /* Retrieve the allocated RoC element */ + struct rwnx_roc_elem *roc_elem = rwnx_hw->roc_elem; + /* Get VIF on which RoC has been started */ + struct rwnx_vif *rwnx_vif = NULL;// = container_of(roc_elem->wdev, struct rwnx_vif, wdev); + + RWNX_DBG(RWNX_FN_ENTRY_STR); + if(roc_elem == NULL){ + AICWFDBG(LOGERROR,"%s roc_elem == NULL \r\n", __func__); + return 0; + } + + rwnx_vif = container_of(roc_elem->wdev, struct rwnx_vif, wdev); + + +#ifdef CREATE_TRACE_POINTS + /* For debug purpose (use ftrace kernel option) */ + trace_roc_exp(rwnx_vif->vif_index); +#endif + /* If mgmt_roc is true, remain on channel has been started by ourself */ + /* If RoC has been cancelled before we switched on channel, do not call cfg80211 */ + if (!roc_elem->mgmt_roc && roc_elem->on_chan) { + /* Inform the host that off-channel period has expired */ + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0) + cfg80211_remain_on_channel_expired(roc_elem->wdev->netdev, (u64)(rwnx_hw->roc_cookie_cnt), + roc_elem->chan, NL80211_CHAN_HT20, GFP_ATOMIC); +#elif LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0) + cfg80211_remain_on_channel_expired(roc_elem->wdev, (u64)(rwnx_hw->roc_cookie_cnt), + roc_elem->chan, NL80211_CHAN_HT20, GFP_ATOMIC); +#else + cfg80211_remain_on_channel_expired(roc_elem->wdev, (u64)(rwnx_hw->roc_cookie_cnt), + roc_elem->chan, GFP_ATOMIC); +#endif + } + + /* De-init offchannel TX queue */ + rwnx_txq_offchan_deinit(rwnx_vif); + + /* Increase the cookie counter cannot be zero */ + rwnx_hw->roc_cookie_cnt++; + + if (rwnx_hw->roc_cookie_cnt == 0) { + rwnx_hw->roc_cookie_cnt = 1; + } + + /* Free the allocated RoC element */ + kfree(roc_elem); + rwnx_hw->roc_elem = NULL; + + AICWFDBG(LOGTRACE, "%s exit\r\n", __func__); +#endif /* CONFIG_RWNX_FULLMAC */ + return 0; +} + +static inline int rwnx_rx_p2p_vif_ps_change_ind(struct rwnx_hw *rwnx_hw, + 
struct rwnx_cmd *cmd, + struct ipc_e2a_msg *msg) +{ + int vif_idx = ((struct mm_p2p_vif_ps_change_ind *)msg->param)->vif_index; + int ps_state = ((struct mm_p2p_vif_ps_change_ind *)msg->param)->ps_state; + struct rwnx_vif *vif_entry; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + +#ifdef CONFIG_RWNX_FULLMAC + vif_entry = rwnx_hw->vif_table[vif_idx]; + + if (vif_entry) { + goto found_vif; + } +#endif /* CONFIG_RWNX_FULLMAC */ + + goto exit; + +found_vif: + +#ifdef CONFIG_RWNX_FULLMAC + if (ps_state == MM_PS_MODE_OFF) { + // Start TX queues for provided VIF + rwnx_txq_vif_start(vif_entry, RWNX_TXQ_STOP_VIF_PS, rwnx_hw); + tasklet_schedule(&rwnx_hw->task); + } + + else { + // Stop TX queues for provided VIF + rwnx_txq_vif_stop(vif_entry, RWNX_TXQ_STOP_VIF_PS, rwnx_hw); + } +#endif /* CONFIG_RWNX_FULLMAC */ + +exit: + return 0; +} + +static inline int rwnx_rx_channel_survey_ind(struct rwnx_hw *rwnx_hw, + struct rwnx_cmd *cmd, + struct ipc_e2a_msg *msg) +{ + struct mm_channel_survey_ind *ind = (struct mm_channel_survey_ind *)msg->param; + // Get the channel index + int idx = rwnx_freq_to_idx(rwnx_hw, ind->freq); + // Get the survey + struct rwnx_survey_info *rwnx_survey; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + if (idx > ARRAY_SIZE(rwnx_hw->survey)) + return 0; + + rwnx_survey = &rwnx_hw->survey[idx]; + + // Store the received parameters + rwnx_survey->chan_time_ms = ind->chan_time_ms; + rwnx_survey->chan_time_busy_ms = ind->chan_time_busy_ms; + rwnx_survey->noise_dbm = ind->noise_dbm; + rwnx_survey->filled = (SURVEY_INFO_TIME | + SURVEY_INFO_TIME_BUSY); + + if (ind->noise_dbm != 0) { + rwnx_survey->filled |= SURVEY_INFO_NOISE_DBM; + } + + return 0; +} + +static inline int rwnx_rx_p2p_noa_upd_ind(struct rwnx_hw *rwnx_hw, + struct rwnx_cmd *cmd, + struct ipc_e2a_msg *msg) +{ + return 0; +} + +static inline int rwnx_rx_rssi_status_ind(struct rwnx_hw *rwnx_hw, + struct rwnx_cmd *cmd, + struct ipc_e2a_msg *msg) +{ + struct mm_rssi_status_ind *ind = (struct mm_rssi_status_ind *)msg->param; + int vif_idx = ind->vif_index; + bool rssi_status = ind->rssi_status; + + struct rwnx_vif *vif_entry; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + +#ifdef CONFIG_RWNX_FULLMAC + vif_entry = rwnx_hw->vif_table[vif_idx]; + if (vif_entry) { + cfg80211_cqm_rssi_notify(vif_entry->ndev, + rssi_status ? 
NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW : + NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH, + ind->rssi, GFP_ATOMIC); + } +#endif /* CONFIG_RWNX_FULLMAC */ + + return 0; +} + +static inline int rwnx_rx_pktloss_notify_ind(struct rwnx_hw *rwnx_hw, + struct rwnx_cmd *cmd, + struct ipc_e2a_msg *msg) +{ +#ifdef CONFIG_RWNX_FULLMAC + struct mm_pktloss_ind *ind = (struct mm_pktloss_ind *)msg->param; + struct rwnx_vif *vif_entry; + int vif_idx = ind->vif_index; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + vif_entry = rwnx_hw->vif_table[vif_idx]; + if (vif_entry) { + cfg80211_cqm_pktloss_notify(vif_entry->ndev, (const u8 *)ind->mac_addr.array, + ind->num_packets, GFP_ATOMIC); + } +#endif /* CONFIG_RWNX_FULLMAC */ + + return 0; +} + +static inline int rwnx_radar_detect_ind(struct rwnx_hw *rwnx_hw, + struct rwnx_cmd *cmd, + struct ipc_e2a_msg *msg) +{ + struct radar_pulse_array_desc *pulses = (struct radar_pulse_array_desc *)msg->param; + int i; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + //printk("%s\n", __func__); + + if(pulses->cnt == 0) { + printk("cnt error\n"); + return -1; + } + + if(rwnx_radar_detection_is_enable(&rwnx_hw->radar, pulses->idx)) { + for(i = 0; i < pulses->cnt; i++) { + struct rwnx_radar_pulses *p = &rwnx_hw->radar.pulses[pulses->idx]; + + p->buffer[p->index] = pulses->pulse[i]; + p->index = (p->index + 1)%RWNX_RADAR_PULSE_MAX; + if(p->count < RWNX_RADAR_PULSE_MAX) + p->count++; + //printk("pulse=%x\n", pulses->pulse[i]); + } + + if(!work_pending(&rwnx_hw->radar.detection_work)) + schedule_work(&rwnx_hw->radar.detection_work); + } else + printk("not enable\n"); + + return 0; +} + +static inline int rwnx_apm_staloss_ind(struct rwnx_hw *rwnx_hw, + struct rwnx_cmd *cmd, + struct ipc_e2a_msg *msg) +{ + struct mm_apm_staloss_ind *ind = (struct mm_apm_staloss_ind *)msg->param; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + memcpy(rwnx_hw->sta_mac_addr, ind->mac_addr, 6); + rwnx_hw->apm_vif_idx = ind->vif_idx; + + queue_work(rwnx_hw->apmStaloss_wq, &rwnx_hw->apmStalossWork); + + return 0; +} + + +static inline int rwnx_rx_csa_counter_ind(struct rwnx_hw *rwnx_hw, + struct rwnx_cmd *cmd, + struct ipc_e2a_msg *msg) +{ + struct mm_csa_counter_ind *ind = (struct mm_csa_counter_ind *)msg->param; + struct rwnx_vif *vif; + bool found = false; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + // Look for VIF entry + list_for_each_entry(vif, &rwnx_hw->vifs, list) { + if (vif->vif_index == ind->vif_index) { + found=true; + break; + } + } + + if (found) { +#ifdef CONFIG_RWNX_FULLMAC + if (vif->ap.csa) + vif->ap.csa->count = ind->csa_count; + else + netdev_err(vif->ndev, "CSA counter update but no active CSA"); + +#endif + } + + return 0; +} + +#ifdef CONFIG_RWNX_FULLMAC +static inline int rwnx_rx_csa_finish_ind(struct rwnx_hw *rwnx_hw, + struct rwnx_cmd *cmd, + struct ipc_e2a_msg *msg) +{ + struct mm_csa_finish_ind *ind = (struct mm_csa_finish_ind *)msg->param; + struct rwnx_vif *vif; + bool found = false; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + // Look for VIF entry + list_for_each_entry(vif, &rwnx_hw->vifs, list) { + if (vif->vif_index == ind->vif_index) { + found=true; + break; + } + } + + if (found) { + if (RWNX_VIF_TYPE(vif) == NL80211_IFTYPE_AP || + RWNX_VIF_TYPE(vif) == NL80211_IFTYPE_P2P_GO) { + if (vif->ap.csa) { + vif->ap.csa->status = ind->status; + vif->ap.csa->ch_idx = ind->chan_idx; + schedule_work(&vif->ap.csa->work); + } else + netdev_err(vif->ndev, "CSA finish indication but no active CSA"); + } else { + if (ind->status == 0) { + rwnx_chanctx_unlink(vif); + rwnx_chanctx_link(vif, ind->chan_idx, NULL); + if (rwnx_hw->cur_chanctx == ind->chan_idx) {
+ rwnx_radar_detection_enable_on_cur_channel(rwnx_hw); + rwnx_txq_vif_start(vif, RWNX_TXQ_STOP_CHAN, rwnx_hw); + } else + rwnx_txq_vif_stop(vif, RWNX_TXQ_STOP_CHAN, rwnx_hw); + } + } + } + + return 0; +} + +static inline int rwnx_rx_csa_traffic_ind(struct rwnx_hw *rwnx_hw, + struct rwnx_cmd *cmd, + struct ipc_e2a_msg *msg) +{ + struct mm_csa_traffic_ind *ind = (struct mm_csa_traffic_ind *)msg->param; + struct rwnx_vif *vif; + bool found = false; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + // Look for VIF entry + list_for_each_entry(vif, &rwnx_hw->vifs, list) { + if (vif->vif_index == ind->vif_index) { + found=true; + break; + } + } + + if (found) { + if (ind->enable) + rwnx_txq_vif_start(vif, RWNX_TXQ_STOP_CSA, rwnx_hw); + else + rwnx_txq_vif_stop(vif, RWNX_TXQ_STOP_CSA, rwnx_hw); + } + + return 0; +} + +static inline int rwnx_rx_ps_change_ind(struct rwnx_hw *rwnx_hw, + struct rwnx_cmd *cmd, + struct ipc_e2a_msg *msg) +{ + struct mm_ps_change_ind *ind = (struct mm_ps_change_ind *)msg->param; + struct rwnx_sta *sta = &rwnx_hw->sta_table[ind->sta_idx]; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + +#if 1//2022-01-15 add for rwnx_hw->vif_table[sta->vif_idx] if null when rwnx_close + if (!rwnx_hw->vif_table[sta->vif_idx]){ + wiphy_err(rwnx_hw->wiphy, "rwnx_hw->vif_table[sta->vif_idx] is null\n"); + return 0; + } +#endif + + if (ind->sta_idx >= (NX_REMOTE_STA_MAX + NX_VIRT_DEV_MAX)) { + wiphy_err(rwnx_hw->wiphy, "Invalid sta index reported by fw %d\n", + ind->sta_idx); + return 1; + } + + netdev_dbg(rwnx_hw->vif_table[sta->vif_idx]->ndev, + "Sta %d, change PS mode to %s", sta->sta_idx, + ind->ps_state ? "ON" : "OFF"); + + if (sta->valid) { + rwnx_ps_bh_enable(rwnx_hw, sta, ind->ps_state); + } else if (rwnx_hw->adding_sta) { + sta->ps.active = ind->ps_state ? true : false; + } else { + netdev_err(rwnx_hw->vif_table[sta->vif_idx]->ndev, + "Ignore PS mode change on invalid sta\n"); + } + + + return 0; +} + + +static inline int rwnx_rx_traffic_req_ind(struct rwnx_hw *rwnx_hw, + struct rwnx_cmd *cmd, + struct ipc_e2a_msg *msg) +{ + struct mm_traffic_req_ind *ind = (struct mm_traffic_req_ind *)msg->param; + struct rwnx_sta *sta = &rwnx_hw->sta_table[ind->sta_idx]; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + netdev_dbg(rwnx_hw->vif_table[sta->vif_idx]->ndev, + "Sta %d, asked for %d pkt", sta->sta_idx, ind->pkt_cnt); + + rwnx_ps_bh_traffic_req(rwnx_hw, sta, ind->pkt_cnt, + ind->uapsd ? 
UAPSD_ID : LEGACY_PS_ID); + + return 0; +} +#endif /* CONFIG_RWNX_FULLMAC */ + +/*************************************************************************** + * Messages from SCAN task + **************************************************************************/ +#if 0 +static inline int rwnx_rx_scan_done_ind(struct rwnx_hw *rwnx_hw, + struct rwnx_cmd *cmd, + struct ipc_e2a_msg *msg) +{ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0) + struct cfg80211_scan_info info = { + .aborted = false, + }; +#endif + RWNX_DBG(RWNX_FN_ENTRY_STR); + + rwnx_ipc_elem_var_deallocs(rwnx_hw, &rwnx_hw->scan_ie); + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0) + ieee80211_scan_completed(rwnx_hw->hw, &info); +#else + ieee80211_scan_completed(rwnx_hw->hw, false); +#endif + + return 0; +} +#endif + +/*************************************************************************** + * Messages from SCANU task + **************************************************************************/ +#ifdef CONFIG_RWNX_FULLMAC +static inline int rwnx_rx_scanu_start_cfm(struct rwnx_hw *rwnx_hw, + struct rwnx_cmd *cmd, + struct ipc_e2a_msg *msg) +{ + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0) + struct cfg80211_scan_info info = { + .aborted = false, + }; +#endif + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + if (rwnx_hw->scan_request +#ifdef CONFIG_SCHED_SCAN + && !rwnx_hw->is_sched_scan +#endif//CONFIG_SCHED_SCAN +#ifdef CONFIG_USE_WIRELESS_EXT + && !rwnx_hw->wext_scan) { +#else + ){ +#endif + + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0) + cfg80211_scan_done(rwnx_hw->scan_request, &info); +#else + cfg80211_scan_done(rwnx_hw->scan_request, false); +#endif + } + + +#ifdef CONFIG_USE_WIRELESS_EXT + else if(rwnx_hw->wext_scan){ + rwnx_hw->wext_scan = 0; + AICWFDBG(LOGDEBUG, "%s rwnx_hw->wext_scan done!!\r\n", __func__); + if(rwnx_hw->scan_request){ + vfree(rwnx_hw->scan_request); + } + complete(&rwnx_hw->wext_scan_com); + } +#endif + else { + AICWFDBG(LOGERROR, "%s rwnx_hw->scan_request is NULL!!\r\n", __func__); + } + +#ifdef CONFIG_SCHED_SCAN + if(rwnx_hw->is_sched_scan){ + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) + AICWFDBG(LOGINFO, "%s cfg80211_sched_scan_results \r\n", __func__); + cfg80211_sched_scan_results(rwnx_hw->scan_request->wiphy, + rwnx_hw->sched_scan_req->reqid); +#else + cfg80211_sched_scan_results(rwnx_hw->sched_scan_req->wiphy); +#endif + kfree(rwnx_hw->scan_request); + rwnx_hw->is_sched_scan = false; + } +#endif//CONFIG_SCHED_SCAN + + rwnx_hw->scan_request = NULL; + rwnx_hw->scanning = 0; + + return 0; +} + +static inline int rwnx_rx_scanu_result_ind(struct rwnx_hw *rwnx_hw, + struct rwnx_cmd *cmd, + struct ipc_e2a_msg *msg) +{ + struct cfg80211_bss *bss = NULL; + struct ieee80211_channel *chan; + struct scanu_result_ind *ind = (struct scanu_result_ind *)msg->param; + struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)ind->payload; + u64 tsf; + u8 *ie; + size_t ielen; + u16 capability, beacon_interval; + u16 len = ind->length; + +#if 0 + const u8 *ie = mgmt->u.beacon.variable; + char *ssid = NULL; + int ssid_len = 0; + int freq = 0; +#endif + +#ifdef CONFIG_USE_WIRELESS_EXT + struct scanu_result_wext *scan_re_wext; +#endif + + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + chan = ieee80211_get_channel(rwnx_hw->wiphy, ind->center_freq); + + if (chan != NULL) { +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0) + struct timespec ts; + get_monotonic_boottime(&ts); + tsf = (u64)ts.tv_sec * 1000000 + div_u64(ts.tv_nsec, 1000); + mgmt->u.probe_resp.timestamp = ((u64)ts.tv_sec*1000000) + ts.tv_nsec/1000; +#elif 
LINUX_VERSION_CODE < KERNEL_VERSION(5, 6, 0) + struct timespec ts; + ts = ktime_to_timespec(ktime_get_boottime()); + tsf = (u64)ts.tv_sec * 1000000 + div_u64(ts.tv_nsec, 1000); + mgmt->u.probe_resp.timestamp = tsf; +#else + struct timespec64 ts; + ts = ktime_to_timespec64(ktime_get_boottime()); + tsf = (u64)ts.tv_sec * 1000000 + div_u64(ts.tv_nsec, 1000); + mgmt->u.probe_resp.timestamp = tsf; +#endif + ie = mgmt->u.probe_resp.variable; + ielen = len - offsetof(struct ieee80211_mgmt, u.probe_resp.variable); + beacon_interval = le16_to_cpu(mgmt->u.probe_resp.beacon_int); + capability = le16_to_cpu(mgmt->u.probe_resp.capab_info); + /* framework use system bootup time */ + bss = cfg80211_inform_bss(rwnx_hw->wiphy, chan, +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)) + CFG80211_BSS_FTYPE_UNKNOWN, +#endif + mgmt->bssid, tsf, capability, beacon_interval, + ie, ielen, ind->rssi * 100, GFP_ATOMIC); + +#if 0 + //print scan result info start + if(ie != NULL && bss != NULL){ + ssid_len = ie[1]; + ssid = (char *)vmalloc(sizeof(char)* (ssid_len + 1)); + if(ssid != NULL){ + memset(ssid, 0, ssid_len + 1); + memcpy(ssid, &ie[2], ssid_len); + freq = ind->center_freq; + AICWFDBG(LOGDEBUG, "%s %02x:%02x:%02x:%02x:%02x:%02x ssid:%s freq:%d timestamp:%ld\r\n", __func__, + bss->bssid[0],bss->bssid[1],bss->bssid[2], + bss->bssid[3],bss->bssid[4],bss->bssid[5], + ssid, freq, (long)mgmt->u.probe_resp.timestamp); + vfree(ssid); + ssid = NULL; + }else{ + AICWFDBG(LOGERROR, "%s ssid vmalloc fail skip printk ssid info \r\n", __func__); + } + } + //print scan result info end +#endif + +#ifdef CONFIG_USE_WIRELESS_EXT + if (!bss || !bss->bssid) { + AICWFDBG(LOGERROR, "%s: Invalid BSS structure\n", __func__); + goto putbss; + } + list_for_each_entry(scan_re_wext, &rwnx_hw->wext_scanre_list, scanu_re_list) { + if (!scan_re_wext || !scan_re_wext->bss || !scan_re_wext->bss->bssid) { + AICWFDBG(LOGDEBUG, "%s: Corrupted list entry detected\n", __func__); + continue; + } + if (!memcmp(scan_re_wext->bss->bssid, bss->bssid, ETH_ALEN)) { + AICWFDBG(LOGDEBUG, "%s: BSSID already exists, no need to add again\r\n", __func__); + goto putbss; + } + } + scan_re_wext = (struct scanu_result_wext *)vmalloc(sizeof(struct scanu_result_wext)); + scan_re_wext->ind = (struct scanu_result_ind *)vmalloc(sizeof(struct scanu_result_ind)); + scan_re_wext->payload = (u32_l *)vmalloc(sizeof(u32_l) * ind->length); + + memset(scan_re_wext->ind, 0, sizeof(struct scanu_result_ind)); + memset(scan_re_wext->payload, 0, ind->length); + + memcpy(scan_re_wext->ind, ind, sizeof(struct scanu_result_ind)); + memcpy(scan_re_wext->payload, ind->payload, ind->length); + + scan_re_wext->bss = bss; + + INIT_LIST_HEAD(&scan_re_wext->scanu_re_list); + list_add_tail(&scan_re_wext->scanu_re_list, &rwnx_hw->wext_scanre_list); + return 0; +#endif + + } +putbss: + if (bss != NULL) +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0) + cfg80211_put_bss(bss); +#else + cfg80211_put_bss(rwnx_hw->wiphy, bss); +#endif + + return 0; +} +#endif /* CONFIG_RWNX_FULLMAC */ + +/*************************************************************************** + * Messages from ME task + **************************************************************************/ +#ifdef CONFIG_RWNX_FULLMAC +static inline int rwnx_rx_me_tkip_mic_failure_ind(struct rwnx_hw *rwnx_hw, + struct rwnx_cmd *cmd, + struct ipc_e2a_msg *msg) +{ + struct me_tkip_mic_failure_ind *ind = (struct me_tkip_mic_failure_ind *)msg->param; + struct rwnx_vif *rwnx_vif = rwnx_hw->vif_table[ind->vif_idx]; + struct net_device *dev = 
rwnx_vif->ndev; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + cfg80211_michael_mic_failure(dev, (u8 *)&ind->addr, (ind->ga?NL80211_KEYTYPE_GROUP: + NL80211_KEYTYPE_PAIRWISE), ind->keyid, + (u8 *)&ind->tsc, GFP_ATOMIC); + + return 0; +} + +static inline int rwnx_rx_me_tx_credits_update_ind(struct rwnx_hw *rwnx_hw, + struct rwnx_cmd *cmd, + struct ipc_e2a_msg *msg) +{ + struct me_tx_credits_update_ind *ind = (struct me_tx_credits_update_ind *)msg->param; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + rwnx_txq_credit_update(rwnx_hw, ind->sta_idx, ind->tid, ind->credits); + + return 0; +} +#endif /* CONFIG_RWNX_FULLMAC */ + +/*************************************************************************** + * Messages from SM task + **************************************************************************/ +#ifdef CONFIG_RWNX_FULLMAC + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0) +static inline void cfg80211_chandef_create(struct cfg80211_chan_def *chandef, + struct ieee80211_channel *chan, + enum nl80211_channel_type chan_type) +{ + if (WARN_ON(!chan)) + return; + chandef->chan = chan; + chandef->center_freq2 = 0; + switch (chan_type) { + case NL80211_CHAN_NO_HT: + chandef->width = NL80211_CHAN_WIDTH_20_NOHT; + chandef->center_freq1 = chan->center_freq; + break; + case NL80211_CHAN_HT20: + chandef->width = NL80211_CHAN_WIDTH_20; + chandef->center_freq1 = chan->center_freq; + break; + case NL80211_CHAN_HT40PLUS: + chandef->width = NL80211_CHAN_WIDTH_40; + chandef->center_freq1 = chan->center_freq + 10; + break; + case NL80211_CHAN_HT40MINUS: + chandef->width = NL80211_CHAN_WIDTH_40; + chandef->center_freq1 = chan->center_freq - 10; + break; + default: + WARN_ON(1); + } +} +#endif + +static inline int rwnx_rx_sm_connect_ind(struct rwnx_hw *rwnx_hw, + struct rwnx_cmd *cmd, + struct ipc_e2a_msg *msg) +{ + struct sm_connect_ind *ind = (struct sm_connect_ind *)msg->param; + struct rwnx_vif *rwnx_vif = rwnx_hw->vif_table[ind->vif_idx]; + struct net_device *dev = NULL; + const u8 *req_ie, *rsp_ie; + const u8 *extcap_ie; + const struct ieee_types_extcap *extcap; + struct ieee80211_channel *chan; + //struct cfg80211_bss *bss = NULL; + struct wireless_dev *wdev = NULL; + //int retry_counter = 10; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + if(!rwnx_vif){ + AICWFDBG(LOGERROR, "%s rwnx_vif is null \r\n", __func__); + return 0; + } + dev = rwnx_vif->ndev; + wdev = dev->ieee80211_ptr; + + /* Retrieve IE addresses and lengths */ + req_ie = (const u8 *)ind->assoc_ie_buf; + rsp_ie = req_ie + ind->assoc_req_ie_len; + + + if (rwnx_vif->sta.external_auth) + rwnx_vif->sta.external_auth = false; + + + // Fill-in the AP information + AICWFDBG(LOGINFO, "%s ind->status_code:%d \r\n", __func__, ind->status_code); + if (ind->status_code == 0) + { + struct rwnx_sta *sta = &rwnx_hw->sta_table[ind->ap_idx]; + u8 txq_status; + struct cfg80211_chan_def chandef; + + sta->valid = true; + sta->sta_idx = ind->ap_idx; + sta->ch_idx = ind->ch_idx; + sta->vif_idx = ind->vif_idx; + sta->vlan_idx = sta->vif_idx; + sta->qos = ind->qos; + sta->acm = ind->acm; + sta->ps.active = false; + sta->aid = ind->aid; + sta->band = ind->band; + sta->width = ind->width; + sta->center_freq = ind->center_freq; + sta->center_freq1 = ind->center_freq1; + sta->center_freq2 = ind->center_freq2; + rwnx_vif->sta.ap = sta; + + chan = ieee80211_get_channel(rwnx_hw->wiphy, ind->center_freq); + cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_NO_HT); + if (!rwnx_hw->mod_params->ht_on) + chandef.width = NL80211_CHAN_WIDTH_20_NOHT; + else + chandef.width = chnl2bw[ind->width]; + 
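/*
 * Editor's note (illustrative sketch only, not part of this patch): the backported
 * cfg80211_chandef_create() defined above derives the 40 MHz block centre from the
 * primary 20 MHz centre by shifting it 10 MHz up (HT40+) or down (HT40-), because
 * the primary channel occupies one half of the 40 MHz block. A tiny sketch of that
 * arithmetic with hypothetical inputs; ht_center_freq1() is a made-up helper name.
 */
#if 0 /* standalone sketch */
#include <stdio.h>

/* offset is +1 for HT40+, -1 for HT40-, 0 for plain 20 MHz. */
static int ht_center_freq1(int primary_mhz, int offset)
{
	return primary_mhz + 10 * offset;
}

int main(void)
{
	/* channel 36 (5180 MHz) as HT40+ pairs with channel 40: centre 5190 MHz */
	printf("HT40+ on 5180 -> center1 %d\n", ht_center_freq1(5180, +1));
	/* channel 40 (5200 MHz) as HT40- pairs with channel 36: centre 5190 MHz */
	printf("HT40- on 5200 -> center1 %d\n", ht_center_freq1(5200, -1));
	return 0;
}
#endif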
chandef.center_freq1 = ind->center_freq1; + chandef.center_freq2 = ind->center_freq2; + rwnx_chanctx_link(rwnx_vif, ind->ch_idx, &chandef); + memcpy(sta->mac_addr, ind->bssid.array, ETH_ALEN); + if (ind->ch_idx == rwnx_hw->cur_chanctx) { + txq_status = 0; + } else { + txq_status = RWNX_TXQ_STOP_CHAN; + } + memcpy(sta->ac_param, ind->ac_param, sizeof(sta->ac_param)); + rwnx_txq_sta_init(rwnx_hw, sta, txq_status); + rwnx_txq_tdls_vif_init(rwnx_vif); +#ifdef CONFIG_DEBUG_FS + rwnx_dbgfs_register_rc_stat(rwnx_hw, sta); +#endif + rwnx_mu_group_sta_init(sta, NULL); + /* Look for TDLS Channel Switch Prohibited flag in the Extended Capability + * Information Element*/ + extcap_ie = cfg80211_find_ie(WLAN_EID_EXT_CAPABILITY, rsp_ie, ind->assoc_rsp_ie_len); + if (extcap_ie && extcap_ie[1] >= 5) { + extcap = (void *)(extcap_ie); + rwnx_vif->tdls_chsw_prohibited = extcap->ext_capab[4] & WLAN_EXT_CAPA5_TDLS_CH_SW_PROHIBITED; + } + + if(atomic_read(&rwnx_hw->sta_flowctrl[sta->sta_idx].tx_pending_cnt) > 0) { + AICWFDBG(LOGDEBUG, "sta idx %d fc error %d\n", sta->sta_idx, atomic_read(&rwnx_hw->sta_flowctrl[sta->sta_idx].tx_pending_cnt)); + } + +#ifdef CONFIG_RADAR_OR_IR_DETECT + if (chan->flags & IEEE80211_CHAN_RADAR) + rwnx_radar_detection_enable(&rwnx_hw->radar, + RWNX_RADAR_DETECT_REPORT, + RWNX_RADAR_RIU); +#endif + + if (rwnx_vif->wep_enabled) + rwnx_vif->wep_auth_err = false; + +#ifdef CONFIG_RWNX_BFMER + /* If Beamformer feature is activated, check if features can be used + * with the new peer device + */ + if (rwnx_hw->mod_params->bfmer) { + const u8 *vht_capa_ie; + const struct ieee80211_vht_cap *vht_cap; + + do { + /* Look for VHT Capability Information Element */ + vht_capa_ie = cfg80211_find_ie(WLAN_EID_VHT_CAPABILITY, rsp_ie, + ind->assoc_rsp_ie_len); + + /* Stop here if peer device does not support VHT */ + if (!vht_capa_ie) { + break; + } + + vht_cap = (const struct ieee80211_vht_cap *)(vht_capa_ie + 2); + + /* Send MM_BFMER_ENABLE_REQ message if needed */ + rwnx_send_bfmer_enable(rwnx_hw, sta, vht_cap); + } while (0); + } +#endif //(CONFIG_RWNX_BFMER) + +#ifdef CONFIG_RWNX_MON_DATA + // If there are 1 sta and 1 monitor interface active at the same time then + // monitor interface channel context is always the same as the STA interface. + // This doesn't work with 2 STA interfaces but we don't want to support it. 
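/*
 * Editor's note (illustrative sketch only, not part of this patch): the
 * cfg80211_find_ie() call above walks the association response IEs, which are a flat
 * sequence of (id, length, payload) triplets, and then tests one bit in the fifth
 * Extended Capabilities octet. A minimal sketch of that TLV walk over a hypothetical
 * buffer is kept disabled below; find_ie() and the sample buffer are made up for
 * illustration.
 */
#if 0 /* standalone sketch, hypothetical IE buffer */
#include <stdio.h>
#include <stddef.h>

/* Return a pointer to the first IE with the given id, or NULL.
 * Each IE is: buf[0] = id, buf[1] = payload length, buf[2..] = payload. */
static const unsigned char *find_ie(unsigned char id,
				    const unsigned char *buf, size_t len)
{
	size_t pos = 0;

	while (pos + 2 <= len && pos + 2 + buf[pos + 1] <= len) {
		if (buf[pos] == id)
			return &buf[pos];
		pos += 2 + buf[pos + 1];
	}
	return NULL;
}

int main(void)
{
	/* SSID IE (id 0) followed by an Extended Capabilities IE (id 127) */
	const unsigned char ies[] = { 0, 3, 'a', 'i', 'c',
				      127, 5, 0x00, 0x00, 0x00, 0x00, 0x80 };
	const unsigned char *ext = find_ie(127, ies, sizeof(ies));

	if (ext && ext[1] >= 5)
		printf("capability bit in 5th octet: %s\n",
		       (ext[2 + 4] & 0x80) ? "set" : "clear");
	return 0;
}
#endif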
+ if (rwnx_hw->monitor_vif != RWNX_INVALID_VIF) { + struct rwnx_vif *rwnx_mon_vif = rwnx_hw->vif_table[rwnx_hw->monitor_vif]; + rwnx_chanctx_unlink(rwnx_mon_vif); + rwnx_chanctx_link(rwnx_mon_vif, ind->ch_idx, NULL); + } +#endif + //atomic_set(&rwnx_vif->drv_conn_state, (int)RWNX_DRV_STATUS_CONNECTED); + + } else if (ind->status_code == WLAN_STATUS_NOT_SUPPORTED_AUTH_ALG) { + if (rwnx_vif->wep_enabled) { + rwnx_vif->wep_auth_err = true; + AICWFDBG(LOGINFO, "con ind wep_auth_err %d\n", rwnx_vif->wep_auth_err); + } + rwnx_set_conn_state(&rwnx_vif->drv_conn_state, (int)RWNX_DRV_STATUS_DISCONNECTED); + }else{ + rwnx_set_conn_state(&rwnx_vif->drv_conn_state, (int)RWNX_DRV_STATUS_DISCONNECTED); + } + + + AICWFDBG(LOGINFO, "%s ind->roamed:%d ind->status_code:%d rwnx_vif->drv_conn_state:%d\r\n", + __func__, + ind->roamed, + ind->status_code, + (int)atomic_read(&rwnx_vif->drv_conn_state)); + + if(ind->status_code == 0 && (int)atomic_read(&rwnx_vif->drv_conn_state) == RWNX_DRV_STATUS_DISCONNECTING){ + AICWFDBG(LOGINFO, "%s the disconnection has been requested, return it\r\n", __func__); + goto exit; + } + + + if (!ind->roamed) {//not roaming + cfg80211_connect_result(dev, (const u8 *)ind->bssid.array, req_ie, + ind->assoc_req_ie_len, rsp_ie, + ind->assoc_rsp_ie_len, ind->status_code, + GFP_ATOMIC); + if (ind->status_code == 0) { + rwnx_set_conn_state(&rwnx_vif->drv_conn_state, (int)RWNX_DRV_STATUS_CONNECTED); + } else { + rwnx_set_conn_state(&rwnx_vif->drv_conn_state, (int)RWNX_DRV_STATUS_DISCONNECTED); + rwnx_external_auth_disable(rwnx_vif); + } + AICWFDBG(LOGINFO, "%s cfg80211_connect_result pass, rwnx_vif->drv_conn_state:%d\r\n", __func__, (int)atomic_read(&rwnx_vif->drv_conn_state)); + } + else {//roaming + if(ind->status_code != 0){ + AICWFDBG(LOGINFO, "%s roaming fail to notify disconnect \r\n", __func__); + cfg80211_disconnected(dev, 0, NULL, 0,1, GFP_ATOMIC); + rwnx_set_conn_state(&rwnx_vif->drv_conn_state, (int)RWNX_DRV_STATUS_DISCONNECTED); + rwnx_external_auth_disable(rwnx_vif); + }else{ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) + struct cfg80211_roam_info info; + memset(&info, 0, sizeof(info)); + +#if LINUX_VERSION_CODE < HIGH_KERNEL_VERSION + if (rwnx_vif->ch_index < NX_CHAN_CTXT_CNT) + info.channel = rwnx_hw->chanctx_table[rwnx_vif->ch_index].chan_def.chan; + info.bssid = (const u8 *)ind->bssid.array; + +#else + if (rwnx_vif->ch_index < NX_CHAN_CTXT_CNT) + info.links[0].channel = rwnx_hw->chanctx_table[rwnx_vif->ch_index].chan_def.chan; + info.links[0].bssid = (const u8 *)ind->bssid.array;; +#endif//LINUX_VERSION_CODE < HIGH_KERNEL_VERSION + + info.req_ie = req_ie; + info.req_ie_len = ind->assoc_req_ie_len; + info.resp_ie = rsp_ie; + info.resp_ie_len = ind->assoc_rsp_ie_len; + AICWFDBG(LOGINFO, "%s roaming success to notify roam \r\n", __func__); + cfg80211_roamed(dev, &info, GFP_ATOMIC); +#else + chan = ieee80211_get_channel(rwnx_hw->wiphy, ind->center_freq); + AICWFDBG(LOGINFO, "%s roaming success to notify roam \r\n", __func__); + cfg80211_roamed(dev +#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39) || defined(COMPAT_KERNEL_RELEASE) + , chan +#endif + , (const u8 *)ind->bssid.array + , req_ie + , ind->assoc_req_ie_len + , rsp_ie + , ind->assoc_rsp_ie_len + , GFP_ATOMIC); +#endif /*LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)*/ + rwnx_set_conn_state(&rwnx_vif->drv_conn_state, (int)RWNX_DRV_STATUS_CONNECTED); + } + rwnx_vif->sta.is_roam = false; + } + + if (ind->status_code == 0) { + netif_tx_start_all_queues(dev); + netif_carrier_on(dev); + } + +exit: + 
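/*
 * Editor's note (illustrative sketch only, not part of this patch): the connect/roam
 * reporting above is selected at compile time with LINUX_VERSION_CODE checks, a
 * pattern used throughout this file. KERNEL_VERSION() simply packs the three version
 * numbers into one integer so such checks reduce to an ordinary comparison; a sketch
 * of the idea follows (the real macro lives in <linux/version.h>, MY_KERNEL_VERSION
 * is a made-up stand-in).
 */
#if 0 /* standalone sketch */
#include <stdio.h>

#define MY_KERNEL_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c))

int main(void)
{
	unsigned int code = MY_KERNEL_VERSION(4, 12, 0);

	if (code >= MY_KERNEL_VERSION(4, 12, 0))
		printf("use struct cfg80211_roam_info + cfg80211_roamed()\n");
	else
		printf("use the legacy cfg80211_roamed() argument list\n");
	return 0;
}
#endif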
rwnx_vif->sta.is_roam = false; +#ifdef CONFIG_DYNAMIC_PWR + rwnx_hw->sta_rssi_idx = ind->ap_idx; +#endif + + return 0; +} + +void rwnx_cfg80211_unlink_bss(struct rwnx_hw *rwnx_hw, struct rwnx_vif *rwnx_vif){ + struct wiphy *wiphy = rwnx_hw->wiphy; + struct cfg80211_bss *bss = NULL; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + bss = cfg80211_get_bss(wiphy, NULL/*notify_channel*/, + rwnx_vif->sta.bssid, rwnx_vif->sta.ssid, + rwnx_vif->sta.ssid_len, +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0) + IEEE80211_BSS_TYPE_ESS, + IEEE80211_PRIVACY(true));//temp set true +#else + WLAN_CAPABILITY_ESS, + WLAN_CAPABILITY_ESS); +#endif + + if (bss) { + cfg80211_unlink_bss(wiphy, bss); + AICWFDBG(LOGINFO, "%s(): cfg80211_unlink %s!!\n", __func__, rwnx_vif->sta.ssid); +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0) + cfg80211_put_bss(wiphy, bss); +#else + cfg80211_put_bss(bss); +#endif + }else{ + AICWFDBG(LOGINFO, "%s(): cfg80211_unlink error %s!!\n", __func__, rwnx_vif->sta.ssid); + } +} + +extern u8 dhcped; +static inline int rwnx_rx_sm_disconnect_ind(struct rwnx_hw *rwnx_hw, + struct rwnx_cmd *cmd, + struct ipc_e2a_msg *msg) +{ + struct sm_disconnect_ind *ind = (struct sm_disconnect_ind *)msg->param; + struct rwnx_vif *rwnx_vif = rwnx_hw->vif_table[ind->vif_idx]; + struct net_device *dev; +#ifdef AICWF_RX_REORDER + struct reord_ctrl_info *reord_info, *tmp; + u8 *macaddr; + struct aicwf_rx_priv *rx_priv; +#endif + + //RWNX_DBG(RWNX_FN_ENTRY_STR); + AICWFDBG(LOGINFO, "%s reason code:%d \r\n", __func__, ind->reason_code); + if((int)atomic_read(&rwnx_vif->drv_conn_state) == (int)RWNX_DRV_STATUS_DISCONNECTED){ + AICWFDBG(LOGINFO, "%s, is already disconnected, drop disconnect ind", __func__); + return 0; + } + + dhcped = 0; + + if(!rwnx_vif){ + AICWFDBG(LOGERROR, "%s rwnx_vif is null \r\n", __func__); + //atomic_set(&rwnx_vif->drv_conn_state, RWNX_DRV_STATUS_DISCONNECTED); + return 0; + } + dev = rwnx_vif->ndev; + if (rwnx_vif->sta.is_roam == false) { + rwnx_cfg80211_unlink_bss(rwnx_hw, rwnx_vif); + } else { + AICWFDBG(LOGINFO, "%s roaming no rwnx_cfg80211_unlink_bss \r\n", __func__); + } + +#ifdef CONFIG_DEBUG_FS + rwnx_dbgfs_unregister_rc_stat(rwnx_hw, rwnx_vif->sta.ap); +#endif + +#ifdef CONFIG_BR_SUPPORT + struct rwnx_vif *vif = netdev_priv(dev); + /* clear bridge database */ + nat25_db_cleanup(rwnx_vif); +#endif /* CONFIG_BR_SUPPORT */ + + if(rwnx_vif->wdev.iftype == NL80211_IFTYPE_P2P_CLIENT) + rwnx_hw->is_p2p_connected = 0; + /* if vif is not up, rwnx_close has already been called */ + if (rwnx_vif->up) { + if (!ind->ft_over_ds && !ind->reassoc) { + cfg80211_disconnected(dev, ind->reason_code, NULL, 0, + (ind->reason_code < 1), GFP_ATOMIC); + } + netif_tx_stop_all_queues(dev); + netif_carrier_off(dev); + } + +#ifdef CONFIG_RWNX_BFMER + /* Disable Beamformer if supported */ + rwnx_bfmer_report_del(rwnx_hw, rwnx_vif->sta.ap); +#endif //(CONFIG_RWNX_BFMER) + +#ifdef AICWF_RX_REORDER +#ifdef AICWF_SDIO_SUPPORT + rx_priv = rwnx_hw->sdiodev->rx_priv; +#else + rx_priv = rwnx_hw->usbdev->rx_priv; +#endif + if((rwnx_vif->wdev.iftype == NL80211_IFTYPE_STATION) || (rwnx_vif->wdev.iftype == NL80211_IFTYPE_P2P_CLIENT)) { + macaddr = (u8*)rwnx_vif->ndev->dev_addr; + AICWFDBG(LOGINFO, "deinit:macaddr:%x,%x,%x,%x,%x,%x\r\n", macaddr[0],macaddr[1],macaddr[2], \ + macaddr[3],macaddr[4],macaddr[5]); + list_for_each_entry_safe(reord_info, tmp, &rx_priv->stas_reord_list, list) { + macaddr = (u8*)rwnx_vif->ndev->dev_addr; + AICWFDBG(LOGINFO, "reord_mac:%x,%x,%x,%x,%x,%x\r\n", 
reord_info->mac_addr[0],reord_info->mac_addr[1],reord_info->mac_addr[2], \ + reord_info->mac_addr[3],reord_info->mac_addr[4],reord_info->mac_addr[5]); + if (!memcmp(reord_info->mac_addr, macaddr, 6)) { + reord_deinit_sta(rx_priv, reord_info); + break; + } + } + } + else if((rwnx_vif->wdev.iftype == NL80211_IFTYPE_AP) || (rwnx_vif->wdev.iftype == NL80211_IFTYPE_P2P_GO)) + BUG();//should be not here: del_sta function +#endif + + rwnx_txq_sta_deinit(rwnx_hw, rwnx_vif->sta.ap); + rwnx_txq_tdls_vif_deinit(rwnx_vif); + #if 0 + rwnx_dbgfs_unregister_rc_stat(rwnx_hw, rwnx_vif->sta.ap); + #endif + rwnx_vif->sta.ap->valid = false; + rwnx_vif->sta.ap = NULL; + rwnx_external_auth_disable(rwnx_vif); + rwnx_chanctx_unlink(rwnx_vif); + + //msleep(200); + if (rwnx_vif->sta.is_roam == false) { + rwnx_set_conn_state(&rwnx_vif->drv_conn_state, (int)RWNX_DRV_STATUS_DISCONNECTED); + } + + return 0; +} + +static inline int rwnx_rx_sm_external_auth_required_ind(struct rwnx_hw *rwnx_hw, + struct rwnx_cmd *cmd, + struct ipc_e2a_msg *msg) +{ + struct sm_external_auth_required_ind *ind = + (struct sm_external_auth_required_ind *)msg->param; + struct rwnx_vif *rwnx_vif = rwnx_hw->vif_table[ind->vif_idx]; +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 17, 0) || defined(CONFIG_WPA3_FOR_OLD_KERNEL) + struct net_device *dev = rwnx_vif->ndev; + struct cfg80211_external_auth_params params; + int ret = 0; + struct wireless_dev *wdev = dev->ieee80211_ptr; + int retry_counter = 10; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + memset((void*)&params, 0, sizeof(struct cfg80211_external_auth_params)); + + params.action = NL80211_EXTERNAL_AUTH_START; + memcpy(params.bssid, ind->bssid.array, ETH_ALEN); + params.ssid.ssid_len = ind->ssid.length; + memcpy(params.ssid.ssid, ind->ssid.array, + min_t(size_t, ind->ssid.length, sizeof(params.ssid.ssid))); + params.key_mgmt_suite = ind->akm; + + while (wdev->conn_owner_nlportid == 0) { + AICWFDBG(LOGINFO, "%s WARNING conn_owner_nlportid = 0, msleep 100ms.\r\n", __func__); + msleep(100); + retry_counter--; + if (retry_counter == 0) { + break; + } + } + AICWFDBG(LOGINFO, "%s wdev->conn_owner_nlportid:%d \r\n", __func__, (int)wdev->conn_owner_nlportid); + + if ((ind->vif_idx > NX_VIRT_DEV_MAX) || !rwnx_vif->up || + (RWNX_VIF_TYPE(rwnx_vif) != NL80211_IFTYPE_STATION) || + (ret = cfg80211_external_auth_request(dev, &params, GFP_ATOMIC))) { + wiphy_err(rwnx_hw->wiphy, "Failed to start external auth on vif %d, rwnx_vif->up %d, iftype:%d, ret %d", + ind->vif_idx, rwnx_vif->up, RWNX_VIF_TYPE(rwnx_vif), ret); + rwnx_send_sm_external_auth_required_rsp(rwnx_hw, rwnx_vif, + WLAN_STATUS_UNSPECIFIED_FAILURE); + return 0; + } + + rwnx_external_auth_enable(rwnx_vif); +#else + rwnx_send_sm_external_auth_required_rsp(rwnx_hw, rwnx_vif, + WLAN_STATUS_UNSPECIFIED_FAILURE); +#endif + return 0; +} + + +static inline int rwnx_rx_mesh_path_create_cfm(struct rwnx_hw *rwnx_hw, + struct rwnx_cmd *cmd, + struct ipc_e2a_msg *msg) +{ + struct mesh_path_create_cfm *cfm = (struct mesh_path_create_cfm *)msg->param; + struct rwnx_vif *rwnx_vif = rwnx_hw->vif_table[cfm->vif_idx]; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + /* Check we well have a Mesh Point Interface */ + if (rwnx_vif && (RWNX_VIF_TYPE(rwnx_vif) == NL80211_IFTYPE_MESH_POINT)) { + rwnx_vif->ap.create_path = false; + } + + return 0; +} + +static inline int rwnx_rx_mesh_peer_update_ind(struct rwnx_hw *rwnx_hw, + struct rwnx_cmd *cmd, + struct ipc_e2a_msg *msg) +{ + struct mesh_peer_update_ind *ind = (struct mesh_peer_update_ind *)msg->param; + struct rwnx_vif *rwnx_vif = 
rwnx_hw->vif_table[ind->vif_idx]; + struct rwnx_sta *rwnx_sta = &rwnx_hw->sta_table[ind->sta_idx]; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + if ((ind->vif_idx >= (NX_VIRT_DEV_MAX + NX_REMOTE_STA_MAX)) || + (rwnx_vif && (RWNX_VIF_TYPE(rwnx_vif) != NL80211_IFTYPE_MESH_POINT)) || + (ind->sta_idx >= NX_REMOTE_STA_MAX)) + return 1; + + /* Check we well have a Mesh Point Interface */ + if (!rwnx_vif->user_mpm) + { + /* Check if peer link has been established or lost */ + if (ind->estab) { + if (!rwnx_sta->valid) { + u8 txq_status; + + rwnx_sta->valid = true; + rwnx_sta->sta_idx = ind->sta_idx; + rwnx_sta->ch_idx = rwnx_vif->ch_index; + rwnx_sta->vif_idx = ind->vif_idx; + rwnx_sta->vlan_idx = rwnx_sta->vif_idx; + rwnx_sta->ps.active = false; + rwnx_sta->qos = true; + rwnx_sta->aid = ind->sta_idx + 1; + //rwnx_sta->acm = ind->acm; + memcpy(rwnx_sta->mac_addr, ind->peer_addr.array, ETH_ALEN); + + rwnx_chanctx_link(rwnx_vif, rwnx_sta->ch_idx, NULL); + + /* Add the station in the list of VIF's stations */ + INIT_LIST_HEAD(&rwnx_sta->list); + list_add_tail(&rwnx_sta->list, &rwnx_vif->ap.sta_list); + + /* Initialize the TX queues */ + if (rwnx_sta->ch_idx == rwnx_hw->cur_chanctx) { + txq_status = 0; + } else { + txq_status = RWNX_TXQ_STOP_CHAN; + } + + rwnx_txq_sta_init(rwnx_hw, rwnx_sta, txq_status); +#ifdef CONFIG_DEBUG_FS + rwnx_dbgfs_register_rc_stat(rwnx_hw, rwnx_sta); +#endif +#ifdef CONFIG_RWNX_BFMER + // TODO: update indication to contains vht capabilties + if (rwnx_hw->mod_params->bfmer) + rwnx_send_bfmer_enable(rwnx_hw, rwnx_sta, NULL); + + rwnx_mu_group_sta_init(rwnx_sta, NULL); +#endif /* CONFIG_RWNX_BFMER */ + + } else { + WARN_ON(0); + } + } else { + if (rwnx_sta->valid) { + rwnx_sta->ps.active = false; + rwnx_sta->valid = false; + + /* Remove the station from the list of VIF's station */ + list_del_init(&rwnx_sta->list); + + rwnx_txq_sta_deinit(rwnx_hw, rwnx_sta); +#ifdef CONFIG_DEBUG_FS + rwnx_dbgfs_unregister_rc_stat(rwnx_hw, rwnx_sta); +#endif + } else { + WARN_ON(0); + } + } + } else { + if (!ind->estab && rwnx_sta->valid) { + /* There is no way to inform upper layer for lost of peer, still + clean everything in the driver */ + rwnx_sta->ps.active = false; + rwnx_sta->valid = false; + + /* Remove the station from the list of VIF's station */ + list_del_init(&rwnx_sta->list); + + rwnx_txq_sta_deinit(rwnx_hw, rwnx_sta); +#ifdef CONFIG_DEBUG_FS + rwnx_dbgfs_unregister_rc_stat(rwnx_hw, rwnx_sta); +#endif + } else { + WARN_ON(0); + } + } + + return 0; +} + +static inline int rwnx_rx_mesh_path_update_ind(struct rwnx_hw *rwnx_hw, + struct rwnx_cmd *cmd, + struct ipc_e2a_msg *msg) +{ + struct mesh_path_update_ind *ind = (struct mesh_path_update_ind *)msg->param; + struct rwnx_vif *rwnx_vif = rwnx_hw->vif_table[ind->vif_idx]; + struct rwnx_mesh_path *mesh_path; + bool found = false; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + if (ind->vif_idx >= (NX_VIRT_DEV_MAX + NX_REMOTE_STA_MAX)) + return 1; + + if (!rwnx_vif || (RWNX_VIF_TYPE(rwnx_vif) != NL80211_IFTYPE_MESH_POINT)) + return 0; + + /* Look for path with provided target address */ + list_for_each_entry(mesh_path, &rwnx_vif->ap.mpath_list, list) { + if (mesh_path->path_idx == ind->path_idx) { + found = true; + break; + } + } + + /* Check if element has been deleted */ + if (ind->delete) { + if (found) { +#ifdef CREATE_TRACE_POINTS + trace_mesh_delete_path(mesh_path); +#endif + /* Remove element from list */ + list_del_init(&mesh_path->list); + /* Free the element */ + kfree(mesh_path); + } + } + else { + if (found) { + // Update the Next Hop STA + 
mesh_path->p_nhop_sta = &rwnx_hw->sta_table[ind->nhop_sta_idx]; +#ifdef CREATE_TRACE_POINTS + trace_mesh_update_path(mesh_path); +#endif + } else { + // Allocate a Mesh Path structure + mesh_path = (struct rwnx_mesh_path *)kmalloc(sizeof(struct rwnx_mesh_path), GFP_ATOMIC); + + if (mesh_path) { + INIT_LIST_HEAD(&mesh_path->list); + + mesh_path->path_idx = ind->path_idx; + mesh_path->p_nhop_sta = &rwnx_hw->sta_table[ind->nhop_sta_idx]; + memcpy(&mesh_path->tgt_mac_addr, &ind->tgt_mac_addr, MAC_ADDR_LEN); + + // Insert the path in the list of path + list_add_tail(&mesh_path->list, &rwnx_vif->ap.mpath_list); +#ifdef CREATE_TRACE_POINTS + trace_mesh_create_path(mesh_path); +#endif + } + } + } + + return 0; +} + +static inline int rwnx_rx_mesh_proxy_update_ind(struct rwnx_hw *rwnx_hw, + struct rwnx_cmd *cmd, + struct ipc_e2a_msg *msg) +{ + struct mesh_proxy_update_ind *ind = (struct mesh_proxy_update_ind *)msg->param; + struct rwnx_vif *rwnx_vif = rwnx_hw->vif_table[ind->vif_idx]; + struct rwnx_mesh_proxy *mesh_proxy; + bool found = false; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + if (ind->vif_idx >= (NX_VIRT_DEV_MAX + NX_REMOTE_STA_MAX)) + return 1; + + if (!rwnx_vif || (RWNX_VIF_TYPE(rwnx_vif) != NL80211_IFTYPE_MESH_POINT)) + return 0; + + /* Look for path with provided external STA address */ + list_for_each_entry(mesh_proxy, &rwnx_vif->ap.proxy_list, list) { + if (!memcmp(&ind->ext_sta_addr, &mesh_proxy->ext_sta_addr, ETH_ALEN)) { + found = true; + break; + } + } + + if (ind->delete && found) { + /* Delete mesh path */ + list_del_init(&mesh_proxy->list); + kfree(mesh_proxy); + } else if (!ind->delete && !found) { + /* Allocate a Mesh Path structure */ + mesh_proxy = (struct rwnx_mesh_proxy *)kmalloc(sizeof(*mesh_proxy), + GFP_ATOMIC); + + if (mesh_proxy) { + INIT_LIST_HEAD(&mesh_proxy->list); + + memcpy(&mesh_proxy->ext_sta_addr, &ind->ext_sta_addr, MAC_ADDR_LEN); + mesh_proxy->local = ind->local; + + if (!ind->local) { + memcpy(&mesh_proxy->proxy_addr, &ind->proxy_mac_addr, MAC_ADDR_LEN); + } + + /* Insert the path in the list of path */ + list_add_tail(&mesh_proxy->list, &rwnx_vif->ap.proxy_list); + } + } + + return 0; +} +#endif /* CONFIG_RWNX_FULLMAC */ + +/*************************************************************************** + * Messages from APM task + **************************************************************************/ + + +/*************************************************************************** + * Messages from DEBUG task + **************************************************************************/ +static inline int rwnx_rx_dbg_error_ind(struct rwnx_hw *rwnx_hw, + struct rwnx_cmd *cmd, + struct ipc_e2a_msg *msg) +{ + RWNX_DBG(RWNX_FN_ENTRY_STR); + + return 0; +} + +#ifdef CONFIG_RWNX_FULLMAC + +static msg_cb_fct mm_hdlrs[MSG_I(MM_MAX)] = { + [MSG_I(MM_CHANNEL_SWITCH_IND)] = rwnx_rx_chan_switch_ind, + [MSG_I(MM_CHANNEL_PRE_SWITCH_IND)] = rwnx_rx_chan_pre_switch_ind, + [MSG_I(MM_REMAIN_ON_CHANNEL_EXP_IND)] = rwnx_rx_remain_on_channel_exp_ind, + [MSG_I(MM_PS_CHANGE_IND)] = rwnx_rx_ps_change_ind, + [MSG_I(MM_TRAFFIC_REQ_IND)] = rwnx_rx_traffic_req_ind, + [MSG_I(MM_P2P_VIF_PS_CHANGE_IND)] = rwnx_rx_p2p_vif_ps_change_ind, + [MSG_I(MM_CSA_COUNTER_IND)] = rwnx_rx_csa_counter_ind, + [MSG_I(MM_CSA_FINISH_IND)] = rwnx_rx_csa_finish_ind, + [MSG_I(MM_CSA_TRAFFIC_IND)] = rwnx_rx_csa_traffic_ind, + [MSG_I(MM_CHANNEL_SURVEY_IND)] = rwnx_rx_channel_survey_ind, + [MSG_I(MM_P2P_NOA_UPD_IND)] = rwnx_rx_p2p_noa_upd_ind, + [MSG_I(MM_RSSI_STATUS_IND)] = rwnx_rx_rssi_status_ind, + 
[MSG_I(MM_PKTLOSS_IND)] = rwnx_rx_pktloss_notify_ind, + [MSG_I(MM_APM_STALOSS_IND)] = rwnx_apm_staloss_ind, + [MSG_I(MM_RADAR_DETECT_IND)] = rwnx_radar_detect_ind, +}; + +static msg_cb_fct scan_hdlrs[MSG_I(SCANU_MAX)] = { + [MSG_I(SCANU_START_CFM)] = rwnx_rx_scanu_start_cfm, + [MSG_I(SCANU_RESULT_IND)] = rwnx_rx_scanu_result_ind, +}; + +static msg_cb_fct me_hdlrs[MSG_I(ME_MAX)] = { + [MSG_I(ME_TKIP_MIC_FAILURE_IND)] = rwnx_rx_me_tkip_mic_failure_ind, + [MSG_I(ME_TX_CREDITS_UPDATE_IND)] = rwnx_rx_me_tx_credits_update_ind, +}; + +static msg_cb_fct sm_hdlrs[MSG_I(SM_MAX)] = { + [MSG_I(SM_CONNECT_IND)] = rwnx_rx_sm_connect_ind, + [MSG_I(SM_DISCONNECT_IND)] = rwnx_rx_sm_disconnect_ind, + [MSG_I(SM_EXTERNAL_AUTH_REQUIRED_IND)] = rwnx_rx_sm_external_auth_required_ind, +}; + +static msg_cb_fct apm_hdlrs[MSG_I(APM_MAX)] = { +}; + +static msg_cb_fct mesh_hdlrs[MSG_I(MESH_MAX)] = { + [MSG_I(MESH_PATH_CREATE_CFM)] = rwnx_rx_mesh_path_create_cfm, + [MSG_I(MESH_PEER_UPDATE_IND)] = rwnx_rx_mesh_peer_update_ind, + [MSG_I(MESH_PATH_UPDATE_IND)] = rwnx_rx_mesh_path_update_ind, + [MSG_I(MESH_PROXY_UPDATE_IND)] = rwnx_rx_mesh_proxy_update_ind, +}; + +#endif /* CONFIG_RWNX_FULLMAC */ + +static msg_cb_fct dbg_hdlrs[MSG_I(DBG_MAX)] = { + [MSG_I(DBG_ERROR_IND)] = rwnx_rx_dbg_error_ind, +}; + +static msg_cb_fct tdls_hdlrs[MSG_I(TDLS_MAX)] = { + [MSG_I(TDLS_CHAN_SWITCH_CFM)] = rwnx_rx_tdls_chan_switch_cfm, + [MSG_I(TDLS_CHAN_SWITCH_IND)] = rwnx_rx_tdls_chan_switch_ind, + [MSG_I(TDLS_CHAN_SWITCH_BASE_IND)] = rwnx_rx_tdls_chan_switch_base_ind, + [MSG_I(TDLS_PEER_PS_IND)] = rwnx_rx_tdls_peer_ps_ind, +}; + +static msg_cb_fct *msg_hdlrs[] = { + [TASK_MM] = mm_hdlrs, + [TASK_DBG] = dbg_hdlrs, +#ifdef CONFIG_RWNX_FULLMAC + [TASK_TDLS] = tdls_hdlrs, + [TASK_SCANU] = scan_hdlrs, + [TASK_ME] = me_hdlrs, + [TASK_SM] = sm_hdlrs, + [TASK_APM] = apm_hdlrs, + [TASK_MESH] = mesh_hdlrs, +#endif /* CONFIG_RWNX_FULLMAC */ +}; + +/** + * + */ +void rwnx_rx_handle_msg(struct rwnx_hw *rwnx_hw, struct ipc_e2a_msg *msg) +{ + //printk("%s(%d) MSG_T(msg->id):%d MSG_I(msg->id):%d cmd:%s\r\n", __func__, + // msg->id, + // MSG_T(msg->id), + // MSG_I(msg->id), + // rwnx_id2str[MSG_T(msg->id)][MSG_I(msg->id)]); + + rwnx_hw->cmd_mgr->msgind(rwnx_hw->cmd_mgr, msg, + msg_hdlrs[MSG_T(msg->id)][MSG_I(msg->id)]); +} + +void rwnx_rx_handle_print(struct rwnx_hw *rwnx_hw, u8 *msg, u32 len) +{ + u8 *data_end = NULL; + (void)data_end; + + if (!rwnx_hw || !rwnx_hw->fwlog_en) { + pr_err("FWLOG-OVFL: %s", msg); + return; + } + + printk("FWLOG: %s", msg); + +#ifdef CONFIG_RWNX_DEBUGFS + data_end = rwnx_hw->debugfs.fw_log.buf.dataend; + + if (!rwnx_hw->debugfs.fw_log.buf.data) + return ; + + //printk("end=%lx, len=%d\n", (unsigned long)rwnx_hw->debugfs.fw_log.buf.end, len); + + spin_lock_bh(&rwnx_hw->debugfs.fw_log.lock); + + if (rwnx_hw->debugfs.fw_log.buf.end + len > data_end) { + int rem = data_end - rwnx_hw->debugfs.fw_log.buf.end; + memcpy(rwnx_hw->debugfs.fw_log.buf.end, msg, rem); + memcpy(rwnx_hw->debugfs.fw_log.buf.data, &msg[rem], len - rem); + rwnx_hw->debugfs.fw_log.buf.end = rwnx_hw->debugfs.fw_log.buf.data + (len - rem); + } else { + memcpy(rwnx_hw->debugfs.fw_log.buf.end, msg, len); + rwnx_hw->debugfs.fw_log.buf.end += len; + } + + rwnx_hw->debugfs.fw_log.buf.size += len; + if (rwnx_hw->debugfs.fw_log.buf.size > FW_LOG_SIZE) + rwnx_hw->debugfs.fw_log.buf.size = FW_LOG_SIZE; + + spin_unlock_bh(&rwnx_hw->debugfs.fw_log.lock); +#endif +} + diff --git a/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_msg_rx.h 
b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_msg_rx.h new file mode 100644 index 0000000..b5556d1 --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_msg_rx.h @@ -0,0 +1,19 @@ +/** + **************************************************************************************** + * + * @file rwnx_msg_rx.h + * + * @brief RX function declarations + * + * Copyright (C) RivieraWaves 2012-2019 + * + **************************************************************************************** + */ + +#ifndef _RWNX_MSG_RX_H_ +#define _RWNX_MSG_RX_H_ + +void rwnx_rx_handle_msg(struct rwnx_hw *rwnx_hw, struct ipc_e2a_msg *msg); +void rwnx_rx_handle_print(struct rwnx_hw *rwnx_hw, u8 *msg, u32 len); + +#endif /* _RWNX_MSG_RX_H_ */ diff --git a/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_msg_tx.c b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_msg_tx.c new file mode 100755 index 0000000..eeaaa13 --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_msg_tx.c @@ -0,0 +1,5064 @@ +/** + ****************************************************************************** + * + * @file rwnx_msg_tx.c + * + * @brief TX function definitions + * + * Copyright (C) RivieraWaves 2012-2019 + * + ****************************************************************************** + */ + +#include "rwnx_msg_tx.h" +#include "rwnx_mod_params.h" +#include "reg_access.h" +#ifdef CONFIG_RWNX_BFMER +#include "rwnx_bfmer.h" +#endif //(CONFIG_RWNX_BFMER) +#include "rwnx_compat.h" +#include "rwnx_cmds.h" +#include "rwnx_main.h" +#include "aicwf_txrxif.h" +#include "rwnx_strs.h" + +#ifdef RF_WRITE_FILE +#include +#endif + + +const struct mac_addr mac_addr_bcst = {{0xFFFF, 0xFFFF, 0xFFFF}}; + +/* Default MAC Rx filters that can be changed by mac80211 + * (via the configure_filter() callback) */ +#define RWNX_MAC80211_CHANGEABLE ( \ + NXMAC_ACCEPT_BA_BIT | \ + NXMAC_ACCEPT_BAR_BIT | \ + NXMAC_ACCEPT_OTHER_DATA_FRAMES_BIT | \ + NXMAC_ACCEPT_PROBE_REQ_BIT | \ + NXMAC_ACCEPT_PS_POLL_BIT \ + ) + +/* Default MAC Rx filters that cannot be changed by mac80211 */ +#define RWNX_MAC80211_NOT_CHANGEABLE ( \ + NXMAC_ACCEPT_QO_S_NULL_BIT | \ + NXMAC_ACCEPT_Q_DATA_BIT | \ + NXMAC_ACCEPT_DATA_BIT | \ + NXMAC_ACCEPT_OTHER_MGMT_FRAMES_BIT | \ + NXMAC_ACCEPT_MY_UNICAST_BIT | \ + NXMAC_ACCEPT_BROADCAST_BIT | \ + NXMAC_ACCEPT_BEACON_BIT | \ + NXMAC_ACCEPT_PROBE_RESP_BIT \ + ) + +/* Default MAC Rx filter */ +#define RWNX_DEFAULT_RX_FILTER (RWNX_MAC80211_CHANGEABLE | RWNX_MAC80211_NOT_CHANGEABLE) + +const int bw2chnl[] = { + [NL80211_CHAN_WIDTH_20_NOHT] = PHY_CHNL_BW_20, + [NL80211_CHAN_WIDTH_20] = PHY_CHNL_BW_20, + [NL80211_CHAN_WIDTH_40] = PHY_CHNL_BW_40, + [NL80211_CHAN_WIDTH_80] = PHY_CHNL_BW_80, + [NL80211_CHAN_WIDTH_160] = PHY_CHNL_BW_160, + [NL80211_CHAN_WIDTH_80P80] = PHY_CHNL_BW_80P80, +}; + +const int chnl2bw[] = { + [PHY_CHNL_BW_20] = NL80211_CHAN_WIDTH_20, + [PHY_CHNL_BW_40] = NL80211_CHAN_WIDTH_40, + [PHY_CHNL_BW_80] = NL80211_CHAN_WIDTH_80, + [PHY_CHNL_BW_160] = NL80211_CHAN_WIDTH_160, + [PHY_CHNL_BW_80P80] = NL80211_CHAN_WIDTH_80P80, +}; + +#define RWNX_CMD_ARRAY_SIZE 40 +#define RWNX_CMD_HIGH_WATER_SIZE RWNX_CMD_ARRAY_SIZE/2 +//#define RWNX_MSG_ARRAY_SIZE 20 + +struct rwnx_cmd cmd_array[RWNX_CMD_ARRAY_SIZE]; +//struct lmac_msg msg_array[RWNX_MSG_ARRAY_SIZE]; + +static spinlock_t cmd_array_lock; +//static spinlock_t msg_array_lock; + +//int msg_array_index = 0; +int cmd_array_index = 0; + + + +/*****************************************************************************/ +/* + * Parse the ampdu density to 
retrieve the value in usec, according to the + * values defined in ieee80211.h + */ +static inline u8 rwnx_ampdudensity2usec(u8 ampdudensity) +{ + switch (ampdudensity) { + case IEEE80211_HT_MPDU_DENSITY_NONE: + return 0; + /* 1 microsecond is our granularity */ + case IEEE80211_HT_MPDU_DENSITY_0_25: + case IEEE80211_HT_MPDU_DENSITY_0_5: + case IEEE80211_HT_MPDU_DENSITY_1: + return 1; + case IEEE80211_HT_MPDU_DENSITY_2: + return 2; + case IEEE80211_HT_MPDU_DENSITY_4: + return 4; + case IEEE80211_HT_MPDU_DENSITY_8: + return 8; + case IEEE80211_HT_MPDU_DENSITY_16: + return 16; + default: + return 0; + } +} + +static inline bool use_pairwise_key(struct cfg80211_crypto_settings *crypto) +{ + if ((crypto->cipher_group == WLAN_CIPHER_SUITE_WEP40) || + (crypto->cipher_group == WLAN_CIPHER_SUITE_WEP104)) + return false; + + return true; +} + +static inline bool is_non_blocking_msg(int id) +{ + return ((id == MM_TIM_UPDATE_REQ) || (id == ME_RC_SET_RATE_REQ) || + (id == MM_BFMER_ENABLE_REQ) || (id == ME_TRAFFIC_IND_REQ) || + (id == TDLS_PEER_TRAFFIC_IND_REQ) || + (id == MESH_PATH_CREATE_REQ) || (id == MESH_PROXY_ADD_REQ) || + (id == SM_EXTERNAL_AUTH_REQUIRED_RSP)); +} + +static inline u8_l get_chan_flags(uint32_t flags) +{ + u8_l chan_flags = 0; +#ifdef CONFIG_RADAR_OR_IR_DETECT + #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0) + if (flags & IEEE80211_CHAN_PASSIVE_SCAN) + #else + if (flags & IEEE80211_CHAN_NO_IR) + #endif + chan_flags |= CHAN_NO_IR; + if (flags & IEEE80211_CHAN_RADAR) + chan_flags |= CHAN_RADAR; +#endif + return chan_flags; +} + +static inline s8_l chan_to_fw_pwr(int power) +{ + return power>127?127:(s8_l)power; +} + +static inline void limit_chan_bw(u8_l *bw, u16_l primary, u16_l *center1) +{ + int oft, new_oft = 10; + + if (*bw <= PHY_CHNL_BW_40) + return; + + oft = *center1 - primary; + *bw = PHY_CHNL_BW_40; + + if (oft < 0) + new_oft = new_oft * -1; + if (abs(oft) == 10 || abs(oft) == 50) + new_oft = new_oft * -1; + + *center1 = primary + new_oft; +} + +struct rwnx_cmd *rwnx_cmd_malloc(void){ + struct rwnx_cmd *cmd = NULL; + unsigned long flags = 0; + + spin_lock_irqsave(&cmd_array_lock, flags); + + for(cmd_array_index = 0; cmd_array_index < RWNX_CMD_ARRAY_SIZE; cmd_array_index++){ + if(cmd_array[cmd_array_index].used == 0){ + AICWFDBG(LOGTRACE, "%s get cmd_array[%d]:%p \r\n", __func__, cmd_array_index,&cmd_array[cmd_array_index]); + cmd = &cmd_array[cmd_array_index]; + cmd_array[cmd_array_index].used = 1; + break; + } + } + + if(cmd_array_index >= RWNX_CMD_HIGH_WATER_SIZE){ + AICWFDBG(LOGERROR, "%s cmd(%d) was pending...\r\n", __func__, cmd_array_index); + mdelay(100); + } + + if(!cmd){ + AICWFDBG(LOGERROR, "%s array is empty...\r\n", __func__); + } + + spin_unlock_irqrestore(&cmd_array_lock, flags); + + return cmd; +} + +void rwnx_cmd_free(struct rwnx_cmd *cmd){ + unsigned long flags = 0; + + spin_lock_irqsave(&cmd_array_lock, flags); + cmd->used = 0; + AICWFDBG(LOGTRACE, "%s cmd_array[%d]:%p \r\n", __func__, cmd->array_id, cmd); + spin_unlock_irqrestore(&cmd_array_lock, flags); +} + + +int rwnx_init_cmd_array(void){ + + AICWFDBG(LOGTRACE, "%s Enter \r\n", __func__); + spin_lock_init(&cmd_array_lock); + + for(cmd_array_index = 0; cmd_array_index < RWNX_CMD_ARRAY_SIZE; cmd_array_index++){ + AICWFDBG(LOGTRACE, "%s cmd_queue[%d]:%p \r\n", __func__, cmd_array_index, &cmd_array[cmd_array_index]); + cmd_array[cmd_array_index].used = 0; + cmd_array[cmd_array_index].array_id = cmd_array_index; + } + AICWFDBG(LOGTRACE, "%s Exit \r\n", __func__); + + return 0; +} + +void 
rwnx_free_cmd_array(void){ + + AICWFDBG(LOGTRACE, "%s Enter \r\n", __func__); + + for(cmd_array_index = 0; cmd_array_index < RWNX_CMD_ARRAY_SIZE; cmd_array_index++){ + cmd_array[cmd_array_index].used = 0; + } + + AICWFDBG(LOGTRACE, "%s Exit \r\n", __func__); +} + + +#if 0 +int rwnx_init_msg_array(void){ + + printk("%s enter\r\n", __func__); + spin_lock_init(&msg_array_lock); + + for(msg_array_index = 0; msg_array_index < RWNX_MSG_ARRAY_SIZE; msg_array_index++){ + printk("%s msg_queue[%d]:%p \r\n", __func__, msg_array_index, &msg_array[msg_array_index]); + } + printk("%s exit\r\n", __func__); + + return 0; + +} + + +void rwnx_free_msg_array(void){ + + printk("%s enter\r\n", __func__); +#if 1 + for(msg_array_index = 0; msg_array_index < RWNX_MSG_ARRAY_SIZE; msg_array_index++){ + msg_array[msg_array_index].param_len = 0; + } +#endif + printk("%s exit\r\n", __func__); +} + +struct lmac_msg *rwnx_msg_malloc_(void){ + struct lmac_msg *msg = NULL; + + + spin_lock(&msg_array_lock); + printk("%s enter\r\n", __func__); + for(msg_array_index = 0; msg_array_index < RWNX_MSG_ARRAY_SIZE; msg_array_index++){ + if(msg_array[msg_array_index].param_len== 0){ + printk("%s get msg_array[%d]:%p \r\n", __func__, msg_array_index, &msg_array[msg_array_index]); + msg = &msg_array[msg_array_index]; + break; + }else{ + printk("%s msg_array[%d] in used param_len= %d \r\n", + __func__, + msg_array_index, + msg_array[msg_array_index].param_len); + } + } + + if(!msg){ + printk("%s array is empty...\r\n", __func__); + } + spin_unlock(&msg_array_lock); + + return msg; +} + +void rwnx_msg_free_(struct lmac_msg *msg){ + + spin_lock(&msg_array_lock); + printk("%s enter \r\n", __func__); + + for(msg_array_index = 0; msg_array_index < RWNX_MSG_ARRAY_SIZE; msg_array_index++){ + if(msg == &msg_array[msg_array_index]){ + break; + } + } + + memset(msg->param, 0, msg->param_len); + msg->id = 0; + msg->dest_id = 0; + msg->src_id = 0; + msg->param_len = 0; + + printk("%s msg_array[%d]:%p \r\n", __func__, msg_array_index, msg); + spin_unlock(&msg_array_lock); +} +#endif + + +/** + ****************************************************************************** + * @brief Allocate memory for a message + * + * This primitive allocates memory for a message that has to be sent. The memory + * is allocated dynamically on the heap and the length of the variable parameter + * structure has to be provided in order to allocate the correct size. + * + * Several additional parameters are provided which will be preset in the message + * and which may be used internally to choose the kind of memory to allocate. + * + * The memory allocated will be automatically freed by the kernel, after the + * pointer has been sent to ke_msg_send(). If the message is not sent, it must + * be freed explicitly with ke_msg_free(). + * + * Allocation failure is considered critical and should not happen. + * + * @param[in] id Message identifier + * @param[in] dest_id Destination Task Identifier + * @param[in] src_id Source Task Identifier + * @param[in] param_len Size of the message parameters to be allocated + * + * @return Pointer to the parameter member of the ke_msg. 
If the parameter + * structure is empty, the pointer will point to the end of the message + * and should not be used (except to retrieve the message pointer or to + * send the message) + ****************************************************************************** + */ +static inline void *rwnx_msg_zalloc(lmac_msg_id_t const id, + lmac_task_id_t const dest_id, + lmac_task_id_t const src_id, + uint16_t const param_len) +{ + struct lmac_msg *msg; + gfp_t flags; + + //if (is_non_blocking_msg(id) && in_softirq()) + flags = GFP_ATOMIC; + //else + // flags = GFP_KERNEL; + + msg = (struct lmac_msg *)kzalloc(sizeof(struct lmac_msg) + param_len, + flags); + if (msg == NULL) { + printk(KERN_CRIT "%s: msg allocation failed\n", __func__); + return NULL; + } + msg->id = id; + msg->dest_id = dest_id; + msg->src_id = src_id; + msg->param_len = param_len; + //printk("rwnx_msg_zalloc size=%d id=%d\n",msg->param_len,msg->id); + + return msg->param; +} + +static void rwnx_msg_free(struct rwnx_hw *rwnx_hw, const void *msg_params) +{ + struct lmac_msg *msg = container_of((void *)msg_params, + struct lmac_msg, param); + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + /* Free the message */ + kfree(msg); +} + + + +static int rwnx_send_msg(struct rwnx_hw *rwnx_hw, const void *msg_params, + int reqcfm, lmac_msg_id_t reqid, void *cfm) +{ + struct lmac_msg *msg; + struct rwnx_cmd *cmd; + bool nonblock; + int ret = 0; + + //RWNX_DBG(RWNX_FN_ENTRY_STR); + AICWFDBG(LOGDEBUG, "%s (%d)%s reqcfm:%d in_softirq:%d in_atomic:%d\r\n", + __func__, reqid, RWNX_ID2STR(reqid), reqcfm, (int)in_softirq(), (int)in_atomic()); + +#ifdef AICWF_USB_SUPPORT + if (rwnx_hw->usbdev->state == USB_DOWN_ST) { + rwnx_msg_free(rwnx_hw, msg_params); + AICWFDBG(LOGERROR, "%s bus is down\n", __func__); + return 0; + } +#endif +#ifdef AICWF_SDIO_SUPPORT + if(rwnx_hw->sdiodev->bus_if->state == BUS_DOWN_ST) { + rwnx_msg_free(rwnx_hw, msg_params); + sdio_err("bus is down\n"); + return 0; + } +#endif + + msg = container_of((void *)msg_params, struct lmac_msg, param); + + #if 0 + if (!test_bit(RWNX_DEV_STARTED, &rwnx_hw->drv_flags) && + reqid != DBG_MEM_READ_CFM && reqid != DBG_MEM_WRITE_CFM && + reqid != DBG_MEM_BLOCK_WRITE_CFM && reqid != DBG_START_APP_CFM && + reqid != MM_SET_RF_CALIB_CFM && reqid != MM_SET_RF_CONFIG_CFM && + reqid != MM_RESET_CFM && reqid != MM_VERSION_CFM && + reqid != MM_START_CFM && reqid != MM_SET_IDLE_CFM && + reqid != ME_CONFIG_CFM && reqid != MM_SET_PS_MODE_CFM && + reqid != ME_CHAN_CONFIG_CFM) { + printk(KERN_CRIT "%s: bypassing (RWNX_DEV_RESTARTING set) 0x%02x\n", + __func__, reqid); + kfree(msg); + return -EBUSY; + } + #endif +#if 0 + else if (!rwnx_hw->ipc_env) { + printk(KERN_CRIT "%s: bypassing (restart must have failed)\n", __func__); + kfree(msg); + return -EBUSY; + } +#endif + + //nonblock = is_non_blocking_msg(msg->id); + nonblock = 0;//AIDEN + cmd = rwnx_cmd_malloc();//kzalloc(sizeof(struct rwnx_cmd), nonblock ? 
GFP_ATOMIC : GFP_KERNEL); + cmd->result = -EINTR; + cmd->id = msg->id; + cmd->reqid = reqid; + cmd->a2e_msg = msg; + cmd->e2a_msg = cfm; + if (nonblock) + cmd->flags = RWNX_CMD_FLAG_NONBLOCK; + if (reqcfm) + cmd->flags |= RWNX_CMD_FLAG_REQ_CFM; +#if 0 + if(cfm != NULL) { + do { + if(rwnx_hw->cmd_mgr->state == RWNX_CMD_MGR_STATE_CRASHED) + break; + spin_lock_bh(&rwnx_hw->cmd_mgr->lock); + empty = list_empty(&rwnx_hw->cmd_mgr->cmds); + spin_unlock_bh(&rwnx_hw->cmd_mgr->lock); + if(!empty) { + if(in_softirq()) { + #ifdef CONFIG_RWNX_DBG + AICWFDBG(LOGDEBUG, "in_softirq:check cmdqueue empty\n"); + #endif + mdelay(10); + } + else { + #ifdef CONFIG_RWNX_DBG + AICWFDBG(LOGDEBUG, "check cmdqueue empty\n"); + #endif + msleep(50); + } + } + } while(!empty);//wait for cmd queue empty + } +#endif + if(reqcfm) { + cmd->flags &= ~RWNX_CMD_FLAG_WAIT_ACK; // we don't need ack any more + ret = rwnx_hw->cmd_mgr->queue(rwnx_hw->cmd_mgr, cmd); + } else { +#ifdef AICWF_SDIO_SUPPORT + aicwf_set_cmd_tx((void *)(rwnx_hw->sdiodev), cmd->a2e_msg, sizeof(struct lmac_msg) + cmd->a2e_msg->param_len); +#else + aicwf_set_cmd_tx((void *)(rwnx_hw->usbdev), cmd->a2e_msg, sizeof(struct lmac_msg) + cmd->a2e_msg->param_len); +#endif + } + + if(!reqcfm || ret) + rwnx_cmd_free(cmd);//kfree(cmd); + + return ret;//0; +} + + +static int rwnx_send_msg1(struct rwnx_hw *rwnx_hw, const void *msg_params, + int reqcfm, lmac_msg_id_t reqid, void *cfm, bool defer) +{ + struct lmac_msg *msg; + struct rwnx_cmd *cmd; + bool nonblock; + int ret = 0; + + //RWNX_DBG(RWNX_FN_ENTRY_STR); + + AICWFDBG(LOGDEBUG,"%s (%d)%s reqcfm:%d in_softirq:%d in_atomic:%d\r\n", + __func__, reqid, RWNX_ID2STR(reqid), reqcfm, (int)in_softirq(), (int)in_atomic()); + + if (rwnx_hw->usbdev->state == USB_DOWN_ST) { + rwnx_msg_free(rwnx_hw, msg_params); + AICWFDBG(LOGERROR, "%s bus is down\n", __func__); + return 0; + } + + + msg = container_of((void *)msg_params, struct lmac_msg, param); + + //nonblock = is_non_blocking_msg(msg->id); + nonblock = 0; + cmd = rwnx_cmd_malloc();//kzalloc(sizeof(struct rwnx_cmd), nonblock ? 
GFP_ATOMIC : GFP_KERNEL); + cmd->result = -EINTR; + cmd->id = msg->id; + cmd->reqid = reqid; + cmd->a2e_msg = msg; + cmd->e2a_msg = cfm; + if (nonblock) + cmd->flags = RWNX_CMD_FLAG_NONBLOCK; + if (reqcfm) + cmd->flags |= RWNX_CMD_FLAG_REQ_CFM; + + if(reqcfm) { + cmd->flags &= ~RWNX_CMD_FLAG_WAIT_ACK; // we don't need ack any more + if(!defer) + ret = rwnx_hw->cmd_mgr->queue(rwnx_hw->cmd_mgr, cmd); + else + ret = cmd_mgr_queue_force_defer(rwnx_hw->cmd_mgr, cmd); + } + + if (!reqcfm || ret) { + rwnx_cmd_free(cmd);//kfree(cmd); + } + + if (!ret) { + ret = cmd->result; + } + + //return ret; + return 0; +} + +/****************************************************************************** + * Control messages handling functions (FULLMAC) + *****************************************************************************/ +int rwnx_send_reset(struct rwnx_hw *rwnx_hw) +{ + void *void_param; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + /* RESET REQ has no parameter */ + void_param = rwnx_msg_zalloc(MM_RESET_REQ, TASK_MM, DRV_TASK_ID, 0); + if (!void_param) + return -ENOMEM; + + return rwnx_send_msg(rwnx_hw, void_param, 1, MM_RESET_CFM, NULL); +} + +int rwnx_send_start(struct rwnx_hw *rwnx_hw) +{ + struct mm_start_req *start_req_param; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + /* Build the START REQ message */ + start_req_param = rwnx_msg_zalloc(MM_START_REQ, TASK_MM, DRV_TASK_ID, + sizeof(struct mm_start_req)); + if (!start_req_param) + return -ENOMEM; + + /* Set parameters for the START message */ + memcpy(&start_req_param->phy_cfg, &rwnx_hw->phy.cfg, sizeof(rwnx_hw->phy.cfg)); + start_req_param->uapsd_timeout = (u32_l)rwnx_hw->mod_params->uapsd_timeout; + start_req_param->lp_clk_accuracy = (u16_l)rwnx_hw->mod_params->lp_clk_ppm; + + /* Send the START REQ message to LMAC FW */ + return rwnx_send_msg(rwnx_hw, start_req_param, 1, MM_START_CFM, NULL); +} + +int rwnx_send_version_req(struct rwnx_hw *rwnx_hw, struct mm_version_cfm *cfm) +{ + void *void_param; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + /* VERSION REQ has no parameter */ + void_param = rwnx_msg_zalloc(MM_VERSION_REQ, TASK_MM, DRV_TASK_ID, 0); + if (!void_param) + return -ENOMEM; + + return rwnx_send_msg(rwnx_hw, void_param, 1, MM_VERSION_CFM, cfm); +} + +int rwnx_send_add_if(struct rwnx_hw *rwnx_hw, const unsigned char *mac, + enum nl80211_iftype iftype, bool p2p, struct mm_add_if_cfm *cfm) +{ + struct mm_add_if_req *add_if_req_param; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + /* Build the ADD_IF_REQ message */ + add_if_req_param = rwnx_msg_zalloc(MM_ADD_IF_REQ, TASK_MM, DRV_TASK_ID, + sizeof(struct mm_add_if_req)); + if (!add_if_req_param) + return -ENOMEM; + + /* Set parameters for the ADD_IF_REQ message */ + memcpy(&(add_if_req_param->addr.array[0]), mac, ETH_ALEN); + switch (iftype) { + #ifdef CONFIG_RWNX_FULLMAC + //case NL80211_IFTYPE_P2P_DEVICE: + case NL80211_IFTYPE_P2P_CLIENT: + add_if_req_param->p2p = true; + // no break + #endif /* CONFIG_RWNX_FULLMAC */ + case NL80211_IFTYPE_STATION: + add_if_req_param->type = MM_STA; + break; + + case NL80211_IFTYPE_ADHOC: + add_if_req_param->type = MM_IBSS; + break; + + #ifdef CONFIG_RWNX_FULLMAC + case NL80211_IFTYPE_P2P_GO: + add_if_req_param->p2p = true; + // no break + #endif /* CONFIG_RWNX_FULLMAC */ + case NL80211_IFTYPE_AP: + add_if_req_param->type = MM_AP; + break; + case NL80211_IFTYPE_MESH_POINT: + add_if_req_param->type = MM_MESH_POINT; + break; + case NL80211_IFTYPE_AP_VLAN: + return -1; + case NL80211_IFTYPE_MONITOR: + add_if_req_param->type = MM_MONITOR; + break; + default: + add_if_req_param->type = 
MM_STA; + break; + } + + + /* Send the ADD_IF_REQ message to LMAC FW */ + return rwnx_send_msg(rwnx_hw, add_if_req_param, 1, MM_ADD_IF_CFM, cfm); +} + +int rwnx_send_remove_if(struct rwnx_hw *rwnx_hw, u8 vif_index, bool defer) +{ + struct mm_remove_if_req *remove_if_req; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + /* Build the MM_REMOVE_IF_REQ message */ + remove_if_req = rwnx_msg_zalloc(MM_REMOVE_IF_REQ, TASK_MM, DRV_TASK_ID, + sizeof(struct mm_remove_if_req)); + if (!remove_if_req) + return -ENOMEM; + + /* Set parameters for the MM_REMOVE_IF_REQ message */ + remove_if_req->inst_nbr = vif_index; + + /* Send the MM_REMOVE_IF_REQ message to LMAC FW */ + return rwnx_send_msg1(rwnx_hw, remove_if_req, 1, MM_REMOVE_IF_CFM, NULL, defer); +} + +int rwnx_send_set_channel(struct rwnx_hw *rwnx_hw, int phy_idx, + struct mm_set_channel_cfm *cfm) +{ + struct mm_set_channel_req *req; + enum nl80211_chan_width width; + u16 center_freq, center_freq1, center_freq2; + s8 tx_power = 0; + u8 flags; + enum nl80211_band band; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + if (phy_idx >= rwnx_hw->phy.cnt) + return -ENOTSUPP; + + req = rwnx_msg_zalloc(MM_SET_CHANNEL_REQ, TASK_MM, DRV_TASK_ID, + sizeof(struct mm_set_channel_req)); + if (!req) + return -ENOMEM; + + if (phy_idx == 0) { +#ifdef CONFIG_RWNX_FULLMAC + /* On FULLMAC only setting channel of secondary chain */ + wiphy_err(rwnx_hw->wiphy, "Trying to set channel of primary chain"); + return 0; +#endif /* CONFIG_RWNX_FULLMAC */ + } else { + struct rwnx_sec_phy_chan *chan = &rwnx_hw->phy.sec_chan; + + width = chnl2bw[chan->type]; + band = chan->band; + center_freq = chan->prim20_freq; + center_freq1 = chan->center_freq1; + center_freq2 = chan->center_freq2; + flags = 0; + } + + req->chan.band = band; + req->chan.type = bw2chnl[width]; + req->chan.prim20_freq = center_freq; + req->chan.center1_freq = center_freq1; + req->chan.center2_freq = center_freq2; + req->chan.tx_power = tx_power; + req->chan.flags = flags; + req->index = phy_idx; + + if (rwnx_hw->phy.limit_bw) + limit_chan_bw(&req->chan.type, req->chan.prim20_freq, &req->chan.center1_freq); + + RWNX_DBG("mac80211: freq=%d(c1:%d - c2:%d)/width=%d - band=%d\n" + " hw(%d): prim20=%d(c1:%d - c2:%d)/ type=%d - band=%d\n", + center_freq, center_freq1, center_freq2, width, band, + phy_idx, req->chan.prim20_freq, req->chan.center1_freq, + req->chan.center2_freq, req->chan.type, req->chan.band); + + /* Send the MM_SET_CHANNEL_REQ REQ message to LMAC FW */ + return rwnx_send_msg(rwnx_hw, req, 1, MM_SET_CHANNEL_CFM, cfm); +} + +int rwnx_send_key_add(struct rwnx_hw *rwnx_hw, u8 vif_idx, u8 sta_idx, bool pairwise, + u8 *key, u8 key_len, u8 key_idx, u8 cipher_suite, + struct mm_key_add_cfm *cfm) +{ + struct mm_key_add_req *key_add_req; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + /* Build the MM_KEY_ADD_REQ message */ + key_add_req = rwnx_msg_zalloc(MM_KEY_ADD_REQ, TASK_MM, DRV_TASK_ID, + sizeof(struct mm_key_add_req)); + if (!key_add_req) + return -ENOMEM; + + /* Set parameters for the MM_KEY_ADD_REQ message */ + if (sta_idx != 0xFF) { + /* Pairwise key */ + key_add_req->sta_idx = sta_idx; + } else { + /* Default key */ + key_add_req->sta_idx = sta_idx; + key_add_req->key_idx = (u8_l)key_idx; /* only useful for default keys */ + } + key_add_req->pairwise = pairwise; + key_add_req->inst_nbr = vif_idx; + key_add_req->key.length = key_len; + memcpy(&(key_add_req->key.array[0]), key, key_len); + + key_add_req->cipher_suite = cipher_suite; + + RWNX_DBG("%s: sta_idx:%d key_idx:%d inst_nbr:%d cipher:%d key_len:%d\n", __func__, + 
key_add_req->sta_idx, key_add_req->key_idx, key_add_req->inst_nbr, + key_add_req->cipher_suite, key_add_req->key.length); +#if defined(CONFIG_RWNX_DBG) || defined(CONFIG_DYNAMIC_DEBUG) + print_hex_dump_bytes("key: ", DUMP_PREFIX_OFFSET, key_add_req->key.array, key_add_req->key.length); +#endif + + /* Send the MM_KEY_ADD_REQ message to LMAC FW */ + return rwnx_send_msg(rwnx_hw, key_add_req, 1, MM_KEY_ADD_CFM, cfm); +} + +int rwnx_send_key_del(struct rwnx_hw *rwnx_hw, uint8_t hw_key_idx) +{ + struct mm_key_del_req *key_del_req; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + /* Build the MM_KEY_DEL_REQ message */ + key_del_req = rwnx_msg_zalloc(MM_KEY_DEL_REQ, TASK_MM, DRV_TASK_ID, + sizeof(struct mm_key_del_req)); + if (!key_del_req) + return -ENOMEM; + + /* Set parameters for the MM_KEY_DEL_REQ message */ + key_del_req->hw_key_idx = hw_key_idx; + + /* Send the MM_KEY_DEL_REQ message to LMAC FW */ + return rwnx_send_msg(rwnx_hw, key_del_req, 1, MM_KEY_DEL_CFM, NULL); +} + +int rwnx_send_bcn(struct rwnx_hw *rwnx_hw,u8 *buf, u8 vif_idx, u16 bcn_len) +{ + struct apm_set_bcn_ie_req *bcn_ie_req; + bcn_ie_req = rwnx_msg_zalloc(APM_SET_BEACON_IE_REQ, TASK_APM, DRV_TASK_ID, + sizeof(struct apm_set_bcn_ie_req)); + if (!bcn_ie_req) + return -ENOMEM; + + bcn_ie_req->vif_idx = vif_idx; + bcn_ie_req->bcn_ie_len = bcn_len; + memcpy(bcn_ie_req->bcn_ie, (u8 *)buf, bcn_len); + kfree(buf); + + return rwnx_send_msg(rwnx_hw, bcn_ie_req, 1, APM_SET_BEACON_IE_CFM, NULL); +} + +int rwnx_send_bcn_change(struct rwnx_hw *rwnx_hw, u8 vif_idx, u32 bcn_addr, + u16 bcn_len, u16 tim_oft, u16 tim_len, u16 *csa_oft) +{ + struct mm_bcn_change_req *req; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + /* Build the MM_BCN_CHANGE_REQ message */ + req = rwnx_msg_zalloc(MM_BCN_CHANGE_REQ, TASK_MM, DRV_TASK_ID, + sizeof(struct mm_bcn_change_req)); + if (!req) + return -ENOMEM; + + /* Set parameters for the MM_BCN_CHANGE_REQ message */ + req->bcn_ptr = bcn_addr; + req->bcn_len = bcn_len; + req->tim_oft = tim_oft; + req->tim_len = tim_len; + req->inst_nbr = vif_idx; + + if (csa_oft) { + int i; + for (i = 0; i < BCN_MAX_CSA_CPT; i++) { + req->csa_oft[i] = csa_oft[i]; + } + } + + /* Send the MM_BCN_CHANGE_REQ message to LMAC FW */ + return rwnx_send_msg(rwnx_hw, req, 1, MM_BCN_CHANGE_CFM, NULL); +} + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0) +static inline void cfg80211_chandef_create(struct cfg80211_chan_def *chandef, + struct ieee80211_channel *chan, + enum nl80211_channel_type chan_type) +{ + if (WARN_ON(!chan)) + return; + chandef->chan = chan; + chandef->center_freq2 = 0; + switch (chan_type) { + case NL80211_CHAN_NO_HT: + chandef->width = NL80211_CHAN_WIDTH_20_NOHT; + chandef->center_freq1 = chan->center_freq; + break; + case NL80211_CHAN_HT20: + chandef->width = NL80211_CHAN_WIDTH_20; + chandef->center_freq1 = chan->center_freq; + break; + case NL80211_CHAN_HT40PLUS: + chandef->width = NL80211_CHAN_WIDTH_40; + chandef->center_freq1 = chan->center_freq + 10; + break; + case NL80211_CHAN_HT40MINUS: + chandef->width = NL80211_CHAN_WIDTH_40; + chandef->center_freq1 = chan->center_freq - 10; + break; + default: + WARN_ON(1); + } +} +#endif + +int rwnx_send_roc(struct rwnx_hw *rwnx_hw, struct rwnx_vif *vif, + struct ieee80211_channel *chan, unsigned int duration, + struct mm_remain_on_channel_cfm *roc_cfm) +{ + struct mm_remain_on_channel_req *req; + struct cfg80211_chan_def chandef; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + /* Create channel definition structure */ + cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_NO_HT); + + /* Build the 
MM_REMAIN_ON_CHANNEL_REQ message */ + req = rwnx_msg_zalloc(MM_REMAIN_ON_CHANNEL_REQ, TASK_MM, DRV_TASK_ID, + sizeof(struct mm_remain_on_channel_req)); + if (!req) + return -ENOMEM; + + /* Set parameters for the MM_REMAIN_ON_CHANNEL_REQ message */ + req->op_code = MM_ROC_OP_START; + req->vif_index = vif->vif_index; + req->duration_ms = duration; + req->band = chan->band; + req->type = bw2chnl[chandef.width]; + req->prim20_freq = chan->center_freq; + req->center1_freq = chandef.center_freq1; + req->center2_freq = chandef.center_freq2; + req->tx_power = chan_to_fw_pwr(chan->max_power); + + /* Send the MM_REMAIN_ON_CHANNEL_REQ message to LMAC FW */ + return rwnx_send_msg(rwnx_hw, req, 1, MM_REMAIN_ON_CHANNEL_CFM, roc_cfm); +} + +int rwnx_send_cancel_roc(struct rwnx_hw *rwnx_hw) +{ + struct mm_remain_on_channel_req *req; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + /* Build the MM_REMAIN_ON_CHANNEL_REQ message */ + req = rwnx_msg_zalloc(MM_REMAIN_ON_CHANNEL_REQ, TASK_MM, DRV_TASK_ID, + sizeof(struct mm_remain_on_channel_req)); + if (!req) + return -ENOMEM; + + /* Set parameters for the MM_REMAIN_ON_CHANNEL_REQ message */ + req->op_code = MM_ROC_OP_CANCEL; + + /* Send the MM_REMAIN_ON_CHANNEL_REQ message to LMAC FW */ + return rwnx_send_msg(rwnx_hw, req, 1, MM_REMAIN_ON_CHANNEL_CFM, NULL); +} + +int rwnx_send_set_power(struct rwnx_hw *rwnx_hw, u8 vif_idx, s8 pwr, + struct mm_set_power_cfm *cfm) +{ + struct mm_set_power_req *req; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + /* Build the MM_SET_POWER_REQ message */ + req = rwnx_msg_zalloc(MM_SET_POWER_REQ, TASK_MM, DRV_TASK_ID, + sizeof(struct mm_set_power_req)); + if (!req) + return -ENOMEM; + + /* Set parameters for the MM_SET_POWER_REQ message */ + req->inst_nbr = vif_idx; + req->power = pwr; + + /* Send the MM_SET_POWER_REQ message to LMAC FW */ + return rwnx_send_msg(rwnx_hw, req, 1, MM_SET_POWER_CFM, cfm); +} + +int rwnx_send_set_edca(struct rwnx_hw *rwnx_hw, u8 hw_queue, u32 param, + bool uapsd, u8 inst_nbr) +{ + struct mm_set_edca_req *set_edca_req; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + /* Build the MM_SET_EDCA_REQ message */ + set_edca_req = rwnx_msg_zalloc(MM_SET_EDCA_REQ, TASK_MM, DRV_TASK_ID, + sizeof(struct mm_set_edca_req)); + if (!set_edca_req) + return -ENOMEM; + + /* Set parameters for the MM_SET_EDCA_REQ message */ + set_edca_req->ac_param = param; + set_edca_req->uapsd = uapsd; + set_edca_req->hw_queue = hw_queue; + set_edca_req->inst_nbr = inst_nbr; + + /* Send the MM_SET_EDCA_REQ message to LMAC FW */ + return rwnx_send_msg(rwnx_hw, set_edca_req, 1, MM_SET_EDCA_CFM, NULL); +} + +#ifdef CONFIG_RWNX_P2P_DEBUGFS +int rwnx_send_p2p_oppps_req(struct rwnx_hw *rwnx_hw, struct rwnx_vif *rwnx_vif, + u8 ctw, struct mm_set_p2p_oppps_cfm *cfm) +{ + struct mm_set_p2p_oppps_req *p2p_oppps_req; + int error; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + /* Build the MM_SET_P2P_OPPPS_REQ message */ + p2p_oppps_req = rwnx_msg_zalloc(MM_SET_P2P_OPPPS_REQ, TASK_MM, DRV_TASK_ID, + sizeof(struct mm_set_p2p_oppps_req)); + + if (!p2p_oppps_req) { + return -ENOMEM; + } + + /* Fill the message parameters */ + p2p_oppps_req->vif_index = rwnx_vif->vif_index; + p2p_oppps_req->ctwindow = ctw; + + /* Send the MM_P2P_OPPPS_REQ message to LMAC FW */ + error = rwnx_send_msg(rwnx_hw, p2p_oppps_req, 1, MM_SET_P2P_OPPPS_CFM, cfm); + + return (error); +} + +int rwnx_send_p2p_noa_req(struct rwnx_hw *rwnx_hw, struct rwnx_vif *rwnx_vif, + int count, int interval, int duration, bool dyn_noa, + struct mm_set_p2p_noa_cfm *cfm) +{ + struct mm_set_p2p_noa_req *p2p_noa_req; + int error; + + 
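+	/*
+	 * Note on units: the duration/interval checks below compare both values
+	 * in the same unit, and the *_us fields are later filled by multiplying
+	 * by 1024, which matches the 802.11 TU granularity (1 TU = 1024 us).
+	 * 'count' is clamped to 255, presumably because the firmware carries it
+	 * in a single byte.
+	 */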
RWNX_DBG(RWNX_FN_ENTRY_STR); + + /* Param check */ + if (count > 255) + count = 255; + + if (duration >= interval) { + dev_err(rwnx_hw->dev, "Invalid p2p NOA config: interval=%d <= duration=%d\n", + interval, duration); + return -EINVAL; + } + + /* Build the MM_SET_P2P_NOA_REQ message */ + p2p_noa_req = rwnx_msg_zalloc(MM_SET_P2P_NOA_REQ, TASK_MM, DRV_TASK_ID, + sizeof(struct mm_set_p2p_noa_req)); + + if (!p2p_noa_req) { + return -ENOMEM; + } + + /* Fill the message parameters */ + p2p_noa_req->vif_index = rwnx_vif->vif_index; + p2p_noa_req->noa_inst_nb = 0; + p2p_noa_req->count = count; + + if (count) { + p2p_noa_req->duration_us = duration * 1024; + p2p_noa_req->interval_us = interval * 1024; + p2p_noa_req->start_offset = (interval - duration - 10) * 1024; + p2p_noa_req->dyn_noa = dyn_noa; + } + + /* Send the MM_SET_2P_NOA_REQ message to LMAC FW */ + error = rwnx_send_msg(rwnx_hw, p2p_noa_req, 1, MM_SET_P2P_NOA_CFM, cfm); + + return (error); +} +#endif /* CONFIG_RWNX_P2P_DEBUGFS */ + +#ifdef AICWF_ARP_OFFLOAD +int rwnx_send_arpoffload_en_req(struct rwnx_hw *rwnx_hw, struct rwnx_vif *rwnx_vif, + u32_l ipaddr, u8_l enable) +{ + struct mm_set_arpoffload_en_req *arp_offload_req; + int error; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + /* Build the MM_SET_ARPOFFLOAD_REQ message */ + arp_offload_req = rwnx_msg_zalloc(MM_SET_ARPOFFLOAD_REQ, TASK_MM, DRV_TASK_ID, + sizeof(struct mm_set_arpoffload_en_req)); + + if (!arp_offload_req) { + return -ENOMEM; + } + + /* Fill the message parameters */ + arp_offload_req->enable = enable; + arp_offload_req->vif_idx = rwnx_vif->vif_index; + arp_offload_req->ipaddr = ipaddr; + + /* Send the MM_ARPOFFLOAD_EN_REQ message to UMAC FW */ + error = rwnx_send_msg(rwnx_hw, arp_offload_req, 1, MM_SET_ARPOFFLOAD_CFM, NULL); + + return (error); +} +#endif + +int rwnx_send_disable_agg_req(struct rwnx_hw *rwnx_hw, u8_l agg_disable, u8_l agg_disable_rx, u8_l sta_idx) +{ + struct mm_set_agg_disable_req *req; + int error; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + /* Build the MM_SET_AGG_DISABLE_REQ message */ + req = rwnx_msg_zalloc(MM_SET_AGG_DISABLE_REQ, TASK_MM, DRV_TASK_ID, + sizeof(struct mm_set_agg_disable_req)); + + if (!req) { + return -ENOMEM; + } + + req->disable = agg_disable; + req->staidx = sta_idx; + req->disable_rx = agg_disable_rx; + + /* Send the MM_SET_AGG_DISABLE_REQ message to UMAC FW */ + error = rwnx_send_msg(rwnx_hw, req, 1, MM_SET_AGG_DISABLE_CFM, NULL); + + return (error); +}; + +int rwnx_send_coex_req(struct rwnx_hw *rwnx_hw, u8_l disable_coexnull, u8_l enable_nullcts) +{ + struct mm_set_coex_req *coex_req; + int error; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + + /* Build the MM_SET_COEX_REQ message */ + coex_req = rwnx_msg_zalloc(MM_SET_COEX_REQ, TASK_MM, DRV_TASK_ID, + sizeof(struct mm_set_coex_req)); + + if (!coex_req) { + return -ENOMEM; + } + + coex_req->bt_on = 1; + coex_req->disable_coexnull = disable_coexnull; + coex_req->enable_nullcts = enable_nullcts; + coex_req->enable_periodic_timer = 0; + coex_req->coex_timeslot_set = 0; + + /* Send the MM_SET_COEX_REQ message to UMAC FW */ + error = rwnx_send_msg(rwnx_hw, coex_req, 1, MM_SET_COEX_CFM, NULL); + + return (error); +}; + + +int rwnx_send_rf_config_req(struct rwnx_hw *rwnx_hw, u8_l ofst, u8_l sel, u8_l *tbl, u16_l len) +{ + struct mm_set_rf_config_req *rf_config_req; + int error; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + /* Build the MM_SET_RF_CONFIG_REQ message */ + rf_config_req = rwnx_msg_zalloc(MM_SET_RF_CONFIG_REQ, TASK_MM, DRV_TASK_ID, + sizeof(struct mm_set_rf_config_req)); + + if (!rf_config_req) { + 
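+		/* rwnx_msg_zalloc() returned NULL, so there is no lmac_msg to free here. */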
return -ENOMEM; + } + + rf_config_req->table_sel = sel; + rf_config_req->table_ofst = ofst; + rf_config_req->table_num = 16; + rf_config_req->deft_page = 0; + + memcpy(rf_config_req->data, tbl, len); + + /* Send the MM_SET_RF_CONFIG_REQ message to UMAC FW */ + error = rwnx_send_msg(rwnx_hw, rf_config_req, 1, MM_SET_RF_CONFIG_CFM, NULL); + + return (error); +} + +#ifdef RF_WRITE_FILE + +#define FW_PATH_MAX_LEN_RF 200 +extern char aic_fw_path[FW_PATH_MAX_LEN_RF]; + +int rwnx_rf_write_file(void *buf, int buf_len) +{ + int sum = 0, len = 0; + char *path = NULL; + struct file *fp = NULL; + loff_t pos = 0; +#if LINUX_VERSION_CODE <= KERNEL_VERSION(5, 10, 0) + mm_segment_t fs; +#endif + + AICWFDBG(LOGINFO, "%s\n", __func__); + path = __getname(); + if (!path) { + AICWFDBG(LOGINFO, "get path fail\n"); + return -1; + } + + len = snprintf(path, FW_PATH_MAX_LEN_RF, "%s/%s", aic_fw_path, FW_RF_CALIB_FILE); + AICWFDBG(LOGINFO, "%s: path=%s\n", __func__,path); + + fp = filp_open(path, O_RDWR | O_CREAT, 0644); + if (IS_ERR(fp)) { + AICWFDBG(LOGINFO, "fp open fial\n"); + __putname(path); + fp = NULL; + return -2; + } + +#if LINUX_VERSION_CODE <= KERNEL_VERSION(5, 10, 0) + fs = get_fs(); + set_fs(KERNEL_DS); +#endif + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0) + sum = kernel_write(fp, buf, buf_len, &pos); +#else LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0) + sum = kernel_write(fp, (char *)buf, buf_len, pos); +#endif + +#if LINUX_VERSION_CODE <= KERNEL_VERSION(5, 10, 0) + set_fs(fs); +#endif + + __putname(path); + filp_close(fp, NULL); + fp = NULL; + + return 0; + +} + +int is_file_exist_rf(char* name) +{ + char *path = NULL; + struct file *fp = NULL; + int len; + + path = __getname(); + if (!path) { + AICWFDBG(LOGINFO, "%s getname fail\n", __func__); + return -1; + } + + len = snprintf(path, FW_PATH_MAX_LEN_RF, "%s/%s", aic_fw_path, name); + + fp = filp_open(path, O_RDONLY, 0); + if (IS_ERR(fp)) { + __putname(path); + fp = NULL; + return 0; + } else { + __putname(path); + filp_close(fp, NULL); + fp = NULL; + return 1; + } +} +#endif + +extern void get_userconfig_xtal_cap(xtal_cap_conf_t *xtal_cap); + +int rwnx_send_rf_calib_req(struct rwnx_hw *rwnx_hw, struct mm_set_rf_calib_cfm *cfm) +{ + xtal_cap_conf_t xtal_cap = {0,}; + int error; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + AICWFDBG(LOGINFO, "%s in,chipid=%d\n", __func__,rwnx_hw->usbdev->chipid); + + if((rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800D81X2) ||(rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800D89X2)) + { + struct mm_set_rf_calib_req_v2 *rf_calib_req; + struct mm_set_rf_calib_cfm_v2 cfm2; + + /* Build the MM_SET_RF_CALIB_REQ message */ + rf_calib_req = rwnx_msg_zalloc(MM_SET_RF_CALIB_REQ, TASK_MM, DRV_TASK_ID, + sizeof(struct mm_set_rf_calib_req_v2)); + + if (!rf_calib_req) { + return -ENOMEM; + } + + if(rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8801){ + rf_calib_req->cal_cfg_24g = 0xbf; + rf_calib_req->cal_cfg_5g = 0x3f; + }else if(rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800DC || + rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800DW){ + rf_calib_req->cal_cfg_24g = 0x0f8f; + rf_calib_req->cal_cfg_5g = 0; + }else if(rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800D81){ + rf_calib_req->cal_cfg_24g = 0x0f8f; + rf_calib_req->cal_cfg_5g = 0x0f0f; + }else if(rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800D81X2 || + rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800D89X2){ + rf_calib_req->cal_cfg_24g = 0x0f8f; + rf_calib_req->cal_cfg_5g = 0x0f0f; + } + + rf_calib_req->param_alpha = 0x0c34c008; + rf_calib_req->bt_calib_en = 0; + rf_calib_req->bt_calib_param = 
0x264203; + + get_userconfig_xtal_cap(&xtal_cap); + + if (xtal_cap.enable) { + AICWFDBG(LOGINFO, "user xtal cap: %d, cap_fine: %d\n", xtal_cap.xtal_cap, xtal_cap.xtal_cap_fine); + rf_calib_req->xtal_cap = xtal_cap.xtal_cap; + rf_calib_req->xtal_cap_fine = xtal_cap.xtal_cap_fine; + } else { + rf_calib_req->xtal_cap = 0; + rf_calib_req->xtal_cap_fine = 0; + } + +#ifdef RF_WRITE_FILE + if(is_file_exist_rf(FW_RF_CALIB_FILE) == 1) + { + void *buffer = NULL; + char *path = NULL; + struct file *fp = NULL; + int size = 0, len = 0;// i = 0; + ssize_t rdlen = 0; + unsigned char decrypt[16]; + //u32 **fw_buf =NULL; + //struct kstat stat; + + + AICWFDBG(LOGINFO, "%s: file exist in\n", __func__); + path = __getname(); + if (!path) + { + return -1; + } + len = snprintf(path, FW_PATH_MAX_LEN_RF, "%s/%s", aic_fw_path, FW_RF_CALIB_FILE); + AICWFDBG(LOGINFO, "%s: path=%s\n", __func__,path); + + if (len >= FW_PATH_MAX_LEN_RF) + { + AICWFDBG(LOGERROR, "%s: %s file's path too long\n", __func__, FW_RF_CALIB_FILE); + __putname(path); + return -2; + } + + fp = filp_open(path, O_RDONLY, 0); + if (IS_ERR_OR_NULL(fp)) + { + AICWFDBG(LOGERROR, "%s: %s file failed to open\n", __func__, FW_RF_CALIB_FILE); + __putname(path); + fp = NULL; + return -3; + } + + //vfs_stat(path, &stat); + size =(int)fp->f_inode->i_size; + AICWFDBG(LOGINFO, "%s: file is %d bytes\n", __func__,size); + buffer = vmalloc(size); + if (!buffer) + { + __putname(path); + filp_close(fp, NULL); + fp = NULL; + return -4; + } + + #if LINUX_VERSION_CODE > KERNEL_VERSION(4, 13, 16) + rdlen = kernel_read(fp, buffer, size, &fp->f_pos); + #else + rdlen = kernel_read(fp, fp->f_pos, buffer, size); + #endif + //rwnx_data_dump("cal_res.res_data",buffer,size); + + if (size != rdlen) + { + AICWFDBG(LOGERROR, "%s: %s file rdlen invalid %d\n", __func__, FW_RF_CALIB_FILE, (int)rdlen); + __putname(path); + filp_close(fp, NULL); + fp = NULL; + vfree(buffer); + buffer = NULL; + return -5; + } + + if (rdlen > 0) + { + fp->f_pos += rdlen; + } + + __putname(path); + filp_close(fp, NULL); + fp = NULL; + + if(rdlen == sizeof(rf_calib_req->cal_res.res_data)) + { + memcpy((u8 *)rf_calib_req->cal_res.res_data,buffer,rdlen); + //SET Demo + rf_calib_req->cal_res.magic_num = DRIVER_SET_WIFI_CALRES_MAGIC_NUM; + rf_calib_req->cal_res.info_flag = 0x4F; + rf_calib_req->cal_res.calib_flag = 0x00; + // before req, write testmode rf calib reg->cal res.res_data[], data read from file + } + else + { + AICWFDBG(LOGERROR, "%s: %s file rdlen %d is not equal rf_calib_req \n", __func__, FW_RF_CALIB_FILE, (int)rdlen); + vfree(buffer); + return -6; + } + vfree(buffer); + } + else +#endif + { + AICWFDBG(LOGINFO, "%s: file not exist in\n", __func__); + //GET Demo + rf_calib_req->cal_res.magic_num = DRIVER_GET_WIFI_CALRES_MAGIC_NUM; + rf_calib_req->cal_res.info_flag = 0x00; + rf_calib_req->cal_res.calib_flag = 0x4F; + // after req, read mm_set_rf_calib_cfm.cal_res.res data[] + // save the data into file + } + /* Send the MM_SET_RF_CALIB_REQ message to UMAC FW */ + error = rwnx_send_msg(rwnx_hw, rf_calib_req, 1, MM_SET_RF_CALIB_CFM, &cfm2); + +#ifdef RF_WRITE_FILE + if(is_file_exist_rf(FW_RF_CALIB_FILE) != 1) + { + void *buffer = NULL; + int buf_len = sizeof(cfm2.cal_res.res_data); + + AICWFDBG(LOGINFO, "%s: file not exist in2,buf_len=%d\n", __func__,buf_len); + buffer = vmalloc(buf_len); + if (!buffer) + { + return -4; + } + memcpy(buffer,cfm2.cal_res.res_data,buf_len); + //rwnx_data_dump("cal_res.res_data",buffer,buf_len); + rwnx_rf_write_file(buffer,buf_len); + vfree(buffer); + } +#endif + 
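+		/*
+		 * For the X2-family chips the confirm arrives in the v2 layout
+		 * (cfm2); only the RX/TX gain table addresses appear to be needed
+		 * by callers, so they are copied back into the legacy
+		 * mm_set_rf_calib_cfm that was passed in, keeping the caller-visible
+		 * interface the same for all chip variants.
+		 */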
cfm->rxgain_24g_addr = cfm2.rxgain_24g_addr; + cfm->rxgain_5g_addr = cfm2.rxgain_5g_addr; + cfm->txgain_24g_addr = cfm2.txgain_24g_addr; + cfm->txgain_5g_addr = cfm2.txgain_5g_addr; + } + else + { + struct mm_set_rf_calib_req *rf_calib_req; + /* Build the MM_SET_RF_CALIB_REQ message */ + rf_calib_req = rwnx_msg_zalloc(MM_SET_RF_CALIB_REQ, TASK_MM, DRV_TASK_ID, + sizeof(struct mm_set_rf_calib_req)); + + if (!rf_calib_req) { + return -ENOMEM; + } + + if(rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8801){ + rf_calib_req->cal_cfg_24g = 0xbf; + rf_calib_req->cal_cfg_5g = 0x3f; + }else if(rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800DC || + rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800DW){ + rf_calib_req->cal_cfg_24g = 0x0f8f; + rf_calib_req->cal_cfg_5g = 0; + }else if(rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800D81){ + rf_calib_req->cal_cfg_24g = 0x0f8f; + rf_calib_req->cal_cfg_5g = 0x0f0f; + }else if(rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800D81X2 || + rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800D89X2){ + rf_calib_req->cal_cfg_24g = 0x0f8f; + rf_calib_req->cal_cfg_5g = 0x0f0f; + } + + rf_calib_req->param_alpha = 0x0c34c008; + rf_calib_req->bt_calib_en = 0; + rf_calib_req->bt_calib_param = 0x264203; + + get_userconfig_xtal_cap(&xtal_cap); + + if (xtal_cap.enable) { + AICWFDBG(LOGINFO, "user xtal cap: %d, cap_fine: %d\n", xtal_cap.xtal_cap, xtal_cap.xtal_cap_fine); + rf_calib_req->xtal_cap = xtal_cap.xtal_cap; + rf_calib_req->xtal_cap_fine = xtal_cap.xtal_cap_fine; + } else { + rf_calib_req->xtal_cap = 0; + rf_calib_req->xtal_cap_fine = 0; + } + /* Send the MM_SET_RF_CALIB_REQ message to UMAC FW */ + error = rwnx_send_msg(rwnx_hw, rf_calib_req, 1, MM_SET_RF_CALIB_CFM, cfm); + } + + return (error); +}; + +int rwnx_send_get_macaddr_req(struct rwnx_hw *rwnx_hw, struct mm_get_mac_addr_cfm *cfm) +{ + struct mm_get_mac_addr_req *get_macaddr_req; + int error; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + /* Build the MM_GET_MAC_ADDR_REQ message */ + get_macaddr_req = rwnx_msg_zalloc(MM_GET_MAC_ADDR_REQ, TASK_MM, DRV_TASK_ID, + sizeof(struct mm_get_mac_addr_req)); + + if (!get_macaddr_req) { + return -ENOMEM; + } + + get_macaddr_req->get = 1; + + /* Send the MM_GET_MAC_ADDR_REQ message to UMAC FW */ + error = rwnx_send_msg(rwnx_hw, get_macaddr_req, 1, MM_GET_MAC_ADDR_CFM, cfm); + + return (error); +}; + + +int rwnx_send_get_sta_info_req(struct rwnx_hw *rwnx_hw, u8_l sta_idx, struct mm_get_sta_info_cfm *cfm) +{ + struct mm_get_sta_info_req *get_info_req; + int error; + + /* Build the MM_GET_STA_INFO_REQ message */ + get_info_req = rwnx_msg_zalloc(MM_GET_STA_INFO_REQ, TASK_MM, DRV_TASK_ID, + sizeof(struct mm_get_sta_info_req)); + + if (!get_info_req) { + return -ENOMEM; + } + + get_info_req->sta_idx = sta_idx; + + /* Send the MM_GET_STA_INFO_REQ message to UMAC FW */ + error = rwnx_send_msg(rwnx_hw, get_info_req, 1, MM_GET_STA_INFO_CFM, cfm); + + return error; +}; + + +#if 0 +int rwnx_send_get_sta_txinfo_req(struct rwnx_hw *rwnx_hw, u8_l sta_idx, struct mm_get_sta_txinfo_cfm *cfm) +{ + struct mm_get_sta_txinfo_req *get_txinfo_req; + int error; + + + /* Build the MM_GET_STA_TXINFO_REQ message */ + get_txinfo_req = rwnx_msg_zalloc(MM_GET_STA_TXINFO_REQ, TASK_MM, DRV_TASK_ID, + sizeof(struct mm_get_sta_txinfo_req)); + + if (!get_txinfo_req) { + return -ENOMEM; + } + + get_txinfo_req->sta_idx = 1; + + /* Send the MM_GET_STA_TXINFO_REQ message to UMAC FW */ + error = rwnx_send_msg(rwnx_hw, get_txinfo_req, 1, MM_GET_STA_TXINFO_CFM, cfm); + + return (error); +} +#endif + +int rwnx_send_set_stack_start_req(struct rwnx_hw 
*rwnx_hw, u8_l on, u8_l efuse_valid, u8_l set_vendor_info, + u8_l fwtrace_redir_en, struct mm_set_stack_start_cfm *cfm) +{ + struct mm_set_stack_start_req *req; + int error; + + /* Build the MM_SET_STACK_START_REQ message */ + req = rwnx_msg_zalloc(MM_SET_STACK_START_REQ, TASK_MM, DRV_TASK_ID, sizeof(struct mm_set_stack_start_req)); + + if (!req) { + return -ENOMEM; + } + + req->is_stack_start = on; + req->efuse_valid = efuse_valid; + req->set_vendor_info = set_vendor_info; + req->fwtrace_redir = fwtrace_redir_en; + /* Send the MM_GET_STA_TXINFO_REQ message to UMAC FW */ + error = rwnx_send_msg(rwnx_hw, req, 1, MM_SET_STACK_START_CFM, cfm); + + return error; +} + +int rwnx_send_set_temp_comp_req(struct rwnx_hw *rwnx_hw, struct mm_set_vendor_swconfig_cfm *cfm) +{ + struct mm_set_vendor_swconfig_req *req; + int ret; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + /* Build the TEMP_COMP_SET_REQ message */ + req = rwnx_msg_zalloc(MM_SET_VENDOR_SWCONFIG_REQ, TASK_MM, DRV_TASK_ID, sizeof(struct mm_set_vendor_swconfig_req)); + if (!req) { + printk("%s msg_alloc fail\n", __func__); + return -ENOMEM; + } + req->swconfig_id = TEMP_COMP_SET_REQ; + req->temp_comp_set_req.enable = 1; + req->temp_comp_set_req.tmr_period_ms = 15 * 1000; + + ret = rwnx_send_msg(rwnx_hw, req, 1, MM_SET_VENDOR_SWCONFIG_CFM, cfm); + if (!ret) + printk("temp_comp status: %d\n", cfm->temp_comp_set_cfm.status); + else { + printk("%s msg_fail\n", __func__); + return ret; + } + return ret; +} + +int rwnx_send_vendor_hwconfig_req(struct rwnx_hw *rwnx_hw, uint32_t hwconfig_id, int32_t *param, int32_t *param_out) +{ + struct mm_set_acs_txop_req *req0; + struct mm_set_channel_access_req *req1; + struct mm_set_mac_timescale_req *req2; + struct mm_set_cca_threshold_req *req3; + struct mm_set_bwmode_req *req4; + struct mm_set_customized_freq_req *req5; + + int error = 0; + int i; + + switch (hwconfig_id) + { + case ACS_TXOP_REQ: + /* Build the ACS_TXOP_REQ message */ + req0= rwnx_msg_zalloc(MM_SET_VENDOR_HWCONFIG_REQ, TASK_MM, DRV_TASK_ID, sizeof(struct mm_set_acs_txop_req) ); + if (!req0) + return -ENOMEM; + req0->hwconfig_id = hwconfig_id; + req0->txop_be = param[0]; + req0->txop_bk = param[1]; + req0->txop_vi = param[2]; + req0->txop_vo = param[3]; + printk("set_acs_txop_req: be: %x,bk: %x,vi: %x,vo: %x\n", + req0->txop_be, req0->txop_bk, req0->txop_vi, req0->txop_vo); + /* Send the MM_SET_VENDOR_HWCONFIG_CFM message to UMAC FW */ + error = rwnx_send_msg(rwnx_hw, req0, 1, MM_SET_VENDOR_HWCONFIG_CFM, NULL); + break; + + case CHANNEL_ACCESS_REQ: + /* Build the CHANNEL_ACCESS_REQ message */ + req1 = rwnx_msg_zalloc(MM_SET_VENDOR_HWCONFIG_REQ, TASK_MM, DRV_TASK_ID, sizeof(struct mm_set_channel_access_req)); + if (!req1) + return -ENOMEM; + req1->hwconfig_id = hwconfig_id; + req1->edca[0] = param[0]; + req1->edca[1] = param[1]; + req1->edca[2] = param[2]; + req1->edca[3] = param[3]; + req1->vif_idx = param[4]; + req1->retry_cnt = param[5]; + req1->rts_en = param[6]; + req1->long_nav_en = param[7]; + req1->cfe_en = param[8]; + req1->rc_retry_cnt[0] = param[9]; + req1->rc_retry_cnt[1] = param[10]; + req1->rc_retry_cnt[2] = param[11]; + req1->ccademod_th = param[12]; + req1->remove_1m2m = param[13]; + printk("set_channel_access_req:edca[]= %x %x %x %x\nvif_idx: %x, retry_cnt: %x, rts_en: %x, long_nav_en: %x, cfe_en: %x, rc_retry_cnt: %x:%x:%x, ccademod_th = %d remove_1m2m = %d\n", + req1->edca[0], req1->edca[1], req1->edca[2], req1->edca[3], req1->vif_idx, req1->retry_cnt, req1->rts_en, req1->long_nav_en, req1->cfe_en, 
req1->rc_retry_cnt[0],req1->rc_retry_cnt[1], req1->rc_retry_cnt[2], req1->ccademod_th, req1->remove_1m2m); + /* Send the MM_SET_VENDOR_HWCONFIG_CFM message to UMAC FW */ + error = rwnx_send_msg(rwnx_hw, req1, 1, MM_SET_VENDOR_HWCONFIG_CFM, NULL); + break; + + case MAC_TIMESCALE_REQ: + /* Build the MAC_TIMESCALE_REQ message */ + req2 = rwnx_msg_zalloc(MM_SET_VENDOR_HWCONFIG_REQ, TASK_MM, DRV_TASK_ID, sizeof(struct mm_set_mac_timescale_req)); + if (!req2) + return -ENOMEM; + req2->hwconfig_id = hwconfig_id; + req2->sifsA_time = param[0]; + req2->sifsB_time = param[1]; + req2->slot_time = param[2]; + req2->rx_startdelay_ofdm = param[3]; + req2->rx_startdelay_long = param[4]; + req2->rx_startdelay_short = param[5]; + printk("set_mac_timescale_req:sifsA_time: %x, sifsB_time: %x, slot_time: %x, rx_startdelay ofdm:%x long %x short %x\n", + req2->sifsA_time, req2->sifsB_time, req2->slot_time, req2->rx_startdelay_ofdm, req2->rx_startdelay_long, req2->rx_startdelay_short); + /* Send the MM_SET_VENDOR_HWCONFIG_CFM message to UMAC FW */ + error = rwnx_send_msg(rwnx_hw, req2, 1, MM_SET_VENDOR_HWCONFIG_CFM, NULL); + break; + + case CCA_THRESHOLD_REQ: + /* Build the CCA_THRESHOLD_REQ message */ + req3 = rwnx_msg_zalloc(MM_SET_VENDOR_HWCONFIG_REQ, TASK_MM, DRV_TASK_ID, sizeof(struct mm_set_cca_threshold_req)); + if (!req3) + return -ENOMEM; + req3->hwconfig_id = hwconfig_id; + req3->auto_cca_en = param[0]; + req3->cca20p_rise_th = param[1]; + req3->cca20s_rise_th = param[2]; + req3->cca20p_fall_th = param[3]; + req3->cca20s_fall_th = param[4]; + printk("cca_threshold_req: auto_cca_en:%d\ncca20p_rise_th = %d\ncca20s_rise_th = %d\ncca20p_fall_th = %d\ncca20s_fall_th = %d\n", + req3->auto_cca_en, req3->cca20p_rise_th, req3->cca20s_rise_th, req3->cca20p_fall_th, req3->cca20s_fall_th); + /* Send the MM_SET_VENDOR_HWCONFIG_CFM message to UMAC FW */ + error = rwnx_send_msg(rwnx_hw, req3, 1, MM_SET_VENDOR_HWCONFIG_CFM, NULL); + break; + case BWMODE_REQ: + /* Build the SET_BWMODE_REQ message */ + req4 = rwnx_msg_zalloc(MM_SET_VENDOR_HWCONFIG_REQ, TASK_MM, DRV_TASK_ID, sizeof(struct mm_set_bwmode_req)); + if (!req4) + return -ENOMEM; + req4->hwconfig_id = hwconfig_id; + req4->bwmode = param[0]; + printk("bwmode :%d\n", req4->bwmode); + /* Send the MM_SET_VENDOR_HWCONFIG_CFM message to UMAC FW */ + error = rwnx_send_msg(rwnx_hw, req4, 1, MM_SET_VENDOR_HWCONFIG_CFM, NULL); + break; + case CHIP_TEMP_GET_REQ: + if ((rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800DC) || + (rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800DW)) + { + struct mm_get_chip_temp_req *req; + struct mm_set_vendor_hwconfig_cfm cfm = {0,}; + /* Build the CHIP_TEMP_GET_REQ message */ + req = rwnx_msg_zalloc(MM_SET_VENDOR_HWCONFIG_REQ, TASK_MM, DRV_TASK_ID, sizeof(struct mm_get_chip_temp_req)); + if (!req) + return -ENOMEM; + req->hwconfig_id = hwconfig_id; + /* Send the MM_SET_VENDOR_HWCONFIG_REQ message to UMAC FW */ + error = rwnx_send_msg(rwnx_hw, req, 1, MM_SET_VENDOR_HWCONFIG_CFM, &cfm); + if (!error) { + if (param_out) { + param_out[0] = (int32_t)cfm.chip_temp_cfm.degree; + } + printk("get_chip_temp degree=%d\n", cfm.chip_temp_cfm.degree); + } else { + printk("get_chip_temp err=%d\n", error); + } + } + case CUSTOMIZED_FREQ_REQ: + /* Build the CUSTOMIZED_FREQ_REQ message */ + req5 = rwnx_msg_zalloc(MM_SET_VENDOR_HWCONFIG_REQ, TASK_MM, DRV_TASK_ID, sizeof(struct mm_set_customized_freq_req)); + if (!req5) + return -ENOMEM; + req5->hwconfig_id = hwconfig_id; + for (i = 0; i < 4; i++) { + req5->raw_freq[i] = param [i]; + req5->map_freq[i] = param [i + 
4]; + } + printk("customized_freq_req: raw_freq[ %d %d %d %d ], map_freq[ %d %d %d %d]\n", + param[0], param[1], param[2], param[3], param[4], param[5], param[6], param[7]); + /* Send the MM_SET_VENDOR_HWCONFIG_CFM message to UMAC FW */ + error = rwnx_send_msg(rwnx_hw, req5, 1, MM_SET_VENDOR_HWCONFIG_CFM, NULL); + + break; + default: + return -ENOMEM; + } + return error; +} + +int rwnx_send_vendor_swconfig_req(struct rwnx_hw *rwnx_hw, uint32_t swconfig_id, int32_t *param_in, int32_t *param_out) +{ + struct mm_set_vendor_swconfig_req *req; + struct mm_set_vendor_swconfig_cfm cfm = {0,}; + int error; + + req = rwnx_msg_zalloc(MM_SET_VENDOR_SWCONFIG_REQ, TASK_MM, DRV_TASK_ID, sizeof(struct mm_set_vendor_swconfig_req)); + if (!req) { + return -ENOMEM; + } + req->swconfig_id = swconfig_id; + + switch (swconfig_id) + { + case BCN_CFG_REQ: + /* Build the BCN_CFG_REQ message */ + req->bcn_cfg_req.tim_bcmc_ignored_enable = (bool_l)param_in[0]; + printk("bcn_cfg_req: tim_bcmc_ignd=%d\n", req->bcn_cfg_req.tim_bcmc_ignored_enable); + /* Send the MM_SET_VENDOR_SWCONFIG_REQ message to UMAC FW */ + error = rwnx_send_msg(rwnx_hw, req, 1, MM_SET_VENDOR_SWCONFIG_CFM, &cfm); + if (!error) { + param_out[0] = (int32_t)cfm.bcn_cfg_cfm.tim_bcmc_ignored_status; + printk("status=%d\n", cfm.bcn_cfg_cfm.tim_bcmc_ignored_status); + } + break; + + case TEMP_COMP_SET_REQ: + /* Build the TEMP_COMP_SET_REQ message */ + req->temp_comp_set_req.enable = (u8_l)param_in[0]; + req->temp_comp_set_req.tmr_period_ms = (u32_l)param_in[1]; + printk("temp_comp_set_req: en=%d, tmr=%x\n", + req->temp_comp_set_req.enable, req->temp_comp_set_req.tmr_period_ms); + /* Send the MM_SET_VENDOR_SWCONFIG_REQ message to UMAC FW */ + error = rwnx_send_msg(rwnx_hw, req, 1, MM_SET_VENDOR_SWCONFIG_CFM, &cfm); + if (!error) { + param_out[0] = (int32_t)cfm.temp_comp_set_cfm.status; + printk("status=%d\n", cfm.temp_comp_set_cfm.status); + } + break; + + case TEMP_COMP_GET_REQ: + printk("temp_comp_get_req\n"); + /* Send the MM_SET_VENDOR_SWCONFIG_REQ message to UMAC FW */ + error = rwnx_send_msg(rwnx_hw, req, 1, MM_SET_VENDOR_SWCONFIG_CFM, &cfm); + if (!error) { + param_out[0] = (int32_t)cfm.temp_comp_get_cfm.status; + param_out[1] = (int32_t)cfm.temp_comp_get_cfm.degree; + printk("status=%d, degree=%d\n", + cfm.temp_comp_get_cfm.status, cfm.temp_comp_get_cfm.degree); + } + break; + + case EXT_FLAGS_SET_REQ: + /* Build the EXT_FLAGS_SET_REQ message */ + req->ext_flags_set_req.user_flags = (u32_l)param_in[0]; + printk("ext_flags_set_req: flags=%x\n", + req->ext_flags_set_req.user_flags); + /* Send the MM_SET_VENDOR_SWCONFIG_REQ message to UMAC FW */ + error = rwnx_send_msg(rwnx_hw, req, 1, MM_SET_VENDOR_SWCONFIG_CFM, &cfm); + if (!error) { + param_out[0] = (uint32_t)cfm.ext_flags_set_cfm.user_flags; + printk("cfm flags=%x\n", cfm.ext_flags_set_cfm.user_flags); + } + break; + + case EXT_FLAGS_GET_REQ: + /* Build the EXT_FLAGS_GET_REQ message */ + printk("ext_flags_get_req\n"); + /* Send the MM_SET_VENDOR_SWCONFIG_REQ message to UMAC FW */ + error = rwnx_send_msg(rwnx_hw, req, 1, MM_SET_VENDOR_SWCONFIG_CFM, &cfm); + if (!error) { + param_out[0] = (uint32_t)cfm.ext_flags_get_cfm.user_flags; + printk("cfm flags=%x\n", cfm.ext_flags_get_cfm.user_flags); + } + break; + + case EXT_FLAGS_MASK_SET_REQ: + /* Build the EXT_FLAGS_MASK_SET_REQ message */ + req->ext_flags_mask_set_req.user_flags_mask = (u32_l)param_in[0]; + req->ext_flags_mask_set_req.user_flags_val = (u32_l)param_in[1]; + printk("ext_flags_mask_set_req: flags mask=0x%x, val=0x%x\n", + 
req->ext_flags_mask_set_req.user_flags_mask, req->ext_flags_mask_set_req.user_flags_val); + /* Send the MM_SET_VENDOR_SWCONFIG_REQ message to UMAC FW */ + error = rwnx_send_msg(rwnx_hw, req, 1, MM_SET_VENDOR_SWCONFIG_CFM, &cfm); + if (!error) { + param_out[0] = (uint32_t)cfm.ext_flags_mask_set_cfm.user_flags; + printk("cfm flags=%x\n", cfm.ext_flags_mask_set_cfm.user_flags); + } + break; + + default: + error = -ENOMEM; + break; + } + + return error; +} + +int rwnx_send_vendor_hwconfig_req_x2(struct rwnx_hw *rwnx_hw, uint32_t hwconfig_id, int32_t *param, int32_t *param_out) +{ + struct mm_set_acs_txop_req *req0; + struct mm_set_channel_access_req *req1; + struct mm_set_mac_timescale_req *req2; + struct mm_set_cca_threshold_req *req3; + struct mm_set_bwmode_req *req4; + + int error = 0; + + switch (hwconfig_id) + { + case ACS_TXOP_REQ_X2: + /* Build the ACS_TXOP_REQ message */ + req0= rwnx_msg_zalloc(MM_SET_VENDOR_HWCONFIG_REQ, TASK_MM, DRV_TASK_ID, sizeof(struct mm_set_acs_txop_req) ); + if (!req0) + return -ENOMEM; + req0->hwconfig_id = hwconfig_id; + req0->txop_be = param[0]; + req0->txop_bk = param[1]; + req0->txop_vi = param[2]; + req0->txop_vo = param[3]; + AICWFDBG(LOGINFO, "set_acs_txop_req: be: %x,bk: %x,vi: %x,vo: %x\n", + req0->txop_be, req0->txop_bk, req0->txop_vi, req0->txop_vo); + /* Send the MM_SET_VENDOR_HWCONFIG_CFM message to UMAC FW */ + error = rwnx_send_msg(rwnx_hw, req0, 1, MM_SET_VENDOR_HWCONFIG_CFM, NULL); + break; + + case CHANNEL_ACCESS_REQ_X2: + /* Build the CHANNEL_ACCESS_REQ message */ + req1 = rwnx_msg_zalloc(MM_SET_VENDOR_HWCONFIG_REQ, TASK_MM, DRV_TASK_ID, sizeof(struct mm_set_channel_access_req)); + if (!req1) + return -ENOMEM; + req1->hwconfig_id = hwconfig_id; + req1->edca[0] = param[0]; + req1->edca[1] = param[1]; + req1->edca[2] = param[2]; + req1->edca[3] = param[3]; + req1->vif_idx = param[4]; + req1->retry_cnt = param[5]; + req1->rts_en = param[6]; + req1->long_nav_en = param[7]; + req1->cfe_en = param[8]; + req1->rc_retry_cnt[0] = param[9]; + req1->rc_retry_cnt[1] = param[10]; + req1->rc_retry_cnt[2] = param[11]; + req1->ccademod_th = param[12]; + req1->remove_1m2m = param[13]; + AICWFDBG(LOGINFO, "set_channel_access_req:edca[]= %x %x %x %x\nvif_idx: %x, retry_cnt: %x, rts_en: %x, long_nav_en: %x, cfe_en: %x, rc_retry_cnt: %x:%x:%x, ccademod_th = %d remove_1m2m = %x\n", + req1->edca[0], req1->edca[1], req1->edca[2], req1->edca[3], req1->vif_idx, req1->retry_cnt, req1->rts_en, req1->long_nav_en, req1->cfe_en, req1->rc_retry_cnt[0],req1->rc_retry_cnt[1], req1->rc_retry_cnt[2], req1->ccademod_th, req1->remove_1m2m); + /* Send the MM_SET_VENDOR_HWCONFIG_CFM message to UMAC FW */ + error = rwnx_send_msg(rwnx_hw, req1, 1, MM_SET_VENDOR_HWCONFIG_CFM, NULL); + break; + + case MAC_TIMESCALE_REQ_X2: + /* Build the MAC_TIMESCALE_REQ message */ + req2 = rwnx_msg_zalloc(MM_SET_VENDOR_HWCONFIG_REQ, TASK_MM, DRV_TASK_ID, sizeof(struct mm_set_mac_timescale_req)); + if (!req2) + return -ENOMEM; + req2->hwconfig_id = hwconfig_id; + req2->sifsA_time = param[0]; + req2->sifsB_time = param[1]; + req2->slot_time = param[2]; + req2->rx_startdelay_ofdm = param[3]; + req2->rx_startdelay_long = param[4]; + req2->rx_startdelay_short = param[5]; + AICWFDBG(LOGINFO, "set_mac_timescale_req:sifsA_time: %x, sifsB_time: %x, slot_time: %x, rx_startdelay ofdm:%x long %x short %x\n", + req2->sifsA_time, req2->sifsB_time, req2->slot_time, req2->rx_startdelay_ofdm, req2->rx_startdelay_long, req2->rx_startdelay_short); + /* Send the MM_SET_VENDOR_HWCONFIG_CFM message to UMAC FW */ + error 
= rwnx_send_msg(rwnx_hw, req2, 1, MM_SET_VENDOR_HWCONFIG_CFM, NULL); + break; + + case CCA_THRESHOLD_REQ_X2: + /* Build the CCA_THRESHOLD_REQ message */ + req3 = rwnx_msg_zalloc(MM_SET_VENDOR_HWCONFIG_REQ, TASK_MM, DRV_TASK_ID, sizeof(struct mm_set_cca_threshold_req)); + if (!req3) + return -ENOMEM; + req3->hwconfig_id = hwconfig_id; + req3->auto_cca_en = param[0]; + req3->cca20p_rise_th = param[1]; + req3->cca20s_rise_th = param[2]; + req3->cca20p_fall_th = param[3]; + req3->cca20s_fall_th = param[4]; + AICWFDBG(LOGINFO, "cca_threshold_req: auto_cca_en:%d\ncca20p_rise_th = %d\ncca20s_rise_th = %d\ncca20p_fall_th = %d\ncca20s_fall_th = %d\n", + req3->auto_cca_en, req3->cca20p_rise_th, req3->cca20s_rise_th, req3->cca20p_fall_th, req3->cca20s_fall_th); + /* Send the MM_SET_VENDOR_HWCONFIG_CFM message to UMAC FW */ + error = rwnx_send_msg(rwnx_hw, req3, 1, MM_SET_VENDOR_HWCONFIG_CFM, NULL); + break; + + case BWMODE_REQ_X2: + /* Build the SET_BWMODE_REQ message */ + req4 = rwnx_msg_zalloc(MM_SET_VENDOR_HWCONFIG_REQ, TASK_MM, DRV_TASK_ID, sizeof(struct mm_set_bwmode_req)); + if (!req4) + return -ENOMEM; + req4->hwconfig_id = hwconfig_id; + req4->bwmode = param[0]; + AICWFDBG(LOGINFO, "bwmode :%d\n", req4->bwmode); + /* Send the MM_SET_VENDOR_HWCONFIG_CFM message to UMAC FW */ + error = rwnx_send_msg(rwnx_hw, req4, 1, MM_SET_VENDOR_HWCONFIG_CFM, NULL); + break; + case CHIP_TEMP_GET_REQ_X2: + //if ((rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800DC) || + // (rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800DW)) + { + struct mm_get_chip_temp_req *req; + struct mm_set_vendor_hwconfig_cfm cfm = {0,}; + /* Build the CHIP_TEMP_GET_REQ message */ + req = rwnx_msg_zalloc(MM_SET_VENDOR_HWCONFIG_REQ, TASK_MM, DRV_TASK_ID, sizeof(struct mm_get_chip_temp_req)); + if (!req) + return -ENOMEM; + req->hwconfig_id = hwconfig_id; + /* Send the MM_SET_VENDOR_HWCONFIG_REQ message to UMAC FW */ + error = rwnx_send_msg(rwnx_hw, req, 1, MM_SET_VENDOR_HWCONFIG_CFM, &cfm); + if (!error) { + if (param_out) { + param_out[0] = (int32_t)cfm.chip_temp_cfm.degree; + } + AICWFDBG(LOGINFO, "get_chip_temp degree=%d\n", cfm.chip_temp_cfm.degree); + } else { + AICWFDBG(LOGERROR, "get_chip_temp err=%d\n", error); + } + } + break; + + case STBC_MCS_SET_REQ_X2: + { + struct mm_get_stbc_msc_req *req; + + req = rwnx_msg_zalloc(MM_SET_VENDOR_HWCONFIG_REQ, TASK_MM, DRV_TASK_ID, sizeof(struct mm_get_stbc_msc_req)); + if (!req) + return -ENOMEM; + req->hwconfig_id = hwconfig_id; + req->enable = (u8)param[0]; + req->mcs_thresh = (u8)param[1]; + + error = rwnx_send_msg(rwnx_hw, req, 1, MM_SET_VENDOR_HWCONFIG_CFM, NULL); + if (error) { + AICWFDBG(LOGERROR, "set_stbc_err =%d\n", error); + } + } + break; + case MAX_AGG_TX_CNT_REQ_X2: + { + struct mm_set_max_tx_agg_cnt_req *req; + + if((u8)param[0] > 1 || (u8)param[1] > 11) { + AICWFDBG(LOGERROR, "hwvendor max_agg_cnt param error:%x,%x\n", (u8)param[0], (u8)param[1]); + break; + } + req = rwnx_msg_zalloc(MM_SET_VENDOR_HWCONFIG_REQ, TASK_MM, DRV_TASK_ID, sizeof(struct mm_set_max_tx_agg_cnt_req)); + if (!req) + return -ENOMEM; + + req->hwconfig_id = hwconfig_id; + req->enale = (u8)param[0]; + req->mcs_thresh = (u8)param[1]; + req->max_agg_cnt[0] = (u8)param[2]; + req->max_agg_cnt[1] = (u8)param[3]; + req->max_agg_cnt[2] = (u8)param[4]; + req->max_agg_cnt[3] = (u8)param[5]; + + error = rwnx_send_msg(rwnx_hw, req, 1, MM_SET_VENDOR_HWCONFIG_CFM, NULL); + if (error) { + AICWFDBG(LOGERROR, "set agg max cnt err=%d\n", error); + } + AICWFDBG(LOGINFO, "hwvendor max_aggr_cnt:en=%d, mcs_thresh=%d, 0x%x, 0x%x, 
0x%x, 0x%x\n", req->enale, req->mcs_thresh, req->max_agg_cnt[0], + req->max_agg_cnt[1], req->max_agg_cnt[2], req->max_agg_cnt[3]); + } + break; + case MAX_BW_MCS_THRESH_SET_REQ_X2: + { + struct mm_set_max_bw_mcs_thresh_req *req; + if((u8)param[0] > 1 || (u8)param[1] > 11) { + AICWFDBG(LOGERROR, "hwvendor max_bw_mcs param error:%x,%x\n", (u8)param[0], (u8)param[1]); + break; + } + req = rwnx_msg_zalloc(MM_SET_VENDOR_HWCONFIG_REQ, TASK_MM, DRV_TASK_ID, sizeof(struct mm_set_max_bw_mcs_thresh_req)); + if (!req) + return -ENOMEM; + + req->hwconfig_id = hwconfig_id; + req->enale = (u8)param[0]; + req->max_bw_mcs_thresh = (u8)param[1]; + + error = rwnx_send_msg(rwnx_hw, req, 1, MM_SET_VENDOR_HWCONFIG_CFM, NULL); + if (error) { + AICWFDBG(LOGERROR, "set max_bw_mcs err=%d\n", error); + } + AICWFDBG(LOGINFO, "set max_bw_mcs: enable=%d, thresh=%d\n", (u8)param[0], (u8)param[1]); + } + break; + case DCM_FORCE_EN_REQ_X2: + { + struct mm_set_dcm_force_en_req *req; + req = rwnx_msg_zalloc(MM_SET_VENDOR_HWCONFIG_REQ, TASK_MM, DRV_TASK_ID, sizeof(struct mm_set_dcm_force_en_req)); + if (!req) + return -ENOMEM; + + req->hwconfig_id = hwconfig_id; + req->enable = (u8)param[0]; + + error = rwnx_send_msg(rwnx_hw, req, 1, MM_SET_VENDOR_HWCONFIG_CFM, NULL); + if (error) { + AICWFDBG(LOGERROR, "set dcm_force_en err=%d\n", error); + } + AICWFDBG(LOGINFO, "set dcm_force_en: enable=%d\n", (u8)param[0]); + } + break; + case AUTO_CCA_EN_REQ_X2: + { + struct mm_set_auto_cca_en_req *req; + + if((u8)param[0] > 1 || (int8_t)param[1] > 0 || (u8)param[2] > 1 || (int8_t)param[3]>0) { + AICWFDBG(LOGERROR, "hwvendor auto_cca_en param error:%x,%d,%x,%d\n", (u8)param[0], (int8_t)param[1], + (u8)param[2], (int8_t)param[3]); + break; + } + req = rwnx_msg_zalloc(MM_SET_VENDOR_HWCONFIG_REQ, TASK_MM, DRV_TASK_ID, sizeof(struct mm_set_auto_cca_en_req)); + if (!req) + return -ENOMEM; + + req->hwconfig_id = hwconfig_id; + req->enable = (u8)param[0]; + req->max_cca_thresh = (int8_t)param[1]; + req->default_cca_set = (u8)param[2]; + req->default_cca_thresh = (int8_t)param[3]; + + error = rwnx_send_msg(rwnx_hw, req, 1, MM_SET_VENDOR_HWCONFIG_CFM, NULL); + if (error) { + AICWFDBG(LOGERROR, "set auto cca en err=%d\n", error); + } + AICWFDBG(LOGINFO, "set auto cca en: enable=%d, thresh=%d, default_set=%d, default_thresh=%d\n", + (u8)param[0], (int8_t)param[1], (u8)param[2], (int8_t)param[3]); + } + break; + case NSS_1T2R_REQ_X2: + { + struct mm_set_nss_1t2r_req *req; + req = rwnx_msg_zalloc(MM_SET_VENDOR_HWCONFIG_REQ, TASK_MM, DRV_TASK_ID, sizeof(struct mm_set_nss_1t2r_req)); + if (!req) + return -ENOMEM; + + req->hwconfig_id = hwconfig_id; + req->enable = (u8)param[0]; + error = rwnx_send_msg(rwnx_hw, req, 1, MM_SET_VENDOR_HWCONFIG_CFM, NULL); + if (error) { + AICWFDBG(LOGERROR, "set nss 1t2r err=%d\n", error); + } + AICWFDBG(LOGINFO, "set nss 1t2r: enable=%d\n", (u8)param[0]); + } + break; + case ON_AIR_DUTY_CYCLE_REQ_X2: + { + struct mm_set_on_air_duty_cycle_req *req; + if((u8)param[1]<1 || (u8)param[1] > 100) { + AICWFDBG(LOGERROR, "hwvendor on_air_duty_cycle param error:%x,%x\n", (u8)param[0], (u8)param[1]); + break; + } + req = rwnx_msg_zalloc(MM_SET_VENDOR_HWCONFIG_REQ, TASK_MM, DRV_TASK_ID, sizeof(struct mm_set_on_air_duty_cycle_req)); + if (!req) + return -ENOMEM; + + req->hwconfig_id = hwconfig_id; + req->enable = (u8)param[0]; + req->percent = (u8)param[1]; + + error = rwnx_send_msg(rwnx_hw, req, 1, MM_SET_VENDOR_HWCONFIG_CFM, NULL); + if (error) { + AICWFDBG(LOGERROR, "set on_air duty cycle err=%d\n", error); + } + 
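+			/* Log the requested aggregation settings even on failure; any
+			 * error code from rwnx_send_msg() is returned to the caller at
+			 * the end of the switch. */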
AICWFDBG(LOGINFO, "set on_air duty cycle: enable=%d, percent=%d\n", (u8)param[0], (u8)param[1]); + } + break; + default: + return -ENOMEM; + } + return error; +} + +int rwnx_send_vendor_swconfig_req_x2(struct rwnx_hw *rwnx_hw, uint32_t swconfig_id, int32_t *param_in, int32_t *param_out) +{ + struct mm_set_vendor_swconfig_req *req; + struct mm_set_vendor_swconfig_cfm cfm = {0,}; + int error; + + req = rwnx_msg_zalloc(MM_SET_VENDOR_SWCONFIG_REQ, TASK_MM, DRV_TASK_ID, sizeof(struct mm_set_vendor_swconfig_req)); + if (!req) { + return -ENOMEM; + } + req->swconfig_id = swconfig_id; + + switch (swconfig_id) + { + case BCN_CFG_REQ_X2: + /* Build the BCN_CFG_REQ message */ + req->bcn_cfg_req.tim_bcmc_ignored_enable = (bool_l)param_in[0]; + AICWFDBG(LOGINFO, "bcn_cfg_req: tim_bcmc_ignd=%d\n", req->bcn_cfg_req.tim_bcmc_ignored_enable); + /* Send the MM_SET_VENDOR_SWCONFIG_REQ message to UMAC FW */ + error = rwnx_send_msg(rwnx_hw, req, 1, MM_SET_VENDOR_SWCONFIG_CFM, &cfm); + if (!error) { + param_out[0] = (int32_t)cfm.bcn_cfg_cfm.tim_bcmc_ignored_status; + AICWFDBG(LOGINFO, "status=%d\n", cfm.bcn_cfg_cfm.tim_bcmc_ignored_status); + } + break; + + case TEMP_COMP_SET_REQ_X2: + /* Build the TEMP_COMP_SET_REQ message */ + req->temp_comp_set_req.enable = (u8_l)param_in[0]; + req->temp_comp_set_req.tmr_period_ms = (u32_l)param_in[1]; + AICWFDBG(LOGINFO, "temp_comp_set_req: en=%d, tmr=%x\n", + req->temp_comp_set_req.enable, req->temp_comp_set_req.tmr_period_ms); + /* Send the MM_SET_VENDOR_SWCONFIG_REQ message to UMAC FW */ + error = rwnx_send_msg(rwnx_hw, req, 1, MM_SET_VENDOR_SWCONFIG_CFM, &cfm); + if (!error) { + param_out[0] = (int32_t)cfm.temp_comp_set_cfm.status; + AICWFDBG(LOGINFO, "status=%d\n", cfm.temp_comp_set_cfm.status); + } + break; + + case TEMP_COMP_GET_REQ_X2: + AICWFDBG(LOGTRACE, "temp_comp_get_req\n"); + /* Send the MM_SET_VENDOR_SWCONFIG_REQ message to UMAC FW */ + error = rwnx_send_msg(rwnx_hw, req, 1, MM_SET_VENDOR_SWCONFIG_CFM, &cfm); + if (!error) { + param_out[0] = (int32_t)cfm.temp_comp_get_cfm.status; + param_out[1] = (int32_t)cfm.temp_comp_get_cfm.degree; + AICWFDBG(LOGINFO, "status=%d, degree=%d\n", + cfm.temp_comp_get_cfm.status, cfm.temp_comp_get_cfm.degree); + } + break; + + case EXT_FLAGS_SET_REQ_X2: + /* Build the EXT_FLAGS_SET_REQ message */ + req->ext_flags_set_req.user_flags = (u32_l)param_in[0]; + AICWFDBG(LOGINFO, "ext_flags_set_req: flags=%x\n", + req->ext_flags_set_req.user_flags); + /* Send the MM_SET_VENDOR_SWCONFIG_REQ message to UMAC FW */ + error = rwnx_send_msg(rwnx_hw, req, 1, MM_SET_VENDOR_SWCONFIG_CFM, &cfm); + if (!error) { + param_out[0] = (uint32_t)cfm.ext_flags_set_cfm.user_flags; + AICWFDBG(LOGINFO, "cfm flags=%x\n", cfm.ext_flags_set_cfm.user_flags); + } + break; + + case EXT_FLAGS_GET_REQ_X2: + /* Build the EXT_FLAGS_GET_REQ message */ + AICWFDBG(LOGTRACE, "ext_flags_get_req\n"); + /* Send the MM_SET_VENDOR_SWCONFIG_REQ message to UMAC FW */ + error = rwnx_send_msg(rwnx_hw, req, 1, MM_SET_VENDOR_SWCONFIG_CFM, &cfm); + if (!error) { + param_out[0] = (uint32_t)cfm.ext_flags_get_cfm.user_flags; + AICWFDBG(LOGINFO, "cfm flags=%x\n", cfm.ext_flags_get_cfm.user_flags); + } + break; + + case EXT_FLAGS_MASK_SET_REQ_X2: + /* Build the EXT_FLAGS_MASK_SET_REQ message */ + req->ext_flags_mask_set_req.user_flags_mask = (u32_l)param_in[0]; + req->ext_flags_mask_set_req.user_flags_val = (u32_l)param_in[1]; + AICWFDBG(LOGINFO, "ext_flags_mask_set_req: flags mask=0x%x, val=0x%x\n", + req->ext_flags_mask_set_req.user_flags_mask, 
req->ext_flags_mask_set_req.user_flags_val); + /* Send the MM_SET_VENDOR_SWCONFIG_REQ message to UMAC FW */ + error = rwnx_send_msg(rwnx_hw, req, 1, MM_SET_VENDOR_SWCONFIG_CFM, &cfm); + if (!error) { + param_out[0] = (uint32_t)cfm.ext_flags_mask_set_cfm.user_flags; + AICWFDBG(LOGINFO, "cfm flags=%x\n", cfm.ext_flags_mask_set_cfm.user_flags); + } + break; + case TWO_ANT_RSSI_GET_REQ_X2: + AICWFDBG(LOGINFO, "get 2 ant rssi:\n"); + error = rwnx_send_msg(rwnx_hw, req, 1, MM_SET_VENDOR_SWCONFIG_CFM, &cfm); + if(error) { + AICWFDBG(LOGINFO, "get 2 ant rssi fail\n"); + } else + param_out[0] = *((u32 *)&cfm + 1); + break; + default: + error = -ENOMEM; + break; + } + + return error; +} + + +int rwnx_send_mask_set_ext_flags_req(struct rwnx_hw *rwnx_hw, uint32_t flags_mask, uint32_t flags_val, struct mm_set_vendor_swconfig_cfm *cfm) +{ + struct mm_set_vendor_swconfig_req *req; + int ret; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + /* Build the REQ message */ + req = rwnx_msg_zalloc(MM_SET_VENDOR_SWCONFIG_REQ, TASK_MM, DRV_TASK_ID, sizeof(struct mm_set_vendor_swconfig_req)); + if (!req) { + AICWFDBG(LOGERROR, "%s msg_alloc fail\n", __func__); + return -ENOMEM; + } + req->swconfig_id = EXT_FLAGS_MASK_SET_REQ; + req->ext_flags_mask_set_req.user_flags_mask = (u32_l)flags_mask; + req->ext_flags_mask_set_req.user_flags_val = (u32_l)flags_val; + + ret = rwnx_send_msg(rwnx_hw, req, 1, MM_SET_VENDOR_SWCONFIG_CFM, cfm); + if (!ret) { + AICWFDBG(LOGINFO, "curr ext_flags=%x\n", cfm->ext_flags_mask_set_cfm.user_flags); + } else { + AICWFDBG(LOGERROR, "%s send_msg_fail\n", __func__); + return ret; + } + return ret; +} + +int rwnx_send_get_fw_version_req(struct rwnx_hw *rwnx_hw, struct mm_get_fw_version_cfm *cfm) +{ + void *req; + int error; + + /* Build the MM_GET_FW_VERSION_REQ message */ + req = rwnx_msg_zalloc(MM_GET_FW_VERSION_REQ, TASK_MM, DRV_TASK_ID, sizeof(u8)); + + if (!req) { + return -ENOMEM; + } + + /* Send the MM_GET_FW_VERSION_REQ message to UMAC FW */ + error = rwnx_send_msg(rwnx_hw, req, 1, MM_GET_FW_VERSION_CFM, cfm); + + return error; +} + + +extern void get_userconfig_txpwr_idx(txpwr_idx_conf_t *txpwr_idx); + +int rwnx_send_txpwr_idx_req(struct rwnx_hw *rwnx_hw) +{ + struct mm_set_txpwr_idx_req *txpwr_idx_req; + txpwr_idx_conf_t *txpwr_idx; + int error; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + /* Build the MM_SET_TXPWR_IDX_REQ message */ + txpwr_idx_req = rwnx_msg_zalloc(MM_SET_TXPWR_IDX_LVL_REQ, TASK_MM, DRV_TASK_ID, + sizeof(struct mm_set_txpwr_idx_req)); + + if (!txpwr_idx_req) { + return -ENOMEM; + } + + txpwr_idx = &txpwr_idx_req->txpwr_idx; + txpwr_idx->enable = 1; + txpwr_idx->dsss=9; + txpwr_idx->ofdmlowrate_2g4=8; + txpwr_idx->ofdm64qam_2g4=8; + txpwr_idx->ofdm256qam_2g4=8; + txpwr_idx->ofdm1024qam_2g4=8; + txpwr_idx->ofdmlowrate_5g=11; + txpwr_idx->ofdm64qam_5g=10; + txpwr_idx->ofdm256qam_5g=9; + txpwr_idx->ofdm1024qam_5g=9; + + get_userconfig_txpwr_idx(txpwr_idx); + + AICWFDBG(LOGINFO, "%s:enable:%d\r\n", __func__, txpwr_idx->enable); + AICWFDBG(LOGINFO, "%s:dsss:%d\r\n", __func__, txpwr_idx->dsss); + AICWFDBG(LOGINFO, "%s:ofdmlowrate_2g4:%d\r\n", __func__, txpwr_idx->ofdmlowrate_2g4); + AICWFDBG(LOGINFO, "%s:ofdm64qam_2g4:%d\r\n", __func__, txpwr_idx->ofdm64qam_2g4); + AICWFDBG(LOGINFO, "%s:ofdm256qam_2g4:%d\r\n", __func__, txpwr_idx->ofdm256qam_2g4); + AICWFDBG(LOGINFO, "%s:ofdm1024qam_2g4:%d\r\n", __func__, txpwr_idx->ofdm1024qam_2g4); + AICWFDBG(LOGINFO, "%s:ofdmlowrate_5g:%d\r\n", __func__, txpwr_idx->ofdmlowrate_5g); + AICWFDBG(LOGINFO, "%s:ofdm64qam_5g:%d\r\n", __func__, 
txpwr_idx->ofdm64qam_5g); + AICWFDBG(LOGINFO, "%s:ofdm256qam_5g:%d\r\n", __func__, txpwr_idx->ofdm256qam_5g); + AICWFDBG(LOGINFO, "%s:ofdm1024qam_5g:%d\r\n", __func__, txpwr_idx->ofdm1024qam_5g); + + /* Send the MM_SET_TXPWR_IDX_REQ message to UMAC FW */ + error = rwnx_send_msg(rwnx_hw, txpwr_idx_req, 1, MM_SET_TXPWR_IDX_LVL_CFM, NULL); + + return (error); +} + +int rwnx_send_txpwr_lvl_req(struct rwnx_hw *rwnx_hw) +{ + struct mm_set_txpwr_lvl_req *txpwr_lvl_req; + txpwr_lvl_conf_v2_t txpwr_lvl_v2_tmp; + txpwr_lvl_conf_v2_t *txpwr_lvl_v2; + txpwr_loss_conf_t txpwr_loss_tmp; + txpwr_loss_conf_t *txpwr_loss; + int error; + int i; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + /* Build the MM_SET_TXPWR_LVL_REQ message */ + txpwr_lvl_req = rwnx_msg_zalloc(MM_SET_TXPWR_IDX_LVL_REQ, TASK_MM, DRV_TASK_ID, + sizeof(struct mm_set_txpwr_lvl_req)); + + if (!txpwr_lvl_req) { + return -ENOMEM; + } + + txpwr_lvl_v2 = &txpwr_lvl_v2_tmp; + txpwr_loss = &txpwr_loss_tmp; + txpwr_loss->loss_enable_2g4 = 0; + + get_userconfig_txpwr_lvl_v2_in_fdrv(txpwr_lvl_v2); + get_userconfig_txpwr_loss(txpwr_loss); + if (txpwr_lvl_v2->enable == 0) { + rwnx_msg_free(rwnx_hw, txpwr_lvl_req); + return 0; + } else { + AICWFDBG(LOGINFO, "%s:enable:%d\r\n", __func__, txpwr_lvl_v2->enable); + AICWFDBG(LOGINFO, "%s:lvl_11b_11ag_1m_2g4:%d\r\n", __func__, txpwr_lvl_v2->pwrlvl_11b_11ag_2g4[0]); + AICWFDBG(LOGINFO, "%s:lvl_11b_11ag_2m_2g4:%d\r\n", __func__, txpwr_lvl_v2->pwrlvl_11b_11ag_2g4[1]); + AICWFDBG(LOGINFO, "%s:lvl_11b_11ag_5m5_2g4:%d\r\n", __func__, txpwr_lvl_v2->pwrlvl_11b_11ag_2g4[2]); + AICWFDBG(LOGINFO, "%s:lvl_11b_11ag_11m_2g4:%d\r\n", __func__, txpwr_lvl_v2->pwrlvl_11b_11ag_2g4[3]); + AICWFDBG(LOGINFO, "%s:lvl_11b_11ag_6m_2g4:%d\r\n", __func__, txpwr_lvl_v2->pwrlvl_11b_11ag_2g4[4]); + AICWFDBG(LOGINFO, "%s:lvl_11b_11ag_9m_2g4:%d\r\n", __func__, txpwr_lvl_v2->pwrlvl_11b_11ag_2g4[5]); + AICWFDBG(LOGINFO, "%s:lvl_11b_11ag_12m_2g4:%d\r\n", __func__, txpwr_lvl_v2->pwrlvl_11b_11ag_2g4[6]); + AICWFDBG(LOGINFO, "%s:lvl_11b_11ag_18m_2g4:%d\r\n", __func__, txpwr_lvl_v2->pwrlvl_11b_11ag_2g4[7]); + AICWFDBG(LOGINFO, "%s:lvl_11b_11ag_24m_2g4:%d\r\n", __func__, txpwr_lvl_v2->pwrlvl_11b_11ag_2g4[8]); + AICWFDBG(LOGINFO, "%s:lvl_11b_11ag_36m_2g4:%d\r\n", __func__, txpwr_lvl_v2->pwrlvl_11b_11ag_2g4[9]); + AICWFDBG(LOGINFO, "%s:lvl_11b_11ag_48m_2g4:%d\r\n", __func__, txpwr_lvl_v2->pwrlvl_11b_11ag_2g4[10]); + AICWFDBG(LOGINFO, "%s:lvl_11b_11ag_54m_2g4:%d\r\n", __func__, txpwr_lvl_v2->pwrlvl_11b_11ag_2g4[11]); + AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs0_2g4:%d\r\n",__func__, txpwr_lvl_v2->pwrlvl_11n_11ac_2g4[0]); + AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs1_2g4:%d\r\n",__func__, txpwr_lvl_v2->pwrlvl_11n_11ac_2g4[1]); + AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs2_2g4:%d\r\n",__func__, txpwr_lvl_v2->pwrlvl_11n_11ac_2g4[2]); + AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs3_2g4:%d\r\n",__func__, txpwr_lvl_v2->pwrlvl_11n_11ac_2g4[3]); + AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs4_2g4:%d\r\n",__func__, txpwr_lvl_v2->pwrlvl_11n_11ac_2g4[4]); + AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs5_2g4:%d\r\n",__func__, txpwr_lvl_v2->pwrlvl_11n_11ac_2g4[5]); + AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs6_2g4:%d\r\n",__func__, txpwr_lvl_v2->pwrlvl_11n_11ac_2g4[6]); + AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs7_2g4:%d\r\n",__func__, txpwr_lvl_v2->pwrlvl_11n_11ac_2g4[7]); + AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs8_2g4:%d\r\n",__func__, txpwr_lvl_v2->pwrlvl_11n_11ac_2g4[8]); + AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs9_2g4:%d\r\n",__func__, txpwr_lvl_v2->pwrlvl_11n_11ac_2g4[9]); + AICWFDBG(LOGINFO, 
"%s:lvl_11ax_mcs0_2g4:%d\r\n", __func__, txpwr_lvl_v2->pwrlvl_11ax_2g4[0]); + AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs1_2g4:%d\r\n", __func__, txpwr_lvl_v2->pwrlvl_11ax_2g4[1]); + AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs2_2g4:%d\r\n", __func__, txpwr_lvl_v2->pwrlvl_11ax_2g4[2]); + AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs3_2g4:%d\r\n", __func__, txpwr_lvl_v2->pwrlvl_11ax_2g4[3]); + AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs4_2g4:%d\r\n", __func__, txpwr_lvl_v2->pwrlvl_11ax_2g4[4]); + AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs5_2g4:%d\r\n", __func__, txpwr_lvl_v2->pwrlvl_11ax_2g4[5]); + AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs6_2g4:%d\r\n", __func__, txpwr_lvl_v2->pwrlvl_11ax_2g4[6]); + AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs7_2g4:%d\r\n", __func__, txpwr_lvl_v2->pwrlvl_11ax_2g4[7]); + AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs8_2g4:%d\r\n", __func__, txpwr_lvl_v2->pwrlvl_11ax_2g4[8]); + AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs9_2g4:%d\r\n", __func__, txpwr_lvl_v2->pwrlvl_11ax_2g4[9]); + AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs10_2g4:%d\r\n", __func__, txpwr_lvl_v2->pwrlvl_11ax_2g4[10]); + AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs11_2g4:%d\r\n", __func__, txpwr_lvl_v2->pwrlvl_11ax_2g4[11]); + + if (txpwr_loss->loss_enable_2g4 == 1) { + AICWFDBG(LOGINFO, "%s:loss_value_2g4: %d\r\n", __func__, + txpwr_loss->loss_value_2g4); + + for (i = 0; i <= 11; i++) + txpwr_lvl_v2->pwrlvl_11b_11ag_2g4[i] -= txpwr_loss->loss_value_2g4; + for (i = 0; i <= 9; i++) + txpwr_lvl_v2->pwrlvl_11n_11ac_2g4[i] -= txpwr_loss->loss_value_2g4; + for (i = 0; i <= 11; i++) + txpwr_lvl_v2->pwrlvl_11ax_2g4[i] -= txpwr_loss->loss_value_2g4; + } + + if ((testmode == 0) && (chip_sub_id == 0)) { + txpwr_lvl_req->txpwr_lvl.enable = txpwr_lvl_v2->enable; + txpwr_lvl_req->txpwr_lvl.dsss = txpwr_lvl_v2->pwrlvl_11b_11ag_2g4[3]; // 11M + txpwr_lvl_req->txpwr_lvl.ofdmlowrate_2g4= txpwr_lvl_v2->pwrlvl_11ax_2g4[4]; // MCS4 + txpwr_lvl_req->txpwr_lvl.ofdm64qam_2g4 = txpwr_lvl_v2->pwrlvl_11ax_2g4[7]; // MCS7 + txpwr_lvl_req->txpwr_lvl.ofdm256qam_2g4 = txpwr_lvl_v2->pwrlvl_11ax_2g4[9]; // MCS9 + txpwr_lvl_req->txpwr_lvl.ofdm1024qam_2g4= txpwr_lvl_v2->pwrlvl_11ax_2g4[11]; // MCS11 + txpwr_lvl_req->txpwr_lvl.ofdmlowrate_5g = 13; // unused + txpwr_lvl_req->txpwr_lvl.ofdm64qam_5g = 13; // unused + txpwr_lvl_req->txpwr_lvl.ofdm256qam_5g = 13; // unused + txpwr_lvl_req->txpwr_lvl.ofdm1024qam_5g = 13; // unused + } else { + txpwr_lvl_req->txpwr_lvl_v2 = *txpwr_lvl_v2; + } + + /* Send the MM_SET_TXPWR_LVL_REQ message to UMAC FW */ + error = rwnx_send_msg(rwnx_hw, txpwr_lvl_req, 1, MM_SET_TXPWR_IDX_LVL_CFM, NULL); + + return (error); + } +} + +int rwnx_send_txpwr_lvl_v3_req(struct rwnx_hw *rwnx_hw) +{ + struct mm_set_txpwr_lvl_req *txpwr_lvl_req; + txpwr_lvl_conf_v3_t txpwr_lvl_v3_tmp; + txpwr_lvl_conf_v3_t *txpwr_lvl_v3; + txpwr_loss_conf_t txpwr_loss_tmp; + txpwr_loss_conf_t *txpwr_loss; + int error; + int i; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + /* Build the MM_SET_TXPWR_LVL_REQ message */ + txpwr_lvl_req = rwnx_msg_zalloc(MM_SET_TXPWR_IDX_LVL_REQ, TASK_MM, DRV_TASK_ID, + sizeof(struct mm_set_txpwr_lvl_req)); + + if (!txpwr_lvl_req) { + return -ENOMEM; + } + + txpwr_lvl_v3 = &txpwr_lvl_v3_tmp; + txpwr_loss = &txpwr_loss_tmp; + txpwr_loss->loss_enable_2g4 = 0; + txpwr_loss->loss_enable_5g = 0; + + get_userconfig_txpwr_lvl_v3_in_fdrv(txpwr_lvl_v3); + get_userconfig_txpwr_loss(txpwr_loss); + + if (txpwr_loss->loss_enable_2g4 == 1) { + AICWFDBG(LOGINFO, "%s:loss_value_2g4: %d\r\n", __func__, + txpwr_loss->loss_value_2g4); + + for (i = 0; i <= 11; i++) + txpwr_lvl_v3->pwrlvl_11b_11ag_2g4[i] -= txpwr_loss->loss_value_2g4; 
+ for (i = 0; i <= 9; i++) + txpwr_lvl_v3->pwrlvl_11n_11ac_2g4[i] -= txpwr_loss->loss_value_2g4; + for (i = 0; i <= 11; i++) + txpwr_lvl_v3->pwrlvl_11ax_2g4[i] -= txpwr_loss->loss_value_2g4; + } + + if (txpwr_loss->loss_enable_5g == 1) { + AICWFDBG(LOGINFO, "%s:loss_value_5g: %d\r\n", __func__, + txpwr_loss->loss_value_5g); + + for (i = 0; i <= 11; i++) + txpwr_lvl_v3->pwrlvl_11a_5g[i] -= txpwr_loss->loss_value_5g; + for (i = 0; i <= 9; i++) + txpwr_lvl_v3->pwrlvl_11n_11ac_5g[i] -= txpwr_loss->loss_value_5g; + for (i = 0; i <= 11; i++) + txpwr_lvl_v3->pwrlvl_11ax_5g[i] -= txpwr_loss->loss_value_5g; + } + + + if (txpwr_lvl_v3->enable == 0) { + rwnx_msg_free(rwnx_hw, txpwr_lvl_req); + return 0; + } else { + AICWFDBG(LOGINFO, "%s:enable:%d\r\n", __func__, txpwr_lvl_v3->enable); + AICWFDBG(LOGINFO, "%s:lvl_11b_11ag_1m_2g4:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11b_11ag_2g4[0]); + AICWFDBG(LOGINFO, "%s:lvl_11b_11ag_2m_2g4:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11b_11ag_2g4[1]); + AICWFDBG(LOGINFO, "%s:lvl_11b_11ag_5m5_2g4:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11b_11ag_2g4[2]); + AICWFDBG(LOGINFO, "%s:lvl_11b_11ag_11m_2g4:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11b_11ag_2g4[3]); + AICWFDBG(LOGINFO, "%s:lvl_11b_11ag_6m_2g4:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11b_11ag_2g4[4]); + AICWFDBG(LOGINFO, "%s:lvl_11b_11ag_9m_2g4:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11b_11ag_2g4[5]); + AICWFDBG(LOGINFO, "%s:lvl_11b_11ag_12m_2g4:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11b_11ag_2g4[6]); + AICWFDBG(LOGINFO, "%s:lvl_11b_11ag_18m_2g4:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11b_11ag_2g4[7]); + AICWFDBG(LOGINFO, "%s:lvl_11b_11ag_24m_2g4:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11b_11ag_2g4[8]); + AICWFDBG(LOGINFO, "%s:lvl_11b_11ag_36m_2g4:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11b_11ag_2g4[9]); + AICWFDBG(LOGINFO, "%s:lvl_11b_11ag_48m_2g4:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11b_11ag_2g4[10]); + AICWFDBG(LOGINFO, "%s:lvl_11b_11ag_54m_2g4:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11b_11ag_2g4[11]); + AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs0_2g4:%d\r\n",__func__, txpwr_lvl_v3->pwrlvl_11n_11ac_2g4[0]); + AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs1_2g4:%d\r\n",__func__, txpwr_lvl_v3->pwrlvl_11n_11ac_2g4[1]); + AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs2_2g4:%d\r\n",__func__, txpwr_lvl_v3->pwrlvl_11n_11ac_2g4[2]); + AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs3_2g4:%d\r\n",__func__, txpwr_lvl_v3->pwrlvl_11n_11ac_2g4[3]); + AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs4_2g4:%d\r\n",__func__, txpwr_lvl_v3->pwrlvl_11n_11ac_2g4[4]); + AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs5_2g4:%d\r\n",__func__, txpwr_lvl_v3->pwrlvl_11n_11ac_2g4[5]); + AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs6_2g4:%d\r\n",__func__, txpwr_lvl_v3->pwrlvl_11n_11ac_2g4[6]); + AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs7_2g4:%d\r\n",__func__, txpwr_lvl_v3->pwrlvl_11n_11ac_2g4[7]); + AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs8_2g4:%d\r\n",__func__, txpwr_lvl_v3->pwrlvl_11n_11ac_2g4[8]); + AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs9_2g4:%d\r\n",__func__, txpwr_lvl_v3->pwrlvl_11n_11ac_2g4[9]); + AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs0_2g4:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11ax_2g4[0]); + AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs1_2g4:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11ax_2g4[1]); + AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs2_2g4:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11ax_2g4[2]); + AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs3_2g4:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11ax_2g4[3]); + AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs4_2g4:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11ax_2g4[4]); + AICWFDBG(LOGINFO, 
"%s:lvl_11ax_mcs5_2g4:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11ax_2g4[5]); + AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs6_2g4:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11ax_2g4[6]); + AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs7_2g4:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11ax_2g4[7]); + AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs8_2g4:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11ax_2g4[8]); + AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs9_2g4:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11ax_2g4[9]); + AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs10_2g4:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11ax_2g4[10]); + AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs11_2g4:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11ax_2g4[11]); + + AICWFDBG(LOGINFO, "%s:lvl_11a_1m_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11a_5g[0]); + AICWFDBG(LOGINFO, "%s:lvl_11a_2m_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11a_5g[1]); + AICWFDBG(LOGINFO, "%s:lvl_11a_5m5_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11a_5g[2]); + AICWFDBG(LOGINFO, "%s:lvl_11a_11m_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11a_5g[3]); + AICWFDBG(LOGINFO, "%s:lvl_11a_6m_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11a_5g[4]); + AICWFDBG(LOGINFO, "%s:lvl_11a_9m_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11a_5g[5]); + AICWFDBG(LOGINFO, "%s:lvl_11a_12m_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11a_5g[6]); + AICWFDBG(LOGINFO, "%s:lvl_11a_18m_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11a_5g[7]); + AICWFDBG(LOGINFO, "%s:lvl_11a_24m_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11a_5g[8]); + AICWFDBG(LOGINFO, "%s:lvl_11a_36m_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11a_5g[9]); + AICWFDBG(LOGINFO, "%s:lvl_11a_48m_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11a_5g[10]); + AICWFDBG(LOGINFO, "%s:lvl_11a_54m_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11a_5g[11]); + AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs0_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11n_11ac_5g[0]); + AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs1_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11n_11ac_5g[1]); + AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs2_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11n_11ac_5g[2]); + AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs3_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11n_11ac_5g[3]); + AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs4_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11n_11ac_5g[4]); + AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs5_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11n_11ac_5g[5]); + AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs6_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11n_11ac_5g[6]); + AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs7_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11n_11ac_5g[7]); + AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs8_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11n_11ac_5g[8]); + AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs9_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11n_11ac_5g[9]); + AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs0_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11ax_5g[0]); + AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs1_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11ax_5g[1]); + AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs2_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11ax_5g[2]); + AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs3_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11ax_5g[3]); + AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs4_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11ax_5g[4]); + AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs5_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11ax_5g[5]); + AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs6_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11ax_5g[6]); + AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs7_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11ax_5g[7]); + AICWFDBG(LOGINFO, 
"%s:lvl_11ax_mcs8_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11ax_5g[8]); + AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs9_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11ax_5g[9]); + AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs10_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11ax_5g[10]); + AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs11_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11ax_5g[11]); + + txpwr_lvl_req->txpwr_lvl_v3 = *txpwr_lvl_v3; + + /* Send the MM_SET_TXPWR_LVL_REQ message to UMAC FW */ + error = rwnx_send_msg(rwnx_hw, txpwr_lvl_req, 1, MM_SET_TXPWR_IDX_LVL_CFM, NULL); + + return (error); + } +} + +int rwnx_send_txpwr_lvl_v4_req(struct rwnx_hw *rwnx_hw) +{ + struct mm_set_txpwr_lvl_req *txpwr_lvl_req; + txpwr_lvl_conf_v4_t txpwr_lvl_v4_tmp; + txpwr_lvl_conf_v4_t *txpwr_lvl_v4; + txpwr_loss_conf_t txpwr_loss_tmp; + txpwr_loss_conf_t *txpwr_loss; + int error; + int i; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + /* Build the MM_SET_TXPWR_LVL_REQ message */ + txpwr_lvl_req = rwnx_msg_zalloc(MM_SET_TXPWR_IDX_LVL_REQ, TASK_MM, DRV_TASK_ID, + sizeof(struct mm_set_txpwr_lvl_req)); + + if (!txpwr_lvl_req) { + return -ENOMEM; + } + + txpwr_lvl_v4 = &txpwr_lvl_v4_tmp; + txpwr_loss = &txpwr_loss_tmp; + txpwr_loss->loss_enable_2g4 = 0; + txpwr_loss->loss_enable_5g = 0; + + get_userconfig_txpwr_lvl_v4_in_fdrv(txpwr_lvl_v4); + get_userconfig_txpwr_loss(txpwr_loss); + + if (txpwr_loss->loss_enable_2g4 == 1) { + AICWFDBG(LOGINFO, "%s:loss_value_2g4: %d\r\n", __func__, + txpwr_loss->loss_value_2g4); + + for (i = 0; i <= 11; i++) + txpwr_lvl_v4->pwrlvl_11b_11ag_2g4[i] -= txpwr_loss->loss_value_2g4; + for (i = 0; i <= 9; i++) + txpwr_lvl_v4->pwrlvl_11n_11ac_2g4[i] -= txpwr_loss->loss_value_2g4; + for (i = 0; i <= 11; i++) + txpwr_lvl_v4->pwrlvl_11ax_2g4[i] -= txpwr_loss->loss_value_2g4; + } + + if (txpwr_loss->loss_enable_5g == 1) { + AICWFDBG(LOGINFO, "%s:loss_value_5g: %d\r\n", __func__, + txpwr_loss->loss_value_5g); + + for (i = 0; i <= 7; i++) + txpwr_lvl_v4->pwrlvl_11a_5g[i] -= txpwr_loss->loss_value_5g; + for (i = 0; i <= 9; i++) + txpwr_lvl_v4->pwrlvl_11n_11ac_5g[i] -= txpwr_loss->loss_value_5g; + for (i = 0; i <= 11; i++) + txpwr_lvl_v4->pwrlvl_11ax_5g[i] -= txpwr_loss->loss_value_5g; + } + + + if (txpwr_lvl_v4->enable == 0) { + rwnx_msg_free(rwnx_hw, txpwr_lvl_req); + return 0; + } else { + AICWFDBG(LOGINFO, "%s:enable:%d\r\n", __func__, txpwr_lvl_v4->enable); + AICWFDBG(LOGINFO, "%s:lvl_11b_11ag_1m_2g4:%d\r\n", __func__, txpwr_lvl_v4->pwrlvl_11b_11ag_2g4[0]); + AICWFDBG(LOGINFO, "%s:lvl_11b_11ag_2m_2g4:%d\r\n", __func__, txpwr_lvl_v4->pwrlvl_11b_11ag_2g4[1]); + AICWFDBG(LOGINFO, "%s:lvl_11b_11ag_5m5_2g4:%d\r\n", __func__, txpwr_lvl_v4->pwrlvl_11b_11ag_2g4[2]); + AICWFDBG(LOGINFO, "%s:lvl_11b_11ag_11m_2g4:%d\r\n", __func__, txpwr_lvl_v4->pwrlvl_11b_11ag_2g4[3]); + AICWFDBG(LOGINFO, "%s:lvl_11b_11ag_6m_2g4:%d\r\n", __func__, txpwr_lvl_v4->pwrlvl_11b_11ag_2g4[4]); + AICWFDBG(LOGINFO, "%s:lvl_11b_11ag_9m_2g4:%d\r\n", __func__, txpwr_lvl_v4->pwrlvl_11b_11ag_2g4[5]); + AICWFDBG(LOGINFO, "%s:lvl_11b_11ag_12m_2g4:%d\r\n", __func__, txpwr_lvl_v4->pwrlvl_11b_11ag_2g4[6]); + AICWFDBG(LOGINFO, "%s:lvl_11b_11ag_18m_2g4:%d\r\n", __func__, txpwr_lvl_v4->pwrlvl_11b_11ag_2g4[7]); + AICWFDBG(LOGINFO, "%s:lvl_11b_11ag_24m_2g4:%d\r\n", __func__, txpwr_lvl_v4->pwrlvl_11b_11ag_2g4[8]); + AICWFDBG(LOGINFO, "%s:lvl_11b_11ag_36m_2g4:%d\r\n", __func__, txpwr_lvl_v4->pwrlvl_11b_11ag_2g4[9]); + AICWFDBG(LOGINFO, "%s:lvl_11b_11ag_48m_2g4:%d\r\n", __func__, txpwr_lvl_v4->pwrlvl_11b_11ag_2g4[10]); + AICWFDBG(LOGINFO, "%s:lvl_11b_11ag_54m_2g4:%d\r\n", __func__, 
txpwr_lvl_v4->pwrlvl_11b_11ag_2g4[11]); + AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs0_2g4:%d\r\n",__func__, txpwr_lvl_v4->pwrlvl_11n_11ac_2g4[0]); + AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs1_2g4:%d\r\n",__func__, txpwr_lvl_v4->pwrlvl_11n_11ac_2g4[1]); + AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs2_2g4:%d\r\n",__func__, txpwr_lvl_v4->pwrlvl_11n_11ac_2g4[2]); + AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs3_2g4:%d\r\n",__func__, txpwr_lvl_v4->pwrlvl_11n_11ac_2g4[3]); + AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs4_2g4:%d\r\n",__func__, txpwr_lvl_v4->pwrlvl_11n_11ac_2g4[4]); + AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs5_2g4:%d\r\n",__func__, txpwr_lvl_v4->pwrlvl_11n_11ac_2g4[5]); + AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs6_2g4:%d\r\n",__func__, txpwr_lvl_v4->pwrlvl_11n_11ac_2g4[6]); + AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs7_2g4:%d\r\n",__func__, txpwr_lvl_v4->pwrlvl_11n_11ac_2g4[7]); + AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs8_2g4:%d\r\n",__func__, txpwr_lvl_v4->pwrlvl_11n_11ac_2g4[8]); + AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs9_2g4:%d\r\n",__func__, txpwr_lvl_v4->pwrlvl_11n_11ac_2g4[9]); + AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs0_2g4:%d\r\n", __func__, txpwr_lvl_v4->pwrlvl_11ax_2g4[0]); + AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs1_2g4:%d\r\n", __func__, txpwr_lvl_v4->pwrlvl_11ax_2g4[1]); + AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs2_2g4:%d\r\n", __func__, txpwr_lvl_v4->pwrlvl_11ax_2g4[2]); + AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs3_2g4:%d\r\n", __func__, txpwr_lvl_v4->pwrlvl_11ax_2g4[3]); + AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs4_2g4:%d\r\n", __func__, txpwr_lvl_v4->pwrlvl_11ax_2g4[4]); + AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs5_2g4:%d\r\n", __func__, txpwr_lvl_v4->pwrlvl_11ax_2g4[5]); + AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs6_2g4:%d\r\n", __func__, txpwr_lvl_v4->pwrlvl_11ax_2g4[6]); + AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs7_2g4:%d\r\n", __func__, txpwr_lvl_v4->pwrlvl_11ax_2g4[7]); + AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs8_2g4:%d\r\n", __func__, txpwr_lvl_v4->pwrlvl_11ax_2g4[8]); + AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs9_2g4:%d\r\n", __func__, txpwr_lvl_v4->pwrlvl_11ax_2g4[9]); + AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs10_2g4:%d\r\n", __func__, txpwr_lvl_v4->pwrlvl_11ax_2g4[10]); + AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs11_2g4:%d\r\n", __func__, txpwr_lvl_v4->pwrlvl_11ax_2g4[11]); + + AICWFDBG(LOGINFO, "%s:lvl_11a_6m_5g:%d\r\n", __func__, txpwr_lvl_v4->pwrlvl_11a_5g[0]); + AICWFDBG(LOGINFO, "%s:lvl_11a_9m_5g:%d\r\n", __func__, txpwr_lvl_v4->pwrlvl_11a_5g[1]); + AICWFDBG(LOGINFO, "%s:lvl_11a_12m_5g:%d\r\n", __func__, txpwr_lvl_v4->pwrlvl_11a_5g[2]); + AICWFDBG(LOGINFO, "%s:lvl_11a_18m_5g:%d\r\n", __func__, txpwr_lvl_v4->pwrlvl_11a_5g[3]); + AICWFDBG(LOGINFO, "%s:lvl_11a_24m_5g:%d\r\n", __func__, txpwr_lvl_v4->pwrlvl_11a_5g[4]); + AICWFDBG(LOGINFO, "%s:lvl_11a_36m_5g:%d\r\n", __func__, txpwr_lvl_v4->pwrlvl_11a_5g[5]); + AICWFDBG(LOGINFO, "%s:lvl_11a_48m_5g:%d\r\n", __func__, txpwr_lvl_v4->pwrlvl_11a_5g[6]); + AICWFDBG(LOGINFO, "%s:lvl_11a_54m_5g:%d\r\n", __func__, txpwr_lvl_v4->pwrlvl_11a_5g[7]); + AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs0_5g:%d\r\n", __func__, txpwr_lvl_v4->pwrlvl_11n_11ac_5g[0]); + AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs1_5g:%d\r\n", __func__, txpwr_lvl_v4->pwrlvl_11n_11ac_5g[1]); + AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs2_5g:%d\r\n", __func__, txpwr_lvl_v4->pwrlvl_11n_11ac_5g[2]); + AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs3_5g:%d\r\n", __func__, txpwr_lvl_v4->pwrlvl_11n_11ac_5g[3]); + AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs4_5g:%d\r\n", __func__, txpwr_lvl_v4->pwrlvl_11n_11ac_5g[4]); + AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs5_5g:%d\r\n", __func__, 
txpwr_lvl_v4->pwrlvl_11n_11ac_5g[5]); + AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs6_5g:%d\r\n", __func__, txpwr_lvl_v4->pwrlvl_11n_11ac_5g[6]); + AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs7_5g:%d\r\n", __func__, txpwr_lvl_v4->pwrlvl_11n_11ac_5g[7]); + AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs8_5g:%d\r\n", __func__, txpwr_lvl_v4->pwrlvl_11n_11ac_5g[8]); + AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs9_5g:%d\r\n", __func__, txpwr_lvl_v4->pwrlvl_11n_11ac_5g[9]); + AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs0_5g:%d\r\n", __func__, txpwr_lvl_v4->pwrlvl_11ax_5g[0]); + AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs1_5g:%d\r\n", __func__, txpwr_lvl_v4->pwrlvl_11ax_5g[1]); + AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs2_5g:%d\r\n", __func__, txpwr_lvl_v4->pwrlvl_11ax_5g[2]); + AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs3_5g:%d\r\n", __func__, txpwr_lvl_v4->pwrlvl_11ax_5g[3]); + AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs4_5g:%d\r\n", __func__, txpwr_lvl_v4->pwrlvl_11ax_5g[4]); + AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs5_5g:%d\r\n", __func__, txpwr_lvl_v4->pwrlvl_11ax_5g[5]); + AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs6_5g:%d\r\n", __func__, txpwr_lvl_v4->pwrlvl_11ax_5g[6]); + AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs7_5g:%d\r\n", __func__, txpwr_lvl_v4->pwrlvl_11ax_5g[7]); + AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs8_5g:%d\r\n", __func__, txpwr_lvl_v4->pwrlvl_11ax_5g[8]); + AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs9_5g:%d\r\n", __func__, txpwr_lvl_v4->pwrlvl_11ax_5g[9]); + AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs10_5g:%d\r\n", __func__, txpwr_lvl_v4->pwrlvl_11ax_5g[10]); + AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs11_5g:%d\r\n", __func__, txpwr_lvl_v4->pwrlvl_11ax_5g[11]); + + txpwr_lvl_req->txpwr_lvl_v4 = *txpwr_lvl_v4; + + /* Send the MM_SET_TXPWR_LVL_REQ message to UMAC FW */ + error = rwnx_send_msg(rwnx_hw, txpwr_lvl_req, 1, MM_SET_TXPWR_IDX_LVL_CFM, NULL); + + return (error); + } +} + +#ifdef CONFIG_DYNAMIC_PERPWR +int rwnx_send_txpwr_per_sta_req(struct rwnx_hw *rwnx_hw, struct rwnx_sta *sta) +{ + struct mm_set_txpwr_lvl_per_sta_req *req; + int ret; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + req = rwnx_msg_zalloc(MM_SET_TXPWR_PER_STA_REQ, TASK_MM, DRV_TASK_ID, + sizeof(struct mm_set_txpwr_lvl_per_sta_req)); + if (!req) { + AICWFDBG(LOGERROR, "%s msg alloc fail\n", __func__); + return -ENOMEM; + } + + AICWFDBG(LOGINFO, "sta_idx: %d, per_pwrloss: %d\r\n", sta->sta_idx, sta->per_pwrloss); + + req->sta_idx = sta->sta_idx; + req->tx_pwr_offset = sta->per_pwrloss; + + AICWFDBG(LOGDEBUG, "per_pwrloss: %d\n", req->tx_pwr_offset); + + ret = rwnx_send_msg(rwnx_hw, req, 1, MM_SET_TXPWR_PER_STA_CFM, NULL); + + return ret; +} + +#endif + +int rwnx_send_txpwr_lvl_adj_req(struct rwnx_hw *rwnx_hw) +{ + struct mm_set_txpwr_lvl_adj_req *txpwr_lvl_adj_req; + txpwr_lvl_adj_conf_t txpwr_lvl_adj_tmp; + txpwr_lvl_adj_conf_t *txpwr_lvl_adj; + int error; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + /* Build the MM_SET_TXPWR_LVL_REQ message */ + txpwr_lvl_adj_req = rwnx_msg_zalloc(MM_SET_TXPWR_LVL_ADJ_REQ, TASK_MM, DRV_TASK_ID, + sizeof(struct mm_set_txpwr_lvl_adj_req)); + + if (!txpwr_lvl_adj_req) { + return -ENOMEM; + } + + txpwr_lvl_adj = &txpwr_lvl_adj_tmp; + + get_userconfig_txpwr_lvl_adj_in_fdrv(txpwr_lvl_adj); + + if (txpwr_lvl_adj->enable == 0) { + rwnx_msg_free(rwnx_hw, txpwr_lvl_adj_req); + return 0; + } else { + AICWFDBG(LOGINFO, "%s:enable:%d\r\n", __func__, txpwr_lvl_adj->enable); + AICWFDBG(LOGINFO, "%s:lvl_adj_2g4_chan_1_4:%d\r\n", __func__, txpwr_lvl_adj->pwrlvl_adj_tbl_2g4[0]); + AICWFDBG(LOGINFO, "%s:lvl_adj_2g4_chan_5_9:%d\r\n", __func__, txpwr_lvl_adj->pwrlvl_adj_tbl_2g4[1]); + AICWFDBG(LOGINFO, 
"%s:lvl_adj_2g4_chan_10_13:%d\r\n", __func__, txpwr_lvl_adj->pwrlvl_adj_tbl_2g4[2]); + + AICWFDBG(LOGINFO, "%s:lvl_adj_5g_chan_42:%d\r\n", __func__, txpwr_lvl_adj->pwrlvl_adj_tbl_5g[0]); + AICWFDBG(LOGINFO, "%s:lvl_adj_5g_chan_58:%d\r\n", __func__, txpwr_lvl_adj->pwrlvl_adj_tbl_5g[1]); + AICWFDBG(LOGINFO, "%s:lvl_adj_5g_chan_106:%d\r\n", __func__, txpwr_lvl_adj->pwrlvl_adj_tbl_5g[2]); + AICWFDBG(LOGINFO, "%s:lvl_adj_5g_chan_122:%d\r\n", __func__, txpwr_lvl_adj->pwrlvl_adj_tbl_5g[3]); + AICWFDBG(LOGINFO, "%s:lvl_adj_5g_chan_138:%d\r\n", __func__, txpwr_lvl_adj->pwrlvl_adj_tbl_5g[4]); + AICWFDBG(LOGINFO, "%s:lvl_adj_5g_chan_155:%d\r\n", __func__, txpwr_lvl_adj->pwrlvl_adj_tbl_5g[5]); + + txpwr_lvl_adj_req->txpwr_lvl_adj = *txpwr_lvl_adj; + + /* Send the MM_SET_TXPWR_LVL_REQ message to UMAC FW */ + error = rwnx_send_msg(rwnx_hw, txpwr_lvl_adj_req, 1, MM_SET_TXPWR_LVL_ADJ_CFM, NULL); + + return (error); + } +} + +extern void get_userconfig_txpwr_ofst(txpwr_ofst_conf_t *txpwr_ofst); + +int rwnx_send_txpwr_ofst_req(struct rwnx_hw *rwnx_hw) +{ + struct mm_set_txpwr_ofst_req *txpwr_ofst_req; + txpwr_ofst_conf_t *txpwr_ofst; + int error = 0; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + /* Build the MM_SET_TXPWR_OFST_REQ message */ + txpwr_ofst_req = rwnx_msg_zalloc(MM_SET_TXPWR_OFST_REQ, TASK_MM, DRV_TASK_ID, + sizeof(struct mm_set_txpwr_ofst_req)); + + if (!txpwr_ofst_req) { + return -ENOMEM; + } + + txpwr_ofst = &txpwr_ofst_req->txpwr_ofst; + txpwr_ofst->enable = 0; + txpwr_ofst->chan_1_4 = 0; + txpwr_ofst->chan_5_9 = 0; + txpwr_ofst->chan_10_13 = 0; + txpwr_ofst->chan_36_64 = 0; + txpwr_ofst->chan_100_120 = 0; + txpwr_ofst->chan_122_140 = 0; + txpwr_ofst->chan_142_165 = 0; + if(rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8801){ + get_userconfig_txpwr_ofst(txpwr_ofst); + }else if(rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800DC || + rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800DW){ + get_userconfig_txpwr_ofst_in_fdrv(txpwr_ofst); + } + if(txpwr_ofst->enable){ + + AICWFDBG(LOGINFO, "%s:enable:%d\r\n", __func__, txpwr_ofst->enable); + AICWFDBG(LOGINFO, "%s:chan_1_4:%d\r\n", __func__, txpwr_ofst->chan_1_4); + AICWFDBG(LOGINFO, "%s:chan_5_9:%d\r\n", __func__, txpwr_ofst->chan_5_9); + AICWFDBG(LOGINFO, "%s:chan_10_13:%d\r\n", __func__, txpwr_ofst->chan_10_13); + AICWFDBG(LOGINFO, "%s:chan_36_64:%d\r\n", __func__, txpwr_ofst->chan_36_64); + AICWFDBG(LOGINFO, "%s:chan_100_120:%d\r\n", __func__, txpwr_ofst->chan_100_120); + AICWFDBG(LOGINFO, "%s:chan_122_140:%d\r\n", __func__, txpwr_ofst->chan_122_140); + AICWFDBG(LOGINFO, "%s:chan_142_165:%d\r\n", __func__, txpwr_ofst->chan_142_165); + + /* Send the MM_SET_TXPWR_OFST_REQ message to UMAC FW */ + error = rwnx_send_msg(rwnx_hw, txpwr_ofst_req, 1, MM_SET_TXPWR_OFST_CFM, NULL); + }else{ + AICWFDBG(LOGINFO, "%s:Do not use txpwr_ofst\r\n", __func__); + rwnx_msg_free(rwnx_hw, txpwr_ofst_req); + } + + return (error); +} + +int rwnx_send_txpwr_ofst2x_req(struct rwnx_hw *rwnx_hw) +{ + struct mm_set_txpwr_ofst_req *txpwr_ofst_req; + txpwr_ofst2x_conf_t *txpwr_ofst2x; + int error = 0; + int type, ch_grp; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + /* Build the MM_SET_TXPWR_OFST_REQ message */ + txpwr_ofst_req = rwnx_msg_zalloc(MM_SET_TXPWR_OFST_REQ, TASK_MM, DRV_TASK_ID, + sizeof(struct mm_set_txpwr_ofst_req)); + + if (!txpwr_ofst_req) { + return -ENOMEM; + } + + txpwr_ofst2x = &txpwr_ofst_req->txpwr_ofst2x; + txpwr_ofst2x->enable = 0; + for (type = 0; type < 3; type++) { + for (ch_grp = 0; ch_grp < 6; ch_grp++) { + if (ch_grp < 3) { + txpwr_ofst2x->pwrofst2x_tbl_2g4[type][ch_grp] = 
0; + } + txpwr_ofst2x->pwrofst2x_tbl_5g[type][ch_grp] = 0; + } + } + if (rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800D81){ + get_userconfig_txpwr_ofst2x_in_fdrv(txpwr_ofst2x); + } else if (rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800D81X2 || + rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800D89X2){ + get_userconfig_txpwr_ofst2x_in_fdrv(txpwr_ofst2x); + } + if (txpwr_ofst2x->enable){ + AICWFDBG(LOGINFO, "%s:enable:%d\r\n", __func__, txpwr_ofst2x->enable); + AICWFDBG(LOGINFO, "pwrofst2x 2.4g: [0]:11b, [1]:ofdm_highrate, [2]:ofdm_lowrate\n" + " chan=" "\t1-4" "\t5-9" "\t10-13"); + for (type = 0; type < 3; type++) { + AICWFDBG(LOGINFO, "\n [%d] =", type); + for (ch_grp = 0; ch_grp < 3; ch_grp++) { + AICWFDBG(LOGINFO, "\t%d", txpwr_ofst2x->pwrofst2x_tbl_2g4[type][ch_grp]); + } + } + AICWFDBG(LOGINFO, "\npwrofst2x 5g: [0]:ofdm_lowrate, [1]:ofdm_highrate, [2]:ofdm_midrate\n" + " chan=" "\t36-50" "\t51-64" "\t98-114" "\t115-130" "\t131-146" "\t147-166"); + for (type = 0; type < 3; type++) { + AICWFDBG(LOGINFO, "\n [%d] =", type); + for (ch_grp = 0; ch_grp < 6; ch_grp++) { + AICWFDBG(LOGINFO, "\t%d", txpwr_ofst2x->pwrofst2x_tbl_5g[type][ch_grp]); + } + } + AICWFDBG(LOGINFO, "\n"); + + /* Send the MM_SET_TXPWR_OFST_REQ message to UMAC FW */ + error = rwnx_send_msg(rwnx_hw, txpwr_ofst_req, 1, MM_SET_TXPWR_OFST_CFM, NULL); + }else{ + AICWFDBG(LOGINFO, "%s:Do not use txpwr_ofst2x\r\n", __func__); + rwnx_msg_free(rwnx_hw, txpwr_ofst_req); + } + + return (error); +} + +int rwnx_send_txpwr_ofst2x_v2_req(struct rwnx_hw *rwnx_hw) +{ + struct mm_set_txpwr_ofst_req *txpwr_ofst_req; + txpwr_ofst2x_conf_v2_t *txpwr_ofst2x_v2; + int error = 0; + int type, ch_grp; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + /* Build the MM_SET_TXPWR_OFST_REQ message */ + txpwr_ofst_req = rwnx_msg_zalloc(MM_SET_TXPWR_OFST_REQ, TASK_MM, DRV_TASK_ID, + sizeof(struct mm_set_txpwr_ofst_req)); + + if (!txpwr_ofst_req) { + return -ENOMEM; + } + + txpwr_ofst2x_v2 = &txpwr_ofst_req->txpwr_ofst2x_v2; + txpwr_ofst2x_v2->enable = 0; + + get_userconfig_txpwr_ofst2x_v2_in_fdrv(txpwr_ofst2x_v2); + + if (txpwr_ofst2x_v2->enable){ + AICWFDBG(LOGINFO, "%s:enable:%d\r\n", __func__, txpwr_ofst2x_v2->enable); + AICWFDBG(LOGINFO, "pwrofst2x 2.4g(ant0/ant1): [0]:11b, [1]:ofdm_highrate\n" + " chan=" "\t1-4" "\t5-9" "\t10-13"); + for (type = 0; type < 2; type++) { + AICWFDBG(LOGINFO, "\n [%d] =", type); + for (ch_grp = 0; ch_grp < 3; ch_grp++) { + AICWFDBG(LOGINFO, "\t%d/%d", txpwr_ofst2x_v2->pwrofst2x_tbl_2g4_ant0[ch_grp][type], txpwr_ofst2x_v2->pwrofst2x_tbl_2g4_ant1[ch_grp][type]); + } + } + AICWFDBG(LOGINFO, "\npwrofst2x 5g(ant0/ant1): [0]:ofdm_highrate\n" + " chan=" "\t36-50" "\t51-64" "\t98-114" "\t115-130" "\t131-146" "\t147-166"); + for (type = 0; type < 1; type++) { + AICWFDBG(LOGINFO, "\n [%d] =", type); + for (ch_grp = 0; ch_grp < 6; ch_grp++) { + AICWFDBG(LOGINFO, "\t%d/%d", txpwr_ofst2x_v2->pwrofst2x_tbl_5g_ant0[ch_grp][type], txpwr_ofst2x_v2->pwrofst2x_tbl_5g_ant1[ch_grp][type]); + } + } + AICWFDBG(LOGINFO, "\n"); + + /* Send the MM_SET_TXPWR_OFST_REQ message to UMAC FW */ + error = rwnx_send_msg(rwnx_hw, txpwr_ofst_req, 1, MM_SET_TXPWR_OFST_CFM, NULL); + }else{ + AICWFDBG(LOGINFO, "%s:Do not use txpwr_ofst2x_v2\r\n", __func__); + rwnx_msg_free(rwnx_hw, txpwr_ofst_req); + } + + return (error); +} + +int rwnx_send_set_filter(struct rwnx_hw *rwnx_hw, uint32_t filter) +{ + struct mm_set_filter_req *set_filter_req_param; + uint32_t rx_filter = 0; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + /* Build the MM_SET_FILTER_REQ message */ + set_filter_req_param = + 
rwnx_msg_zalloc(MM_SET_FILTER_REQ, TASK_MM, DRV_TASK_ID, + sizeof(struct mm_set_filter_req)); + if (!set_filter_req_param) + return -ENOMEM; + + /* Set parameters for the MM_SET_FILTER_REQ message */ +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 2, 0) + if (filter & FIF_PROMISC_IN_BSS) + rx_filter |= NXMAC_ACCEPT_UNICAST_BIT; +#endif + if (filter & FIF_ALLMULTI) + rx_filter |= NXMAC_ACCEPT_MULTICAST_BIT; + + if (filter & (FIF_FCSFAIL | FIF_PLCPFAIL)) + rx_filter |= NXMAC_ACCEPT_ERROR_FRAMES_BIT; + + if (filter & FIF_BCN_PRBRESP_PROMISC) + rx_filter |= NXMAC_ACCEPT_OTHER_BSSID_BIT; + + if (filter & FIF_CONTROL) + rx_filter |= NXMAC_ACCEPT_OTHER_CNTRL_FRAMES_BIT | + NXMAC_ACCEPT_CF_END_BIT | + NXMAC_ACCEPT_ACK_BIT | + NXMAC_ACCEPT_CTS_BIT | + NXMAC_ACCEPT_RTS_BIT | + NXMAC_ACCEPT_BA_BIT | NXMAC_ACCEPT_BAR_BIT; + + if (filter & FIF_OTHER_BSS) + rx_filter |= NXMAC_ACCEPT_OTHER_BSSID_BIT; + + if (filter & FIF_PSPOLL) { + /* TODO: check if the MAC filters apply to our BSSID or is general */ + rx_filter |= NXMAC_ACCEPT_PS_POLL_BIT; + } + + if (filter & FIF_PROBE_REQ) { + rx_filter |= NXMAC_ACCEPT_PROBE_REQ_BIT; + rx_filter |= NXMAC_ACCEPT_ALL_BEACON_BIT; + } + + /* Add the filter flags that are set by default and cannot be changed here */ + rx_filter |= RWNX_MAC80211_NOT_CHANGEABLE; + + /* XXX */ + //if (ieee80211_hw_check(rwnx_hw->hw, AMPDU_AGGREGATION)) + rx_filter |= NXMAC_ACCEPT_BA_BIT; + + /* Now copy all the flags into the message parameter */ + set_filter_req_param->filter = rx_filter; + + RWNX_DBG("new total_flags = 0x%08x\nrx filter set to 0x%08x\n", + filter, rx_filter); + + /* Send the MM_SET_FILTER_REQ message to LMAC FW */ + return rwnx_send_msg(rwnx_hw, set_filter_req_param, 1, MM_SET_FILTER_CFM, NULL); +} + +/****************************************************************************** + * Control messages handling functions (FULLMAC only) + *****************************************************************************/ +#ifdef CONFIG_RWNX_FULLMAC +#ifdef CONFIG_HE_FOR_OLD_KERNEL +extern struct ieee80211_sband_iftype_data rwnx_he_capa; +#endif +#ifdef CONFIG_VHT_FOR_OLD_KERNEL +static struct ieee80211_sta_vht_cap* rwnx_vht_capa; +#endif +int rwnx_send_me_config_req(struct rwnx_hw *rwnx_hw) +{ + struct me_config_req *req; + struct wiphy *wiphy = rwnx_hw->wiphy; + + //#ifdef USE_5G + //struct ieee80211_sta_ht_cap *ht_cap = &wiphy->bands[NL80211_BAND_5GHZ]->ht_cap; + //struct ieee80211_sta_vht_cap *vht_cap = &wiphy->bands[NL80211_BAND_5GHZ]->vht_cap; + //#else + //struct ieee80211_sta_ht_cap *ht_cap = &wiphy->bands[NL80211_BAND_2GHZ]->ht_cap; + //struct ieee80211_sta_vht_cap *vht_cap = &wiphy->bands[NL80211_BAND_2GHZ]->vht_cap; + //#endif + struct ieee80211_sta_ht_cap *ht_cap; + #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0) || defined(CONFIG_VHT_FOR_OLD_KERNEL) + struct ieee80211_sta_vht_cap *vht_cap; + #endif + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0) + struct ieee80211_sta_he_cap const *he_cap; +#else + #ifdef CONFIG_HE_FOR_OLD_KERNEL + struct ieee80211_sta_he_cap const *he_cap; + #endif +#endif + //uint8_t *ht_mcs = (uint8_t *)&ht_cap->mcs; + uint8_t *ht_mcs; + int i; + + if (rwnx_hw->band_5g_support) { + ht_cap = &wiphy->bands[NL80211_BAND_5GHZ]->ht_cap; + #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0) || defined(CONFIG_VHT_FOR_OLD_KERNEL) + vht_cap = &wiphy->bands[NL80211_BAND_5GHZ]->vht_cap; + #endif + } else { + ht_cap = &wiphy->bands[NL80211_BAND_2GHZ]->ht_cap; + #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0) || defined(CONFIG_VHT_FOR_OLD_KERNEL) + vht_cap = 
&wiphy->bands[NL80211_BAND_2GHZ]->vht_cap; + #endif + } + #ifdef CONFIG_VHT_FOR_OLD_KERNEL + rwnx_vht_capa = vht_cap; + #endif + + ht_mcs = (uint8_t *)&ht_cap->mcs; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + /* Build the ME_CONFIG_REQ message */ + req = rwnx_msg_zalloc(ME_CONFIG_REQ, TASK_ME, DRV_TASK_ID, + sizeof(struct me_config_req)); + if (!req) + return -ENOMEM; + + /* Set parameters for the ME_CONFIG_REQ message */ + req->ht_supp = ht_cap->ht_supported; + #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0) || defined(CONFIG_VHT_FOR_OLD_KERNEL) + req->vht_supp = vht_cap->vht_supported; + #endif + req->ht_cap.ht_capa_info = cpu_to_le16(ht_cap->cap | IEEE80211_HT_CAP_LDPC_CODING); + req->ht_cap.a_mpdu_param = ht_cap->ampdu_factor | + (ht_cap->ampdu_density << + IEEE80211_HT_AMPDU_PARM_DENSITY_SHIFT); + for (i = 0; i < sizeof(ht_cap->mcs); i++) + req->ht_cap.mcs_rate[i] = ht_mcs[i]; + req->ht_cap.ht_extended_capa = 0; + req->ht_cap.tx_beamforming_capa = 0; + req->ht_cap.asel_capa = 0; + + #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0) || defined(CONFIG_VHT_FOR_OLD_KERNEL) + if(req->vht_supp) { + req->vht_cap.vht_capa_info = cpu_to_le32(vht_cap->cap); + req->vht_cap.rx_highest = cpu_to_le16(vht_cap->vht_mcs.rx_highest); + req->vht_cap.rx_mcs_map = cpu_to_le16(vht_cap->vht_mcs.rx_mcs_map); + req->vht_cap.tx_highest = cpu_to_le16(vht_cap->vht_mcs.tx_highest); + req->vht_cap.tx_mcs_map = cpu_to_le16(vht_cap->vht_mcs.tx_mcs_map); + } + #endif + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0) || defined(CONFIG_HE_FOR_OLD_KERNEL) + #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0) + if (wiphy->bands[NL80211_BAND_2GHZ]->iftype_data != NULL) { + he_cap = &wiphy->bands[NL80211_BAND_2GHZ]->iftype_data->he_cap; + #endif + #if defined(CONFIG_HE_FOR_OLD_KERNEL) + if (1) { + he_cap = &rwnx_he_capa.he_cap; + #endif + + req->he_supp = he_cap->has_he; + + for (i = 0; i < ARRAY_SIZE(he_cap->he_cap_elem.mac_cap_info); i++) { + req->he_cap.mac_cap_info[i] = he_cap->he_cap_elem.mac_cap_info[i]; + } + for (i = 0; i < ARRAY_SIZE(he_cap->he_cap_elem.phy_cap_info); i++) { + req->he_cap.phy_cap_info[i] = he_cap->he_cap_elem.phy_cap_info[i]; + } + req->he_cap.mcs_supp.rx_mcs_80 = cpu_to_le16(he_cap->he_mcs_nss_supp.rx_mcs_80); + req->he_cap.mcs_supp.tx_mcs_80 = cpu_to_le16(he_cap->he_mcs_nss_supp.tx_mcs_80); + req->he_cap.mcs_supp.rx_mcs_160 = cpu_to_le16(he_cap->he_mcs_nss_supp.rx_mcs_160); + req->he_cap.mcs_supp.tx_mcs_160 = cpu_to_le16(he_cap->he_mcs_nss_supp.tx_mcs_160); + req->he_cap.mcs_supp.rx_mcs_80p80 = cpu_to_le16(he_cap->he_mcs_nss_supp.rx_mcs_80p80); + req->he_cap.mcs_supp.tx_mcs_80p80 = cpu_to_le16(he_cap->he_mcs_nss_supp.tx_mcs_80p80); + for (i = 0; i < MAC_HE_PPE_THRES_MAX_LEN; i++) { + req->he_cap.ppe_thres[i] = he_cap->ppe_thres[i]; + } + req->he_ul_on = rwnx_hw->mod_params->he_ul_on; + } + +#else + req->he_supp = false; + req->he_ul_on = false; +#endif + req->ps_on = rwnx_hw->mod_params->ps_on; + req->dpsm = rwnx_hw->mod_params->dpsm; + req->tx_lft = rwnx_hw->mod_params->tx_lft; + req->ant_div_on = rwnx_hw->mod_params->ant_div; + + if (rwnx_hw->mod_params->use_80){ + req->phy_bw_max = PHY_CHNL_BW_80; + }else if (rwnx_hw->mod_params->use_2040){ + req->phy_bw_max = PHY_CHNL_BW_40; + }else{ + req->phy_bw_max = PHY_CHNL_BW_20; + } + +#if 0 + if(!rwnx_hw->he_flag){ + req->he_supp = false; + req->he_ul_on = false; + } + + wiphy_info(wiphy, "HT supp %d, VHT supp %d, HE supp %d\n", req->ht_supp, + req->vht_supp, + req->he_supp); +#endif + + AICWFDBG(LOGINFO, "HT supp %d, VHT supp %d, HE supp %d\n", 
req->ht_supp, + req->vht_supp, + req->he_supp); + + /* Send the ME_CONFIG_REQ message to LMAC FW */ + return rwnx_send_msg(rwnx_hw, req, 1, ME_CONFIG_CFM, NULL); +} + +int rwnx_send_me_chan_config_req(struct rwnx_hw *rwnx_hw, char *ccode) +{ + struct me_chan_config_req *req; + struct wiphy *wiphy = rwnx_hw->wiphy; + int i; +#ifdef CONFIG_POWER_LIMIT + int8_t max_pwr; + uint8_t r_idx; + txpwr_loss_conf_t txpwr_loss_tmp; + txpwr_loss_conf_t *txpwr_loss; +#endif + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + /* Build the ME_CHAN_CONFIG_REQ message */ + req = rwnx_msg_zalloc(ME_CHAN_CONFIG_REQ, TASK_ME, DRV_TASK_ID, + sizeof(struct me_chan_config_req)); + if (!req) + return -ENOMEM; + + +#ifdef CONFIG_POWER_LIMIT + txpwr_loss = &txpwr_loss_tmp; + get_userconfig_txpwr_loss(txpwr_loss); + + if (txpwr_loss->loss_enable_2g4 == 1) + AICWFDBG(LOGINFO, "%s:loss_value_2g4: %d\r\n", __func__, + txpwr_loss->loss_value_2g4); + if (txpwr_loss->loss_enable_5g == 1) + AICWFDBG(LOGINFO, "%s:loss_value_5g: %d\r\n", __func__, + txpwr_loss->loss_value_5g); + + r_idx = get_ccode_region(ccode); +#endif + + req->chan2G4_cnt= 0; + if (wiphy->bands[NL80211_BAND_2GHZ] != NULL) { + struct ieee80211_supported_band *b = wiphy->bands[NL80211_BAND_2GHZ]; + for (i = 0; i < b->n_channels; i++) { + req->chan2G4[req->chan2G4_cnt].flags = 0; + if (b->channels[i].flags & IEEE80211_CHAN_DISABLED) + req->chan2G4[req->chan2G4_cnt].flags |= CHAN_DISABLED; + req->chan2G4[req->chan2G4_cnt].flags |= get_chan_flags(b->channels[i].flags); + req->chan2G4[req->chan2G4_cnt].band = NL80211_BAND_2GHZ; + req->chan2G4[req->chan2G4_cnt].freq = b->channels[i].center_freq; + req->chan2G4[req->chan2G4_cnt].tx_power = chan_to_fw_pwr(b->channels[i].max_power); + #ifdef CONFIG_POWER_LIMIT + { + max_pwr = get_powerlimit_by_freq(PHY_BAND_2G4, req->chan2G4[req->chan2G4_cnt].freq, r_idx); + if (txpwr_loss->loss_enable_2g4 == 1) + max_pwr -= txpwr_loss->loss_value_2g4; + if (req->chan2G4[req->chan2G4_cnt].tx_power > max_pwr) { + req->chan2G4[req->chan2G4_cnt].tx_power = max_pwr; + } + } + #endif + req->chan2G4_cnt++; + if (req->chan2G4_cnt == MAC_DOMAINCHANNEL_24G_MAX) + break; + } + } + + req->chan5G_cnt = 0; + if (wiphy->bands[NL80211_BAND_5GHZ] != NULL) { + struct ieee80211_supported_band *b = wiphy->bands[NL80211_BAND_5GHZ]; + for (i = 0; i < b->n_channels; i++) { + req->chan5G[req->chan5G_cnt].flags = 0; + if (b->channels[i].flags & IEEE80211_CHAN_DISABLED) + req->chan5G[req->chan5G_cnt].flags |= CHAN_DISABLED; + req->chan5G[req->chan5G_cnt].flags |= get_chan_flags(b->channels[i].flags); + req->chan5G[req->chan5G_cnt].band = NL80211_BAND_5GHZ; + req->chan5G[req->chan5G_cnt].freq = b->channels[i].center_freq; + req->chan5G[req->chan5G_cnt].tx_power = chan_to_fw_pwr(b->channels[i].max_power); + #ifdef CONFIG_POWER_LIMIT + { + max_pwr = get_powerlimit_by_freq(PHY_BAND_5G, req->chan5G[req->chan5G_cnt].freq, r_idx); + if (txpwr_loss->loss_enable_5g == 1) + max_pwr -= txpwr_loss->loss_value_5g; + if (req->chan5G[req->chan5G_cnt].tx_power > max_pwr) { + req->chan5G[req->chan5G_cnt].tx_power = max_pwr; + } + } + #endif + req->chan5G_cnt++; + if (req->chan5G_cnt == MAC_DOMAINCHANNEL_5G_MAX) + break; + } + } + + /* Send the ME_CHAN_CONFIG_REQ message to LMAC FW */ + return rwnx_send_msg(rwnx_hw, req, 1, ME_CHAN_CONFIG_CFM, NULL); +} + +int rwnx_send_me_set_control_port_req(struct rwnx_hw *rwnx_hw, bool opened, u8 sta_idx) +{ + struct me_set_control_port_req *req; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + /* Build the ME_SET_CONTROL_PORT_REQ message */ + req = 
rwnx_msg_zalloc(ME_SET_CONTROL_PORT_REQ, TASK_ME, DRV_TASK_ID, + sizeof(struct me_set_control_port_req)); + if (!req) + return -ENOMEM; + + /* Set parameters for the ME_SET_CONTROL_PORT_REQ message */ + req->sta_idx = sta_idx; + req->control_port_open = opened; + + /* Send the ME_SET_CONTROL_PORT_REQ message to LMAC FW */ + return rwnx_send_msg(rwnx_hw, req, 1, ME_SET_CONTROL_PORT_CFM, NULL); +} + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0) && LINUX_VERSION_CODE < KERNEL_VERSION(4, 20, 0)) +struct ieee80211_he_cap_elem_4_19 { + u8 mac_cap_info[6]; + u8 phy_cap_info[11]; +} __packed; +#endif +int rwnx_send_me_sta_add(struct rwnx_hw *rwnx_hw, struct station_parameters *params, + const u8 *mac, u8 inst_nbr, struct me_sta_add_cfm *cfm) +{ + struct me_sta_add_req *req; + +#if LINUX_VERSION_CODE >= HIGH_KERNEL_VERSION + struct link_station_parameters *link_sta_params = ¶ms->link_sta_params; +#else + struct station_parameters *link_sta_params = params; +#endif + + u8 *ht_mcs = (u8 *)&link_sta_params->ht_capa->mcs; + + int i; + struct rwnx_vif *rwnx_vif = rwnx_hw->vif_table[inst_nbr]; + #if (defined CONFIG_HE_FOR_OLD_KERNEL) || (defined CONFIG_VHT_FOR_OLD_KERNEL) + struct aic_sta *sta = &rwnx_hw->aic_table[rwnx_vif->ap.aic_index]; + printk("assoc_req idx %d, he: %d, vht: %d\n ", rwnx_vif->ap.aic_index, sta->he, sta->vht); + if (rwnx_vif->ap.aic_index < NX_REMOTE_STA_MAX + NX_VIRT_DEV_MAX) + rwnx_vif->ap.aic_index++; + else + rwnx_vif->ap.aic_index = 0; + #endif + RWNX_DBG(RWNX_FN_ENTRY_STR); + +#ifdef CONFIG_RADAR_OR_IR_DETECT + if((rwnx_hw->radar.sta_num == 0) && (rwnx_hw->radar.status != RWNX_RADAR_INSERVICE_BUSY)) + rwnx_hw->radar.status = RWNX_RADAR_INSERVICE_BUSY; + + AICWFDBG(LOGINFO, "DFS: assoc add num %d, %d\n", rwnx_hw->radar.sta_num, rwnx_hw->radar.status); + if(rwnx_hw->radar.sta_num < 0x0FFF) + rwnx_hw->radar.sta_num++; +#endif + + /* Build the MM_STA_ADD_REQ message */ + req = rwnx_msg_zalloc(ME_STA_ADD_REQ, TASK_ME, DRV_TASK_ID, + sizeof(struct me_sta_add_req)); + if (!req) + return -ENOMEM; + + /* Set parameters for the MM_STA_ADD_REQ message */ + memcpy(&(req->mac_addr.array[0]), mac, ETH_ALEN); + + req->rate_set.length = link_sta_params->supported_rates_len;; + for (i = 0; i < link_sta_params->supported_rates_len; i++) + req->rate_set.array[i] = link_sta_params->supported_rates[i]; + + req->flags = 0; + + if (link_sta_params->ht_capa) { + const struct ieee80211_ht_cap *ht_capa = link_sta_params->ht_capa; + + req->flags |= STA_HT_CAPA; + req->ht_cap.ht_capa_info = cpu_to_le16(ht_capa->cap_info); + req->ht_cap.a_mpdu_param = ht_capa->ampdu_params_info; + for (i = 0; i < sizeof(ht_capa->mcs); i++) + req->ht_cap.mcs_rate[i] = ht_mcs[i]; + req->ht_cap.ht_extended_capa = cpu_to_le16(ht_capa->extended_ht_cap_info); + req->ht_cap.tx_beamforming_capa = cpu_to_le32(ht_capa->tx_BF_cap_info); + req->ht_cap.asel_capa = ht_capa->antenna_selection_info; + } + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0) + if (link_sta_params->vht_capa) { + const struct ieee80211_vht_cap *vht_capa = link_sta_params->vht_capa; + + req->flags |= STA_VHT_CAPA; + req->vht_cap.vht_capa_info = cpu_to_le32(vht_capa->vht_cap_info); + req->vht_cap.rx_highest = cpu_to_le16(vht_capa->supp_mcs.rx_highest); + req->vht_cap.rx_mcs_map = cpu_to_le16(vht_capa->supp_mcs.rx_mcs_map); + req->vht_cap.tx_highest = cpu_to_le16(vht_capa->supp_mcs.tx_highest); + req->vht_cap.tx_mcs_map = cpu_to_le16(vht_capa->supp_mcs.tx_mcs_map); + } +#elif defined(CONFIG_VHT_FOR_OLD_KERNEL) + if (sta->vht) { + //const struct 
ieee80211_vht_cap *vht_capa = rwnx_vht_capa; + + req->flags |= STA_VHT_CAPA; + req->vht_cap.vht_capa_info = cpu_to_le32(rwnx_vht_capa->cap); + req->vht_cap.rx_highest = sta->supp_mcs.rx_highest;//cpu_to_le16(rwnx_vht_capa->vht_mcs.rx_highest); + req->vht_cap.rx_mcs_map = sta->supp_mcs.rx_mcs_map;//cpu_to_le16(rwnx_vht_capa->vht_mcs.rx_mcs_map); + req->vht_cap.tx_highest = sta->supp_mcs.tx_highest;//cpu_to_le16(rwnx_vht_capa->vht_mcs.tx_highest); + req->vht_cap.tx_mcs_map = sta->supp_mcs.tx_mcs_map;//cpu_to_le16(rwnx_vht_capa->vht_mcs.tx_mcs_map); + } +#endif + + AICWFDBG(LOGDEBUG,"rx map %x rx high %x tx map %x tx high %x \n",req->vht_cap.rx_mcs_map,req->vht_cap.rx_highest,req->vht_cap.tx_mcs_map,req->vht_cap.tx_highest); + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0) + if (link_sta_params->he_capa) { + #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 20, 0) + const struct ieee80211_he_cap_elem_4_19 *he_capa = (const struct ieee80211_he_cap_elem_4_19 *) link_sta_params->he_capa; + #else + const struct ieee80211_he_cap_elem *he_capa = link_sta_params->he_capa; + #endif + struct ieee80211_he_mcs_nss_supp *mcs_nss_supp = + (struct ieee80211_he_mcs_nss_supp *)(he_capa + 1); + + req->flags |= STA_HE_CAPA; + for (i = 0; i < ARRAY_SIZE(he_capa->mac_cap_info); i++) { + req->he_cap.mac_cap_info[i] = he_capa->mac_cap_info[i]; + } + for (i = 0; i < ARRAY_SIZE(he_capa->phy_cap_info); i++) { + req->he_cap.phy_cap_info[i] = he_capa->phy_cap_info[i]; + } + req->he_cap.mcs_supp.rx_mcs_80 = mcs_nss_supp->rx_mcs_80; + req->he_cap.mcs_supp.tx_mcs_80 = mcs_nss_supp->tx_mcs_80; + req->he_cap.mcs_supp.rx_mcs_160 = mcs_nss_supp->rx_mcs_160; + req->he_cap.mcs_supp.tx_mcs_160 = mcs_nss_supp->tx_mcs_160; + req->he_cap.mcs_supp.rx_mcs_80p80 = mcs_nss_supp->rx_mcs_80p80; + req->he_cap.mcs_supp.tx_mcs_80p80 = mcs_nss_supp->tx_mcs_80p80; + } +#else + #ifdef CONFIG_HE_FOR_OLD_KERNEL + if (sta->he) { + const struct ieee80211_he_cap_elem *he_capa = &rwnx_he_capa.he_cap.he_cap_elem; + struct ieee80211_he_mcs_nss_supp *mcs_nss_supp = + (struct ieee80211_he_mcs_nss_supp *)(he_capa + 1); + req->flags |= STA_HE_CAPA; + for (i = 0; i < ARRAY_SIZE(he_capa->mac_cap_info); i++) { + req->he_cap.mac_cap_info[i] = sta->he_cap_elem.mac_cap_info[i];//he_capa->mac_cap_info[i]; + } + for (i = 0; i < ARRAY_SIZE(he_capa->phy_cap_info); i++) { + req->he_cap.phy_cap_info[i] = sta->he_cap_elem.phy_cap_info[i];//he_capa->phy_cap_info[i]; + } + req->he_cap.mcs_supp.rx_mcs_80 = sta->he_mcs_nss_supp.rx_mcs_80;//mcs_nss_supp->rx_mcs_80; + req->he_cap.mcs_supp.tx_mcs_80 = sta->he_mcs_nss_supp.tx_mcs_80;//mcs_nss_supp->tx_mcs_80; + req->he_cap.mcs_supp.rx_mcs_160 = mcs_nss_supp->rx_mcs_160; + req->he_cap.mcs_supp.tx_mcs_160 = mcs_nss_supp->tx_mcs_160; + req->he_cap.mcs_supp.rx_mcs_80p80 = mcs_nss_supp->rx_mcs_80p80; + req->he_cap.mcs_supp.tx_mcs_80p80 = mcs_nss_supp->tx_mcs_80p80; + } + #endif +#endif + + AICWFDBG(LOGDEBUG,"rwnx sta add he mcs/nss rx mcs 80 0x%04x \n",le16_to_cpu(req->he_cap.mcs_supp.rx_mcs_80)); + AICWFDBG(LOGDEBUG,"rwnx sta add he mcs/nss tx mcs 80 0x%04x \n",le16_to_cpu(req->he_cap.mcs_supp.tx_mcs_80)); + + if (params->sta_flags_set & BIT(NL80211_STA_FLAG_WME)) + req->flags |= STA_QOS_CAPA; + + if (params->sta_flags_set & BIT(NL80211_STA_FLAG_MFP)) + req->flags |= STA_MFP_CAPA; + + #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0) + if (link_sta_params->opmode_notif_used) { + req->opmode = link_sta_params->opmode_notif_used; + req->flags |= STA_OPMOD_NOTIF; + } + #endif + + req->aid = cpu_to_le16(params->aid); + 
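+	/* Descriptive note (not in the original source): the assignments below copy the remaining association parameters from cfg80211's station_parameters — the U-APSD queue bitmap, the max service period (max_sp appears to be the coded SP length, scaled by 2 into a frame count for the firmware) and the owning vif index. */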
req->uapsd_queues = params->uapsd_queues; + req->max_sp_len = params->max_sp * 2; + req->vif_idx = inst_nbr; + + if (params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)) { + //struct rwnx_vif *rwnx_vif = rwnx_hw->vif_table[inst_nbr]; + req->tdls_sta = true; +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0) + if ((params->ext_capab[3] & WLAN_EXT_CAPA4_TDLS_CHAN_SWITCH) && + !rwnx_vif->tdls_chsw_prohibited) + req->tdls_chsw_allowed = true; +#endif + if (rwnx_vif->tdls_status == TDLS_SETUP_RSP_TX) + req->tdls_sta_initiator = true; + } + + /* Send the ME_STA_ADD_REQ message to LMAC FW */ + return rwnx_send_msg(rwnx_hw, req, 1, ME_STA_ADD_CFM, cfm); +} + +int rwnx_send_me_sta_del(struct rwnx_hw *rwnx_hw, u8 sta_idx, bool tdls_sta) +{ + struct me_sta_del_req *req; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + /* Build the MM_STA_DEL_REQ message */ + req = rwnx_msg_zalloc(ME_STA_DEL_REQ, TASK_ME, DRV_TASK_ID, + sizeof(struct me_sta_del_req)); + if (!req) + return -ENOMEM; + + /* Set parameters for the MM_STA_DEL_REQ message */ + req->sta_idx = sta_idx; + req->tdls_sta = tdls_sta; + +#ifdef CONFIG_RADAR_OR_IR_DETECT + if((rwnx_hw->radar.sta_num == 1) && (rwnx_hw->radar.status == RWNX_RADAR_INSERVICE_BUSY)) + rwnx_hw->radar.status = RWNX_RADAR_INSERVICE_DONE; + + AICWFDBG(LOGINFO, "DFS: assoc del num %d, %d\n", rwnx_hw->radar.sta_num, rwnx_hw->radar.status); + if(rwnx_hw->radar.sta_num > 0) + rwnx_hw->radar.sta_num--; +#endif + + /* Send the ME_STA_DEL_REQ message to LMAC FW */ + return rwnx_send_msg(rwnx_hw, req, 1, ME_STA_DEL_CFM, NULL); +} + +int rwnx_send_me_traffic_ind(struct rwnx_hw *rwnx_hw, u8 sta_idx, bool uapsd, u8 tx_status) +{ + struct me_traffic_ind_req *req; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + /* Build the ME_UTRAFFIC_IND_REQ message */ + req = rwnx_msg_zalloc(ME_TRAFFIC_IND_REQ, TASK_ME, DRV_TASK_ID, + sizeof(struct me_traffic_ind_req)); + if (!req) + return -ENOMEM; + + /* Set parameters for the ME_TRAFFIC_IND_REQ message */ + req->sta_idx = sta_idx; + req->tx_avail = tx_status; + req->uapsd = uapsd; + + /* Send the ME_TRAFFIC_IND_REQ to UMAC FW */ + return rwnx_send_msg(rwnx_hw, req, 1, ME_TRAFFIC_IND_CFM, NULL); +} + +int rwnx_send_me_rc_stats(struct rwnx_hw *rwnx_hw, + u8 sta_idx, + struct me_rc_stats_cfm *cfm) +{ + struct me_rc_stats_req *req; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + /* Build the ME_RC_STATS_REQ message */ + req = rwnx_msg_zalloc(ME_RC_STATS_REQ, TASK_ME, DRV_TASK_ID, + sizeof(struct me_rc_stats_req)); + if (!req) + return -ENOMEM; + + /* Set parameters for the ME_RC_STATS_REQ message */ + req->sta_idx = sta_idx; + + /* Send the ME_RC_STATS_REQ message to LMAC FW */ + return rwnx_send_msg(rwnx_hw, req, 1, ME_RC_STATS_CFM, cfm); +} + +int rwnx_send_me_rc_set_rate(struct rwnx_hw *rwnx_hw, + u8 sta_idx, + u16 rate_cfg) +{ + struct me_rc_set_rate_req *req; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + /* Build the ME_RC_SET_RATE_REQ message */ + req = rwnx_msg_zalloc(ME_RC_SET_RATE_REQ, TASK_ME, DRV_TASK_ID, + sizeof(struct me_rc_set_rate_req)); + if (!req) + return -ENOMEM; + + /* Set parameters for the ME_RC_SET_RATE_REQ message */ + req->sta_idx = sta_idx; + req->fixed_rate_cfg = rate_cfg; + + /* Send the ME_RC_SET_RATE_REQ message to FW */ + return rwnx_send_msg(rwnx_hw, req, 0, 0, NULL); +} + +int rwnx_send_me_set_ps_mode(struct rwnx_hw *rwnx_hw, u8 ps_mode) +{ + struct me_set_ps_mode_req *req; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + /* Build the ME_SET_PS_MODE_REQ message */ + req = rwnx_msg_zalloc(ME_SET_PS_MODE_REQ, TASK_ME, DRV_TASK_ID, + sizeof(struct me_set_ps_mode_req)); + 
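+	/* Descriptive note (not in the original source): as in the other request builders in this file, a NULL return from rwnx_msg_zalloc() is treated as out of memory and the function bails out with -ENOMEM before filling the message. */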
if (!req) + return -ENOMEM; + + /* Set parameters for the ME_SET_PS_MODE_REQ message */ + req->ps_state = ps_mode; + + /* Send the ME_SET_PS_MODE_REQ message to FW */ + return rwnx_send_msg(rwnx_hw, req, 1, ME_SET_PS_MODE_CFM, NULL); +} + +int rwnx_send_me_set_lp_level(struct rwnx_hw *rwnx_hw, u8 lp_level) +{ + struct me_set_lp_level_req *req; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + /* Build the ME_SET_LP_LEVEL_REQ message */ + req = rwnx_msg_zalloc(ME_SET_LP_LEVEL_REQ, TASK_ME, DRV_TASK_ID, + sizeof(struct me_set_lp_level_req)); + if (!req) + return -ENOMEM; + + /* Set parameters for the ME_SET_LP_LEVEL_REQ message */ + req->lp_level = lp_level; + + /* Send the ME_SET_LP_LEVEL_REQ message to FW */ + return rwnx_send_msg(rwnx_hw, req, 1, ME_SET_LP_LEVEL_CFM, NULL); +} + +int rwnx_send_sm_connect_req(struct rwnx_hw *rwnx_hw, + struct rwnx_vif *rwnx_vif, + struct cfg80211_connect_params *sme, + struct sm_connect_cfm *cfm) +{ + struct sm_connect_req *req; + int i; + u32_l flags = 0; + bool gval = false; + bool pval = false; + + rwnx_vif->wep_enabled = false; + rwnx_vif->wep_auth_err = false; + rwnx_vif->last_auth_type = 0; + + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + /* Build the SM_CONNECT_REQ message */ + req = rwnx_msg_zalloc(SM_CONNECT_REQ, TASK_SM, DRV_TASK_ID, + sizeof(struct sm_connect_req)); + if (!req) + return -ENOMEM; + + if ((sme->crypto.cipher_group == WLAN_CIPHER_SUITE_WEP40) || + (sme->crypto.cipher_group == WLAN_CIPHER_SUITE_WEP104)) { + gval = true; + } + + if (sme->crypto.n_ciphers_pairwise && + ((sme->crypto.ciphers_pairwise[0] == WLAN_CIPHER_SUITE_WEP40) || + (sme->crypto.ciphers_pairwise[0] == WLAN_CIPHER_SUITE_WEP104))) { + pval = true; + } + + /* Set parameters for the SM_CONNECT_REQ message */ + if (sme->crypto.n_ciphers_pairwise && + ((sme->crypto.ciphers_pairwise[0] == WLAN_CIPHER_SUITE_WEP40) || + (sme->crypto.ciphers_pairwise[0] == WLAN_CIPHER_SUITE_TKIP) || + (sme->crypto.ciphers_pairwise[0] == WLAN_CIPHER_SUITE_WEP104))) + flags |= DISABLE_HT; + + if (sme->crypto.control_port) + flags |= CONTROL_PORT_HOST; + + if (sme->crypto.control_port_no_encrypt) + flags |= CONTROL_PORT_NO_ENC; + + if (use_pairwise_key(&sme->crypto)) + flags |= WPA_WPA2_IN_USE; + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0) || defined (CONFIG_WPA3_FOR_OLD_KERNEL) + if (sme->mfp == NL80211_MFP_REQUIRED) + flags |= MFP_IN_USE; +#endif + if (rwnx_vif->sta.ap) + flags |= REASSOCIATION; + + req->ctrl_port_ethertype = sme->crypto.control_port_ethertype; + + if (sme->bssid) + memcpy(&req->bssid, sme->bssid, ETH_ALEN); + else + req->bssid = mac_addr_bcst; + req->vif_idx = rwnx_vif->vif_index; + if (sme->channel) { + req->chan.band = sme->channel->band; + req->chan.freq = sme->channel->center_freq; + req->chan.flags = get_chan_flags(sme->channel->flags); + } else { + req->chan.freq = (u16_l)-1; + } + for (i = 0; i < sme->ssid_len; i++) + req->ssid.array[i] = sme->ssid[i]; + req->ssid.length = sme->ssid_len; + + req->flags = flags; + if (WARN_ON(sme->ie_len > sizeof(req->ie_buf))) + goto invalid_param; + if (sme->ie_len) + memcpy(req->ie_buf, sme->ie, sme->ie_len); + req->ie_len = sme->ie_len; + req->listen_interval = rwnx_mod_params.listen_itv; + req->dont_wait_bcmc = !rwnx_mod_params.listen_bcmc; + + /* Set auth_type */ + if (sme->auth_type == NL80211_AUTHTYPE_AUTOMATIC) + req->auth_type = WLAN_AUTH_OPEN; + else if (sme->auth_type == NL80211_AUTHTYPE_OPEN_SYSTEM) + req->auth_type = WLAN_AUTH_OPEN; + else if (sme->auth_type == NL80211_AUTHTYPE_SHARED_KEY) + req->auth_type = WLAN_AUTH_SHARED_KEY; + else 
if (sme->auth_type == NL80211_AUTHTYPE_FT) + req->auth_type = WLAN_AUTH_FT; +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0) || defined (CONFIG_WPA3_FOR_OLD_KERNEL) + else if (sme->auth_type == NL80211_AUTHTYPE_SAE) + req->auth_type = WLAN_AUTH_SAE; +#endif + else + goto invalid_param; + + /* Set UAPSD queues */ + req->uapsd_queues = rwnx_mod_params.uapsd_queues; + + rwnx_vif->wep_enabled = pval & gval; + + if (rwnx_vif->wep_enabled) { + rwnx_vif->last_auth_type = sme->auth_type; + } +#ifdef CONFIG_USE_WIRELESS_EXT + memset(rwnx_hw->wext_essid, 0, 33); + memcpy(rwnx_hw->wext_essid, sme->ssid, (int)sme->ssid_len); +#endif + + rwnx_vif->sta.ssid_len = (int)sme->ssid_len; + memset(rwnx_vif->sta.ssid, 0, rwnx_vif->sta.ssid_len + 1); + memcpy(rwnx_vif->sta.ssid, sme->ssid, rwnx_vif->sta.ssid_len); + if (sme->bssid) + memcpy(rwnx_vif->sta.bssid, sme->bssid, ETH_ALEN); + + AICWFDBG(LOGINFO, "%s drv_vif_index:%d connect to %s(%d) channel:%d auth_type:%d\r\n", + __func__, + rwnx_vif->drv_vif_index, + rwnx_vif->sta.ssid, + rwnx_vif->sta.ssid_len, + req->chan.freq, + req->auth_type); + + /* Send the SM_CONNECT_REQ message to LMAC FW */ + return rwnx_send_msg(rwnx_hw, req, 1, SM_CONNECT_CFM, cfm); + +invalid_param: + rwnx_msg_free(rwnx_hw, req); + return -EINVAL; +} + +int rwnx_send_sm_disconnect_req(struct rwnx_hw *rwnx_hw, + struct rwnx_vif *rwnx_vif, + u16 reason) +{ + struct sm_disconnect_req *req; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + /* Build the SM_DISCONNECT_REQ message */ + req = rwnx_msg_zalloc(SM_DISCONNECT_REQ, TASK_SM, DRV_TASK_ID, + sizeof(struct sm_disconnect_req)); + if (!req) + return -ENOMEM; + + /* Set parameters for the SM_DISCONNECT_REQ message */ + req->reason_code = reason; + req->vif_idx = rwnx_vif->vif_index; + + /* Send the SM_DISCONNECT_REQ message to LMAC FW */ + return rwnx_send_msg(rwnx_hw, req, 1, SM_DISCONNECT_CFM, NULL); +} + +int rwnx_send_sm_external_auth_required_rsp(struct rwnx_hw *rwnx_hw, + struct rwnx_vif *rwnx_vif, + u16 status) +{ + struct sm_external_auth_required_rsp *rsp; + + /* Build the SM_EXTERNAL_AUTH_CFM message */ + rsp = rwnx_msg_zalloc(SM_EXTERNAL_AUTH_REQUIRED_RSP, TASK_SM, DRV_TASK_ID, + sizeof(struct sm_external_auth_required_rsp)); + if (!rsp) + return -ENOMEM; + + rsp->status = status; + rsp->vif_idx = rwnx_vif->vif_index; + + /* send the SM_EXTERNAL_AUTH_REQUIRED_RSP message UMAC FW */ + return rwnx_send_msg(rwnx_hw, rsp, 1, SM_EXTERNAL_AUTH_REQUIRED_RSP_CFM, NULL); +} + +int rwnx_send_apm_start_req(struct rwnx_hw *rwnx_hw, struct rwnx_vif *vif, + struct cfg80211_ap_settings *settings, + struct apm_start_cfm *cfm, + struct rwnx_ipc_elem_var *elem) +{ + struct apm_start_req *req; + struct rwnx_bcn *bcn = &vif->ap.bcn; + u8 *buf; + u32 flags = 0; + const u8 *rate_ie; + u8 rate_len = 0; + int var_offset = offsetof(struct ieee80211_mgmt, u.beacon.variable); + const u8 *var_pos; + int len, i; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + /* Build the APM_START_REQ message */ + req = rwnx_msg_zalloc(APM_START_REQ, TASK_APM, DRV_TASK_ID, + sizeof(struct apm_start_req)); + if (!req) + return -ENOMEM; + + // Build the beacon + bcn->dtim = (u8)settings->dtim_period; + buf = rwnx_build_bcn(bcn, &settings->beacon); + if (!buf) { + rwnx_msg_free(rwnx_hw, req); + return -ENOMEM; + } + + // Retrieve the basic rate set from the beacon buffer + len = bcn->len - var_offset; + var_pos = buf + var_offset; + +// Assume that rate higher that 54 Mbps are BSS membership +#define IS_BASIC_RATE(r) (r & 0x80) && ((r & ~0x80) <= (54 * 2)) + + rate_ie = 
cfg80211_find_ie(WLAN_EID_SUPP_RATES, var_pos, len); + if (rate_ie) { + const u8 *rates = rate_ie + 2; + for (i = 0; (i < rate_ie[1]) && (rate_len < MAC_RATESET_LEN); i++) { + if (IS_BASIC_RATE(rates[i])) + req->basic_rates.array[rate_len++] = rates[i]; + } + } + rate_ie = cfg80211_find_ie(WLAN_EID_EXT_SUPP_RATES, var_pos, len); + if (rate_ie) { + const u8 *rates = rate_ie + 2; + for (i = 0; (i < rate_ie[1]) && (rate_len < MAC_RATESET_LEN); i++) { + if (IS_BASIC_RATE(rates[i])) + req->basic_rates.array[rate_len++] = rates[i]; + } + } + req->basic_rates.length = rate_len; +#undef IS_BASIC_RATE + + #if 0 + // Sync buffer for FW + if ((error = rwnx_ipc_elem_var_allocs(rwnx_hw, elem, bcn->len, + DMA_TO_DEVICE, buf, NULL, NULL))) { + return error; + } + #else + rwnx_send_bcn(rwnx_hw, buf, vif->vif_index, bcn->len); + #endif + + /* Set parameters for the APM_START_REQ message */ + req->vif_idx = vif->vif_index; + req->bcn_addr = elem->dma_addr; + req->bcn_len = bcn->len; + req->tim_oft = bcn->head_len; + req->tim_len = bcn->tim_len; +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0) + req->chan.band = settings->chandef.chan->band; + req->chan.freq = settings->chandef.chan->center_freq; +#endif + +#ifdef CONFIG_RADAR_OR_IR_DETECT + req->chan.flags = get_chan_flags(settings->chandef.chan->flags); +#else + req->chan.flags = 0; +#endif + printk("chan.flags %u \n",req->chan.flags); + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0) + req->chan.tx_power = chan_to_fw_pwr(settings->chandef.chan->max_power); + req->center_freq1 = settings->chandef.center_freq1; + req->center_freq2 = settings->chandef.center_freq2; + req->ch_width = bw2chnl[settings->chandef.width]; +#endif +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0) + req->chan.band = rwnx_hw->ap_chan.band; + req->chan.freq = rwnx_hw->ap_chan.prim20_freq; + req->center_freq1 = rwnx_hw->ap_chan.center1_freq; + req->center_freq2 = rwnx_hw->ap_chan.center2_freq; + req->chan.tx_power = rwnx_hw->ap_chan.tx_power; +#endif + req->bcn_int = settings->beacon_interval; + if (settings->crypto.control_port) + flags |= CONTROL_PORT_HOST; + + if (settings->crypto.control_port_no_encrypt) + flags |= CONTROL_PORT_NO_ENC; + + if (use_pairwise_key(&settings->crypto)) + flags |= WPA_WPA2_IN_USE; + + if (settings->crypto.control_port_ethertype) + req->ctrl_port_ethertype = settings->crypto.control_port_ethertype; + else + req->ctrl_port_ethertype = ETH_P_PAE; + req->flags = flags; + + /* Send the APM_START_REQ message to LMAC FW */ + return rwnx_send_msg(rwnx_hw, req, 1, APM_START_CFM, cfm); +} + +int rwnx_send_apm_stop_req(struct rwnx_hw *rwnx_hw, struct rwnx_vif *vif) +{ + struct apm_stop_req *req; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + /* Build the APM_STOP_REQ message */ + req = rwnx_msg_zalloc(APM_STOP_REQ, TASK_APM, DRV_TASK_ID, + sizeof(struct apm_stop_req)); + if (!req) + return -ENOMEM; + + /* Set parameters for the APM_STOP_REQ message */ + req->vif_idx = vif->vif_index; + + /* Send the APM_STOP_REQ message to LMAC FW */ + return rwnx_send_msg(rwnx_hw, req, 1, APM_STOP_CFM, NULL); +} + + +#define P2P_WILDCARD_SSID "DIRECT-" +#define P2P_WILDCARD_SSID_LEN (sizeof(P2P_WILDCARD_SSID) - 1) + + +#ifdef CONFIG_SET_VENDOR_EXTENSION_IE +u8_l vendor_extension_data[256]; +u8_l vendor_extension_len = 0; +#if 0 +u8_l vendor_extension_data[]={ + 0x10,0x49,0x00,0x17,0x00,0x01,0x37,0x10, + 0x06,0x00,0x10,0xc5,0xc9,0x91,0xeb,0x1f, + 0xce,0x4d,0x00,0xa1,0x2a,0xdf,0xa1,0xe9, + 0xc3,0x44,0xe6,0x10,0x49,0x00,0x21,0x00, + 0x01,0x37,0x20,0x01,0x00,0x01,0x05,0x20, + 
0x02,0x00,0x04,0x43,0x56,0x54,0x45,0x20, + 0x05,0x00,0x0d,0x31,0x39,0x32,0x2e,0x31, + 0x36,0x38,0x2e,0x31,0x35,0x34,0x2e,0x31}; +#endif + +void rwnx_insert_vendor_extension_data(struct scanu_vendor_ie_req *ie_req){ + u8_l temp_ie[256]; + u8_l vendor_extension_subelement[3] = {0x00,0x37,0x2A}; + u8_l vendor_extension_id[2] = {0x10,0x49}; + int index = 0; + int vendor_extension_subelement_len = 0; + + memset(temp_ie, 0, 256); + + //find vendor_extension_subelement + for(index = 0; index < ie_req->add_ie_len; index++){ + if(ie_req->ie[index] == vendor_extension_id[0]){ + index++; + if(index == ie_req->add_ie_len){ + return; + } + if(ie_req->ie[index] == vendor_extension_id[1] && + ie_req->ie[index + 3] == vendor_extension_subelement[0]&& + ie_req->ie[index + 4] == vendor_extension_subelement[1]&& + ie_req->ie[index + 5] == vendor_extension_subelement[2]){ + index = index + 2; + vendor_extension_subelement_len = ie_req->ie[index]; + AICWFDBG(LOGDEBUG, "%s find vendor_extension_subelement,index:%d len:%d\r\n", __func__, index, ie_req->ie[index]); + break; + } + } + } + index = index + vendor_extension_subelement_len; + + //insert vendor extension + memcpy(&temp_ie[0], ie_req->ie, index + 1); + memcpy(&temp_ie[index + 1], vendor_extension_data, vendor_extension_len/*sizeof(vendor_extension_data)*/);//insert vendor extension data + memcpy(&temp_ie[index + 1 + vendor_extension_len/*sizeof(vendor_extension_data)*/], &ie_req->ie[index + 1], ie_req->add_ie_len - index); + + memcpy(ie_req->ie, temp_ie, ie_req->add_ie_len + vendor_extension_len/*sizeof(vendor_extension_data)*/); + ie_req->add_ie_len = ie_req->add_ie_len + vendor_extension_len/*sizeof(vendor_extension_data)*/; + ie_req->ie[1] = ie_req->ie[1] + vendor_extension_len/*sizeof(vendor_extension_data)*/; + + //rwnx_data_dump((char*)__func__, (void*)ie_req->ie, ie_req->add_ie_len); +} +#endif//CONFIG_SET_VENDOR_EXTENSION_IE + +int rwnx_send_scanu_req(struct rwnx_hw *rwnx_hw, struct rwnx_vif *rwnx_vif, + struct cfg80211_scan_request *param) +{ + struct scanu_start_req *req = NULL; + struct scanu_vendor_ie_req *ie_req = NULL; + struct mm_add_if_cfm add_if_cfm; + int i; + uint8_t chan_flags = 0; + int err; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + /* Build the SCANU_START_REQ message */ + req = rwnx_msg_zalloc(SCANU_START_REQ, TASK_SCANU, DRV_TASK_ID, + sizeof(struct scanu_start_req)); + if (!req) + return -ENOMEM; + + rwnx_hw->scanning = 1; + /* Set parameters */ + req->vif_idx = rwnx_vif->vif_index; + req->chan_cnt = (u8)min_t(int, SCAN_CHANNEL_MAX, param->n_channels); + req->ssid_cnt = (u8)min_t(int, SCAN_SSID_MAX, param->n_ssids); + req->bssid = mac_addr_bcst; + req->no_cck = param->no_cck; + +#ifdef CONFIG_RADAR_OR_IR_DETECT + if (req->ssid_cnt == 0) + chan_flags |= CHAN_NO_IR; +#endif + for (i = 0; i < req->ssid_cnt; i++) { + int j; + for (j = 0; j < param->ssids[i].ssid_len; j++) + req->ssid[i].array[j] = param->ssids[i].ssid[j]; + req->ssid[i].length = param->ssids[i].ssid_len; + + if (!memcmp(P2P_WILDCARD_SSID, param->ssids[i].ssid, + P2P_WILDCARD_SSID_LEN)) { + AICWFDBG(LOGINFO, "p2p scanu:%d,%d,%d\n",rwnx_vif->vif_index, rwnx_vif->is_p2p_vif, rwnx_hw->is_p2p_alive); +#ifdef CONFIG_USE_P2P0 + if (rwnx_vif->is_p2p_vif && !rwnx_hw->is_p2p_alive) { +#else + if (rwnx_vif == rwnx_hw->p2p_dev_vif && !rwnx_vif->up) { +#endif + err = rwnx_send_add_if (rwnx_hw, rwnx_vif->wdev.address, + RWNX_VIF_TYPE(rwnx_vif), false, &add_if_cfm); + if (err) + goto error; + + /* + if ((err = rwnx_send_add_if(rwnx_hw, rwnx_vif->wdev.address, + RWNX_VIF_TYPE(rwnx_vif), 
false, &add_if_cfm))) + goto error; + */ + + if (add_if_cfm.status != 0) { + return -EIO; + } + + /* Save the index retrieved from LMAC */ + spin_lock_bh(&rwnx_hw->cb_lock); + rwnx_vif->vif_index = add_if_cfm.inst_nbr; + rwnx_vif->up = true; + rwnx_hw->vif_started++; + rwnx_hw->vif_table[add_if_cfm.inst_nbr] = rwnx_vif; + spin_unlock_bh(&rwnx_hw->cb_lock); + } + rwnx_hw->is_p2p_alive = 1; +#ifndef CONFIG_USE_P2P0 + mod_timer(&rwnx_hw->p2p_alive_timer, jiffies + msecs_to_jiffies(1000)); + atomic_set(&rwnx_hw->p2p_alive_timer_count, 0); +#endif + AICWFDBG(LOGINFO ,"p2p scan start\n"); +#ifdef CONFIG_STA_SCAN_WHEN_P2P_WORKING + rwnx_hw->p2p_working = false; +#else + rwnx_hw->p2p_working = true; +#endif + } + } + +#if 1 + if (param->ie) { + #if 0 + if (rwnx_ipc_elem_var_allocs(rwnx_hw, &rwnx_hw->scan_ie, + param->ie_len, DMA_TO_DEVICE, + NULL, param->ie, NULL)) + goto error; + + req->add_ie_len = param->ie_len; + req->add_ies = rwnx_hw->scan_ie.dma_addr; + #else + ie_req = rwnx_msg_zalloc(SCANU_VENDOR_IE_REQ, TASK_SCANU, DRV_TASK_ID, + sizeof(struct scanu_vendor_ie_req)); + if (!ie_req) + return -ENOMEM; + + ie_req->add_ie_len = param->ie_len; + ie_req->vif_idx = rwnx_vif->vif_index; + memcpy(ie_req->ie, param->ie, param->ie_len); +#ifdef CONFIG_SET_VENDOR_EXTENSION_IE + rwnx_insert_vendor_extension_data(ie_req); +#endif //CONFIG_SET_VENDOR_EXTENSION_IE + req->add_ie_len = 0; + req->add_ies = 0; + + if ((err = rwnx_send_msg(rwnx_hw, ie_req, 1, SCANU_VENDOR_IE_CFM, NULL))) + goto error; + #endif + } + else { + req->add_ie_len = 0; + req->add_ies = 0; + } +#else + req->add_ie_len = 0; + req->add_ies = 0; +#endif + + for (i = 0; i < req->chan_cnt; i++) { + struct ieee80211_channel *chan = param->channels[i]; + AICWFDBG(LOGDEBUG, "scan channel:%d(%d) \r\n", ieee80211_frequency_to_channel(chan->center_freq), chan->center_freq); + req->chan[i].band = chan->band; + req->chan[i].freq = chan->center_freq; + req->chan[i].flags = chan_flags | get_chan_flags(chan->flags); + req->chan[i].tx_power = chan_to_fw_pwr(chan->max_reg_power); + } + + /* Send the SCANU_START_REQ message to LMAC FW */ + return rwnx_send_msg(rwnx_hw, req, 1, SCANU_START_CFM_ADDTIONAL, NULL); +error: + if (req != NULL) + rwnx_msg_free(rwnx_hw, req); + if (ie_req != NULL) + rwnx_msg_free(rwnx_hw, ie_req); + return -ENOMEM; +} + +int rwnx_send_scanu_cancel_req(struct rwnx_hw *rwnx_hw, struct scan_cancel_cfm *cfm) +{ + struct scan_cancel_req *req; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + /* Build the SCAN_CANCEL_REQ message */ + req = rwnx_msg_zalloc(SCANU_CANCEL_REQ, TASK_SCANU, DRV_TASK_ID, + sizeof(struct scan_cancel_req)); + if (!req) + return -ENOMEM; + + /* Send the SCAN_CANCEL_REQ message to LMAC FW */ + return rwnx_send_msg(rwnx_hw, req, 1, SCANU_CANCEL_CFM, cfm); +} + +int rwnx_send_apm_start_cac_req(struct rwnx_hw *rwnx_hw, struct rwnx_vif *vif, + struct cfg80211_chan_def *chandef, + struct apm_start_cac_cfm *cfm) +{ + struct apm_start_cac_req *req; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + /* Build the APM_START_CAC_REQ message */ + req = rwnx_msg_zalloc(APM_START_CAC_REQ, TASK_APM, DRV_TASK_ID, + sizeof(struct apm_start_cac_req)); + if (!req) + return -ENOMEM; + + /* Set parameters for the APM_START_CAC_REQ message */ + req->vif_idx = vif->vif_index; + req->chan.band = chandef->chan->band; + req->chan.type = bw2chnl[chandef->width]; + req->chan.prim20_freq = chandef->chan->center_freq; + req->chan.center1_freq = chandef->center_freq1; + req->chan.center2_freq = chandef->center_freq2; + req->chan.tx_power = 20; + req->chan.flags = 
get_chan_flags(chandef->chan->flags); +#ifdef CONFIG_RADAR_OR_IR_DETECT + rwnx_hw->radar.status = RWNX_RADAR_CAC_BUSY; + AICWFDBG(LOGINFO, "DFS: radar st = %d\n", rwnx_hw->radar.status); +#endif + /* Send the APM_START_CAC_REQ message to LMAC FW */ + return rwnx_send_msg(rwnx_hw, req, 1, APM_START_CAC_CFM, cfm); +} + +int rwnx_send_apm_stop_cac_req(struct rwnx_hw *rwnx_hw, struct rwnx_vif *vif) +{ + struct apm_stop_cac_req *req; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + /* Build the APM_STOP_CAC_REQ message */ + req = rwnx_msg_zalloc(APM_STOP_CAC_REQ, TASK_APM, DRV_TASK_ID, + sizeof(struct apm_stop_cac_req)); + if (!req) + return -ENOMEM; + + /* Set parameters for the APM_STOP_CAC_REQ message */ + req->vif_idx = vif->vif_index; + + /* Send the APM_STOP_CAC_REQ message to LMAC FW */ +#ifdef CONFIG_RADAR_OR_IR_DETECT + rwnx_hw->radar.status = RWNX_RADAR_CAC_DONE; + AICWFDBG(LOGINFO, "DFS: radar st = %d\n", rwnx_hw->radar.status); +#endif + return rwnx_send_msg(rwnx_hw, req, 1, APM_STOP_CAC_CFM, NULL); +} + +int rwnx_send_mesh_start_req(struct rwnx_hw *rwnx_hw, struct rwnx_vif *vif, + const struct mesh_config *conf, const struct mesh_setup *setup, + struct mesh_start_cfm *cfm) +{ + // Message to send + struct mesh_start_req *req; + // Supported basic rates + struct ieee80211_supported_band *band_2GHz = rwnx_hw->wiphy->bands[NL80211_BAND_2GHZ]; + /* Counter */ + int i; + /* Return status */ + int status; + /* DMA Address to be unmapped after confirmation reception */ + u32 dma_addr = 0; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + /* Build the MESH_START_REQ message */ + req = rwnx_msg_zalloc(MESH_START_REQ, TASK_MESH, DRV_TASK_ID, + sizeof(struct mesh_start_req)); + if (!req) { + return -ENOMEM; + } + + req->vif_index = vif->vif_index; + #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0) + req->bcn_int = setup->beacon_interval; + req->dtim_period = setup->dtim_period; +#endif + req->mesh_id_len = setup->mesh_id_len; + + for (i = 0; i < setup->mesh_id_len; i++) { + req->mesh_id[i] = *(setup->mesh_id + i); + } + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0) + req->user_mpm = setup->user_mpm; +#endif + req->is_auth = setup->is_authenticated; + #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0) + req->auth_id = setup->auth_id; + #endif + req->ie_len = setup->ie_len; + + if (setup->ie_len) { + /* + * Need to provide a Virtual Address to the MAC so that it can download the + * additional information elements. 
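+         * The buffer is streaming-DMA mapped just below for the duration of
+         * the request and unmapped again once the MESH_START_CFM has been
+         * received (the address is kept in dma_addr for that purpose). If
+         * the mapping fails, the request is still sent, simply without the
+         * additional IEs.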
+ */ + req->ie_addr = dma_map_single(rwnx_hw->dev, (void *)setup->ie, + setup->ie_len, DMA_FROM_DEVICE); + + /* Check DMA mapping result */ + if (dma_mapping_error(rwnx_hw->dev, req->ie_addr)) { + printk(KERN_CRIT "%s - DMA Mapping error on additional IEs\n", __func__); + + /* Consider there is no Additional IEs */ + req->ie_len = 0; + } else { + /* Store DMA Address so that we can unmap the memory section once MESH_START_CFM is received */ + dma_addr = req->ie_addr; + } + } + + /* Provide rate information */ + req->basic_rates.length = 0; + for (i = 0; i < band_2GHz->n_bitrates; i++) { + u16 rate = band_2GHz->bitrates[i].bitrate; + + /* Read value is in in units of 100 Kbps, provided value is in units + * of 1Mbps, and multiplied by 2 so that 5.5 becomes 11 */ + rate = (rate << 1) / 10; + + #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0) // TODO: check basic rates + if (setup->basic_rates & CO_BIT(i)) { + rate |= 0x80; + } + #endif + + req->basic_rates.array[i] = (u8)rate; + req->basic_rates.length++; + } + + /* Provide channel information */ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0) + req->chan.band = setup->chandef.chan->band; + req->chan.freq = setup->chandef.chan->center_freq; +#endif + + req->chan.flags = 0; + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0) + req->chan.tx_power = chan_to_fw_pwr(setup->chandef.chan->max_power); + req->center_freq1 = setup->chandef.center_freq1; + req->center_freq2 = setup->chandef.center_freq2; + req->ch_width = bw2chnl[setup->chandef.width]; +#endif + + /* Send the MESH_START_REQ message to UMAC FW */ + status = rwnx_send_msg(rwnx_hw, req, 1, MESH_START_CFM, cfm); + + /* Unmap DMA area */ + if (setup->ie_len) { + dma_unmap_single(rwnx_hw->dev, dma_addr, setup->ie_len, DMA_TO_DEVICE); + } + + /* Return the status */ + return (status); +} + +int rwnx_send_mesh_stop_req(struct rwnx_hw *rwnx_hw, struct rwnx_vif *vif, + struct mesh_stop_cfm *cfm) +{ + // Message to send + struct mesh_stop_req *req; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + /* Build the MESH_STOP_REQ message */ + req = rwnx_msg_zalloc(MESH_STOP_REQ, TASK_MESH, DRV_TASK_ID, + sizeof(struct mesh_stop_req)); + if (!req) { + return -ENOMEM; + } + + req->vif_idx = vif->vif_index; + + /* Send the MESH_STOP_REQ message to UMAC FW */ + return rwnx_send_msg(rwnx_hw, req, 1, MESH_STOP_CFM, cfm); +} + +int rwnx_send_mesh_update_req(struct rwnx_hw *rwnx_hw, struct rwnx_vif *vif, + u32 mask, const struct mesh_config *p_mconf, struct mesh_update_cfm *cfm) +{ + // Message to send + struct mesh_update_req *req; + // Keep only bit for fields which can be updated + u32 supp_mask = (mask << 1) & (CO_BIT(NL80211_MESHCONF_GATE_ANNOUNCEMENTS) + | CO_BIT(NL80211_MESHCONF_HWMP_ROOTMODE) + | CO_BIT(NL80211_MESHCONF_FORWARDING) + | CO_BIT(NL80211_MESHCONF_POWER_MODE)); + + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + if (!supp_mask) { + return -ENOENT; + } + + /* Build the MESH_UPDATE_REQ message */ + req = rwnx_msg_zalloc(MESH_UPDATE_REQ, TASK_MESH, DRV_TASK_ID, + sizeof(struct mesh_update_req)); + + if (!req) { + return -ENOMEM; + } + + req->vif_idx = vif->vif_index; + + if (supp_mask & CO_BIT(NL80211_MESHCONF_GATE_ANNOUNCEMENTS)) + { + req->flags |= CO_BIT(MESH_UPDATE_FLAGS_GATE_MODE_BIT); + req->gate_announ = p_mconf->dot11MeshGateAnnouncementProtocol; + } + + if (supp_mask & CO_BIT(NL80211_MESHCONF_HWMP_ROOTMODE)) + { + req->flags |= CO_BIT(MESH_UPDATE_FLAGS_ROOT_MODE_BIT); + req->root_mode = p_mconf->dot11MeshHWMPRootMode; + } + + if (supp_mask & CO_BIT(NL80211_MESHCONF_FORWARDING)) + { + req->flags |= 
CO_BIT(MESH_UPDATE_FLAGS_MESH_FWD_BIT); + req->mesh_forward = p_mconf->dot11MeshForwarding; + } + + if (supp_mask & CO_BIT(NL80211_MESHCONF_POWER_MODE)) + { + req->flags |= CO_BIT(MESH_UPDATE_FLAGS_LOCAL_PSM_BIT); +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0) + req->local_ps_mode = p_mconf->power_mode; +#endif + } + + /* Send the MESH_UPDATE_REQ message to UMAC FW */ + return rwnx_send_msg(rwnx_hw, req, 1, MESH_UPDATE_CFM, cfm); +} + +int rwnx_send_mesh_peer_info_req(struct rwnx_hw *rwnx_hw, struct rwnx_vif *vif, + u8 sta_idx, struct mesh_peer_info_cfm *cfm) +{ + // Message to send + struct mesh_peer_info_req *req; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + /* Build the MESH_PEER_INFO_REQ message */ + req = rwnx_msg_zalloc(MESH_PEER_INFO_REQ, TASK_MESH, DRV_TASK_ID, + sizeof(struct mesh_peer_info_req)); + if (!req) { + return -ENOMEM; + } + + req->sta_idx = sta_idx; + + /* Send the MESH_PEER_INFO_REQ message to UMAC FW */ + return rwnx_send_msg(rwnx_hw, req, 1, MESH_PEER_INFO_CFM, cfm); +} + +void rwnx_send_mesh_peer_update_ntf(struct rwnx_hw *rwnx_hw, struct rwnx_vif *vif, + u8 sta_idx, u8 mlink_state) +{ + // Message to send + struct mesh_peer_update_ntf *ntf; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + /* Build the MESH_PEER_UPDATE_NTF message */ + ntf = rwnx_msg_zalloc(MESH_PEER_UPDATE_NTF, TASK_MESH, DRV_TASK_ID, + sizeof(struct mesh_peer_update_ntf)); + + if (ntf) { + ntf->vif_idx = vif->vif_index; + ntf->sta_idx = sta_idx; + ntf->state = mlink_state; + + /* Send the MESH_PEER_INFO_REQ message to UMAC FW */ + rwnx_send_msg(rwnx_hw, ntf, 0, 0, NULL); + } +} + +void rwnx_send_mesh_path_create_req(struct rwnx_hw *rwnx_hw, struct rwnx_vif *vif, u8 *tgt_addr) +{ + RWNX_DBG(RWNX_FN_ENTRY_STR); + + /* Check if we are already waiting for a confirmation */ + if (!vif->ap.create_path) { + // Message to send + struct mesh_path_create_req *req; + + /* Build the MESH_PATH_CREATE_REQ message */ + req = rwnx_msg_zalloc(MESH_PATH_CREATE_REQ, TASK_MESH, DRV_TASK_ID, + sizeof(struct mesh_path_create_req)); + + if (req) { + req->vif_idx = vif->vif_index; + memcpy(&req->tgt_mac_addr, tgt_addr, ETH_ALEN); + + vif->ap.create_path = true; + + /* Send the MESH_PATH_CREATE_REQ message to UMAC FW */ + rwnx_send_msg(rwnx_hw, req, 0, 0, NULL); + } + } +} + +int rwnx_send_mesh_path_update_req(struct rwnx_hw *rwnx_hw, struct rwnx_vif *vif, const u8 *tgt_addr, + const u8 *p_nhop_addr, struct mesh_path_update_cfm *cfm) +{ + // Message to send + struct mesh_path_update_req *req; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + /* Build the MESH_PATH_UPDATE_REQ message */ + req = rwnx_msg_zalloc(MESH_PATH_UPDATE_REQ, TASK_MESH, DRV_TASK_ID, + sizeof(struct mesh_path_update_req)); + if (!req) { + return -ENOMEM; + } + + req->delete = (p_nhop_addr == NULL); + req->vif_idx = vif->vif_index; + memcpy(&req->tgt_mac_addr, tgt_addr, ETH_ALEN); + + if (p_nhop_addr) { + memcpy(&req->nhop_mac_addr, p_nhop_addr, ETH_ALEN); + } + + /* Send the MESH_PATH_UPDATE_REQ message to UMAC FW */ + return rwnx_send_msg(rwnx_hw, req, 1, MESH_PATH_UPDATE_CFM, cfm); +} + +void rwnx_send_mesh_proxy_add_req(struct rwnx_hw *rwnx_hw, struct rwnx_vif *vif, u8 *ext_addr) +{ + // Message to send + struct mesh_proxy_add_req *req; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + /* Build the MESH_PROXY_ADD_REQ message */ + req = rwnx_msg_zalloc(MESH_PROXY_ADD_REQ, TASK_MESH, DRV_TASK_ID, + sizeof(struct mesh_proxy_add_req)); + + if (req) { + req->vif_idx = vif->vif_index; + memcpy(&req->ext_sta_addr, ext_addr, ETH_ALEN); + + /* Send the MESH_PROXY_ADD_REQ message to UMAC FW */ + 
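+        /* Sent as a simple notification: no confirmation message id is
+         * passed, like MESH_PEER_UPDATE_NTF and MESH_PATH_CREATE_REQ above. */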
rwnx_send_msg(rwnx_hw, req, 0, 0, NULL); + } +} + +int rwnx_send_tdls_peer_traffic_ind_req(struct rwnx_hw *rwnx_hw, struct rwnx_vif *rwnx_vif) +{ + struct tdls_peer_traffic_ind_req *tdls_peer_traffic_ind_req; + + if (!rwnx_vif->sta.tdls_sta) + return -ENOLINK; + + /* Build the TDLS_PEER_TRAFFIC_IND_REQ message */ + tdls_peer_traffic_ind_req = rwnx_msg_zalloc(TDLS_PEER_TRAFFIC_IND_REQ, TASK_TDLS, DRV_TASK_ID, + sizeof(struct tdls_peer_traffic_ind_req)); + + if (!tdls_peer_traffic_ind_req) + return -ENOMEM; + + /* Set parameters for the TDLS_PEER_TRAFFIC_IND_REQ message */ + tdls_peer_traffic_ind_req->vif_index = rwnx_vif->vif_index; + tdls_peer_traffic_ind_req->sta_idx = rwnx_vif->sta.tdls_sta->sta_idx; + memcpy(&(tdls_peer_traffic_ind_req->peer_mac_addr.array[0]), + rwnx_vif->sta.tdls_sta->mac_addr, ETH_ALEN); + tdls_peer_traffic_ind_req->dialog_token = 0; // check dialog token value + tdls_peer_traffic_ind_req->last_tid = rwnx_vif->sta.tdls_sta->tdls.last_tid; + tdls_peer_traffic_ind_req->last_sn = rwnx_vif->sta.tdls_sta->tdls.last_sn; + + /* Send the TDLS_PEER_TRAFFIC_IND_REQ message to LMAC FW */ + return rwnx_send_msg(rwnx_hw, tdls_peer_traffic_ind_req, 0, 0, NULL); +} + +int rwnx_send_config_monitor_req(struct rwnx_hw *rwnx_hw, + struct cfg80211_chan_def *chandef, + struct me_config_monitor_cfm *cfm) +{ + struct me_config_monitor_req *req; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + /* Build the ME_CONFIG_MONITOR_REQ message */ + req = rwnx_msg_zalloc(ME_CONFIG_MONITOR_REQ, TASK_ME, DRV_TASK_ID, + sizeof(struct me_config_monitor_req)); + if (!req) + return -ENOMEM; + + if (chandef) { + req->chan_set = true; + + req->chan.band = chandef->chan->band; + req->chan.type = bw2chnl[chandef->width]; + req->chan.prim20_freq = chandef->chan->center_freq; + req->chan.center1_freq = chandef->center_freq1; + req->chan.center2_freq = chandef->center_freq2; + req->chan.tx_power = chan_to_fw_pwr(chandef->chan->max_power); + + if (rwnx_hw->phy.limit_bw) + limit_chan_bw(&req->chan.type, req->chan.prim20_freq, &req->chan.center1_freq); + } else { + req->chan_set = false; + } + + req->uf = rwnx_hw->mod_params->uf; + req->auto_reply = rwnx_hw->mod_params->auto_reply; + + /* Send the ME_CONFIG_MONITOR_REQ message to FW */ + return rwnx_send_msg(rwnx_hw, req, 1, ME_CONFIG_MONITOR_CFM, cfm); +} +#endif /* CONFIG_RWNX_FULLMAC */ + +int rwnx_send_tdls_chan_switch_req(struct rwnx_hw *rwnx_hw, struct rwnx_vif *rwnx_vif, + struct rwnx_sta *rwnx_sta, bool sta_initiator, + u8 oper_class, struct cfg80211_chan_def *chandef, + struct tdls_chan_switch_cfm *cfm) +{ + struct tdls_chan_switch_req *tdls_chan_switch_req; + + + /* Build the TDLS_CHAN_SWITCH_REQ message */ + tdls_chan_switch_req = rwnx_msg_zalloc(TDLS_CHAN_SWITCH_REQ, TASK_TDLS, DRV_TASK_ID, + sizeof(struct tdls_chan_switch_req)); + + if (!tdls_chan_switch_req) + return -ENOMEM; + + /* Set parameters for the TDLS_CHAN_SWITCH_REQ message */ + tdls_chan_switch_req->vif_index = rwnx_vif->vif_index; + tdls_chan_switch_req->sta_idx = rwnx_sta->sta_idx; + memcpy(&(tdls_chan_switch_req->peer_mac_addr.array[0]), + rwnx_sta_addr(rwnx_sta), ETH_ALEN); + tdls_chan_switch_req->initiator = sta_initiator; + tdls_chan_switch_req->band = chandef->chan->band; + tdls_chan_switch_req->type = bw2chnl[chandef->width]; + tdls_chan_switch_req->prim20_freq = chandef->chan->center_freq; + tdls_chan_switch_req->center1_freq = chandef->center_freq1; + tdls_chan_switch_req->center2_freq = chandef->center_freq2; + tdls_chan_switch_req->tx_power = chan_to_fw_pwr(chandef->chan->max_power); + 
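+    /*
+     * The fields above mirror the cfg80211 chan_def: bw2chnl[] maps the
+     * nl80211 channel width to the firmware channel type and
+     * chan_to_fw_pwr() converts the channel max power to the firmware
+     * representation. Purely as an illustration, an 80 MHz chandef with a
+     * primary channel at 5180 MHz would yield prim20_freq = 5180 and
+     * center1_freq = 5210.
+     */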
tdls_chan_switch_req->op_class = oper_class; + + /* Send the TDLS_CHAN_SWITCH_REQ message to LMAC FW */ + return rwnx_send_msg(rwnx_hw, tdls_chan_switch_req, 1, TDLS_CHAN_SWITCH_CFM, cfm); +} + +int rwnx_send_tdls_cancel_chan_switch_req(struct rwnx_hw *rwnx_hw, + struct rwnx_vif *rwnx_vif, + struct rwnx_sta *rwnx_sta, + struct tdls_cancel_chan_switch_cfm *cfm) +{ + struct tdls_cancel_chan_switch_req *tdls_cancel_chan_switch_req; + + /* Build the TDLS_CHAN_SWITCH_REQ message */ + tdls_cancel_chan_switch_req = rwnx_msg_zalloc(TDLS_CANCEL_CHAN_SWITCH_REQ, TASK_TDLS, DRV_TASK_ID, + sizeof(struct tdls_cancel_chan_switch_req)); + if (!tdls_cancel_chan_switch_req) + return -ENOMEM; + + /* Set parameters for the TDLS_CHAN_SWITCH_REQ message */ + tdls_cancel_chan_switch_req->vif_index = rwnx_vif->vif_index; + tdls_cancel_chan_switch_req->sta_idx = rwnx_sta->sta_idx; + memcpy(&(tdls_cancel_chan_switch_req->peer_mac_addr.array[0]), + rwnx_sta_addr(rwnx_sta), ETH_ALEN); + + /* Send the TDLS_CHAN_SWITCH_REQ message to LMAC FW */ + return rwnx_send_msg(rwnx_hw, tdls_cancel_chan_switch_req, 1, TDLS_CANCEL_CHAN_SWITCH_CFM, cfm); +} + +#ifdef CONFIG_RWNX_BFMER +#ifdef CONFIG_RWNX_FULLMAC +void rwnx_send_bfmer_enable(struct rwnx_hw *rwnx_hw, struct rwnx_sta *rwnx_sta, + const struct ieee80211_vht_cap *vht_cap) +#endif /* CONFIG_RWNX_FULLMAC*/ +{ + struct mm_bfmer_enable_req *bfmer_en_req; +#ifdef CONFIG_RWNX_FULLMAC + __le32 vht_capability; + u8 rx_nss = 0; +#endif /* CONFIG_RWNX_FULLMAC */ + + RWNX_DBG(RWNX_FN_ENTRY_STR); + +#ifdef CONFIG_RWNX_FULLMAC + if (!vht_cap) { +#endif /* CONFIG_RWNX_FULLMAC */ + goto end; + } + +#ifdef CONFIG_RWNX_FULLMAC + vht_capability = vht_cap->vht_cap_info; +#endif /* CONFIG_RWNX_FULLMAC */ + + if (!(vht_capability & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE)) { + goto end; + } + +#ifdef CONFIG_RWNX_FULLMAC + rx_nss = rwnx_bfmer_get_rx_nss(vht_cap); +#endif /* CONFIG_RWNX_FULLMAC */ + + /* Allocate a structure that will contain the beamforming report */ + if (rwnx_bfmer_report_add(rwnx_hw, rwnx_sta, RWNX_BFMER_REPORT_SPACE_SIZE)) + { + goto end; + } + + /* Build the MM_BFMER_ENABLE_REQ message */ + bfmer_en_req = rwnx_msg_zalloc(MM_BFMER_ENABLE_REQ, TASK_MM, DRV_TASK_ID, + sizeof(struct mm_bfmer_enable_req)); + + /* Check message allocation */ + if (!bfmer_en_req) { + /* Free memory allocated for the report */ + rwnx_bfmer_report_del(rwnx_hw, rwnx_sta); + + /* Do not use beamforming */ + goto end; + } + + /* Provide DMA address to the MAC */ + bfmer_en_req->host_bfr_addr = rwnx_sta->bfm_report->dma_addr; + bfmer_en_req->host_bfr_size = RWNX_BFMER_REPORT_SPACE_SIZE; + bfmer_en_req->sta_idx = rwnx_sta->sta_idx; +#ifdef CONFIG_RWNX_FULLMAC + bfmer_en_req->aid = rwnx_sta->aid; + bfmer_en_req->rx_nss = rx_nss; +#endif /* CONFIG_RWNX_FULLMAC */ + + if (vht_capability & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE) { + bfmer_en_req->vht_mu_bfmee = true; + } else { + bfmer_en_req->vht_mu_bfmee = false; + } + + /* Send the MM_BFMER_EN_REQ message to LMAC FW */ + rwnx_send_msg(rwnx_hw, bfmer_en_req, 0, 0, NULL); + +end: + return; +} + +#ifdef CONFIG_RWNX_MUMIMO_TX +int rwnx_send_mu_group_update_req(struct rwnx_hw *rwnx_hw, struct rwnx_sta *rwnx_sta) +{ + struct mm_mu_group_update_req *req; + int group_id, i = 0; + u64 map; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + /* Build the MM_MU_GROUP_UPDATE_REQ message */ + req = rwnx_msg_zalloc(MM_MU_GROUP_UPDATE_REQ, TASK_MM, DRV_TASK_ID, + sizeof(struct mm_mu_group_update_req) + + rwnx_sta->group_info.cnt * sizeof(req->groups[0])); + + /* Check message 
allocation */ + if (!req) + return -ENOMEM; + + /* Go through the groups the STA belongs to */ + group_sta_for_each(rwnx_sta, group_id, map) { + int user_pos = rwnx_mu_group_sta_get_pos(rwnx_hw, rwnx_sta, group_id); + + if (WARN((i >= rwnx_sta->group_info.cnt), + "STA%d: Too much group (%d)\n", + rwnx_sta->sta_idx, i + 1)) + break; + + req->groups[i].group_id = group_id; + req->groups[i].user_pos = user_pos; + + i++; + } + + req->group_cnt = rwnx_sta->group_info.cnt; + req->sta_idx = rwnx_sta->sta_idx; + + /* Send the MM_MU_GROUP_UPDATE_REQ message to LMAC FW */ + return rwnx_send_msg(rwnx_hw, req, 1, MM_MU_GROUP_UPDATE_CFM, NULL); +} +#endif /* CONFIG_RWNX_MUMIMO_TX */ +#endif /* CONFIG_RWNX_BFMER */ + +/********************************************************************** + * Debug Messages + *********************************************************************/ +int rwnx_send_dbg_trigger_req(struct rwnx_hw *rwnx_hw, char *msg) +{ + struct mm_dbg_trigger_req *req; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + /* Build the MM_DBG_TRIGGER_REQ message */ + req = rwnx_msg_zalloc(MM_DBG_TRIGGER_REQ, TASK_MM, DRV_TASK_ID, + sizeof(struct mm_dbg_trigger_req)); + if (!req) + return -ENOMEM; + + /* Set parameters for the MM_DBG_TRIGGER_REQ message */ + strncpy(req->error, msg, sizeof(req->error)); + + /* Send the MM_DBG_TRIGGER_REQ message to LMAC FW */ + return rwnx_send_msg(rwnx_hw, req, 0, -1, NULL); +} + +int rwnx_send_dbg_mem_read_req(struct rwnx_hw *rwnx_hw, u32 mem_addr, + struct dbg_mem_read_cfm *cfm) +{ + struct dbg_mem_read_req *mem_read_req; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + /* Build the DBG_MEM_READ_REQ message */ + mem_read_req = rwnx_msg_zalloc(DBG_MEM_READ_REQ, TASK_DBG, DRV_TASK_ID, + sizeof(struct dbg_mem_read_req)); + if (!mem_read_req) + return -ENOMEM; + + /* Set parameters for the DBG_MEM_READ_REQ message */ + mem_read_req->memaddr = mem_addr; + + /* Send the DBG_MEM_READ_REQ message to LMAC FW */ + return rwnx_send_msg(rwnx_hw, mem_read_req, 1, DBG_MEM_READ_CFM, cfm); +} + +int rwnx_send_dbg_mem_write_req(struct rwnx_hw *rwnx_hw, u32 mem_addr, + u32 mem_data) +{ + struct dbg_mem_write_req *mem_write_req; + + //RWNX_DBG(RWNX_FN_ENTRY_STR); + + /* Build the DBG_MEM_WRITE_REQ message */ + mem_write_req = rwnx_msg_zalloc(DBG_MEM_WRITE_REQ, TASK_DBG, DRV_TASK_ID, + sizeof(struct dbg_mem_write_req)); + if (!mem_write_req) + return -ENOMEM; + + /* Set parameters for the DBG_MEM_WRITE_REQ message */ + mem_write_req->memaddr = mem_addr; + mem_write_req->memdata = mem_data; + + /* Send the DBG_MEM_WRITE_REQ message to LMAC FW */ + return rwnx_send_msg(rwnx_hw, mem_write_req, 1, DBG_MEM_WRITE_CFM, NULL); +} + +int rwnx_send_dbg_mem_mask_write_req(struct rwnx_hw *rwnx_hw, u32 mem_addr, + u32 mem_mask, u32 mem_data) +{ + struct dbg_mem_mask_write_req *mem_mask_write_req; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + /* Build the DBG_MEM_MASK_WRITE_REQ message */ + mem_mask_write_req = rwnx_msg_zalloc(DBG_MEM_MASK_WRITE_REQ, TASK_DBG, DRV_TASK_ID, + sizeof(struct dbg_mem_mask_write_req)); + if (!mem_mask_write_req) + return -ENOMEM; + + /* Set parameters for the DBG_MEM_MASK_WRITE_REQ message */ + mem_mask_write_req->memaddr = mem_addr; + mem_mask_write_req->memmask = mem_mask; + mem_mask_write_req->memdata = mem_data; + + /* Send the DBG_MEM_MASK_WRITE_REQ message to LMAC FW */ + return rwnx_send_msg(rwnx_hw, mem_mask_write_req, 1, DBG_MEM_MASK_WRITE_CFM, NULL); +} + +#ifdef CONFIG_RFTEST +int rwnx_send_rftest_req(struct rwnx_hw *rwnx_hw, u32_l cmd, u32_l argc, u8_l *argv, struct dbg_rftest_cmd_cfm 
*cfm) +{ + struct dbg_rftest_cmd_req *mem_rftest_cmd_req; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + /* Build the DBG_RFTEST_CMD_REQ message */ + mem_rftest_cmd_req = rwnx_msg_zalloc(DBG_RFTEST_CMD_REQ, TASK_DBG, DRV_TASK_ID, + sizeof(struct dbg_rftest_cmd_req)); + if (!mem_rftest_cmd_req) + return -ENOMEM; + + if(argc > 30) + return -ENOMEM; + + /* Set parameters for the DBG_MEM_MASK_WRITE_REQ message */ + mem_rftest_cmd_req->cmd = cmd; + mem_rftest_cmd_req->argc = argc; + if(argc != 0) + memcpy(mem_rftest_cmd_req->argv, argv, argc); + + /* Send the DBG_RFTEST_CMD_REQ message to LMAC FW */ + return rwnx_send_msg(rwnx_hw, mem_rftest_cmd_req, 1, DBG_RFTEST_CMD_CFM, cfm); +} +#endif + +int rwnx_send_dbg_set_mod_filter_req(struct rwnx_hw *rwnx_hw, u32 filter) +{ + struct dbg_set_mod_filter_req *set_mod_filter_req; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + /* Build the DBG_SET_MOD_FILTER_REQ message */ + set_mod_filter_req = + rwnx_msg_zalloc(DBG_SET_MOD_FILTER_REQ, TASK_DBG, DRV_TASK_ID, + sizeof(struct dbg_set_mod_filter_req)); + if (!set_mod_filter_req) + return -ENOMEM; + + /* Set parameters for the DBG_SET_MOD_FILTER_REQ message */ + set_mod_filter_req->mod_filter = filter; + + /* Send the DBG_SET_MOD_FILTER_REQ message to LMAC FW */ + return rwnx_send_msg(rwnx_hw, set_mod_filter_req, 1, DBG_SET_MOD_FILTER_CFM, NULL); +} + +int rwnx_send_dbg_set_sev_filter_req(struct rwnx_hw *rwnx_hw, u32 filter) +{ + struct dbg_set_sev_filter_req *set_sev_filter_req; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + /* Build the DBG_SET_SEV_FILTER_REQ message */ + set_sev_filter_req = + rwnx_msg_zalloc(DBG_SET_SEV_FILTER_REQ, TASK_DBG, DRV_TASK_ID, + sizeof(struct dbg_set_sev_filter_req)); + if (!set_sev_filter_req) + return -ENOMEM; + + /* Set parameters for the DBG_SET_SEV_FILTER_REQ message */ + set_sev_filter_req->sev_filter = filter; + + /* Send the DBG_SET_SEV_FILTER_REQ message to LMAC FW */ + return rwnx_send_msg(rwnx_hw, set_sev_filter_req, 1, DBG_SET_SEV_FILTER_CFM, NULL); +} + +int rwnx_send_dbg_get_sys_stat_req(struct rwnx_hw *rwnx_hw, + struct dbg_get_sys_stat_cfm *cfm) +{ + void *req; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + /* Allocate the message */ + req = rwnx_msg_zalloc(DBG_GET_SYS_STAT_REQ, TASK_DBG, DRV_TASK_ID, 0); + if (!req) + return -ENOMEM; + + /* Send the DBG_MEM_READ_REQ message to LMAC FW */ + return rwnx_send_msg(rwnx_hw, req, 1, DBG_GET_SYS_STAT_CFM, cfm); +} + +int rwnx_send_dbg_mem_block_write_req(struct rwnx_hw *rwnx_hw, u32 mem_addr, + u32 mem_size, u32 *mem_data) +{ + struct dbg_mem_block_write_req *mem_blk_write_req; + + //RWNX_DBG(RWNX_FN_ENTRY_STR); + + /* Build the DBG_MEM_BLOCK_WRITE_REQ message */ + mem_blk_write_req = rwnx_msg_zalloc(DBG_MEM_BLOCK_WRITE_REQ, TASK_DBG, DRV_TASK_ID, + sizeof(struct dbg_mem_block_write_req)); + if (!mem_blk_write_req) + return -ENOMEM; + + /* Set parameters for the DBG_MEM_BLOCK_WRITE_REQ message */ + mem_blk_write_req->memaddr = mem_addr; + mem_blk_write_req->memsize = mem_size; + memcpy(mem_blk_write_req->memdata, mem_data, mem_size); + + /* Send the DBG_MEM_BLOCK_WRITE_REQ message to LMAC FW */ + return rwnx_send_msg(rwnx_hw, mem_blk_write_req, 1, DBG_MEM_BLOCK_WRITE_CFM, NULL); +} + +int rwnx_send_dbg_mem_block_read_req(struct rwnx_hw *rwnx_hw, u32 mem_addr, + u32 mem_size, struct dbg_mem_block_read_cfm *cfm) +{ + struct dbg_mem_block_read_req *mem_blk_read_req; + + //RWNX_DBG(RWNX_FN_ENTRY_STR); + + /* Build the DBG_MEM_BLOCK_READ_REQ message */ + mem_blk_read_req = rwnx_msg_zalloc(DBG_MEM_BLOCK_READ_REQ, TASK_DBG, DRV_TASK_ID, + sizeof(struct 
dbg_mem_block_read_req)); + if (!mem_blk_read_req) + return -ENOMEM; + + /* Set parameters for the DBG_MEM_BLOCK_READ_REQ message */ + mem_blk_read_req->memaddr = mem_addr; + mem_blk_read_req->memsize = mem_size; + + /* Send the DBG_MEM_BLOCK_READ_REQ message to LMAC FW */ + return rwnx_send_msg(rwnx_hw, mem_blk_read_req, 1, DBG_MEM_BLOCK_READ_CFM, cfm); +} + +int rwnx_send_dbg_start_app_req(struct rwnx_hw *rwnx_hw, u32 boot_addr, + u32 boot_type) +{ + struct dbg_start_app_req *start_app_req; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + /* Build the DBG_START_APP_REQ message */ + start_app_req = rwnx_msg_zalloc(DBG_START_APP_REQ, TASK_DBG, DRV_TASK_ID, + sizeof(struct dbg_start_app_req)); + if (!start_app_req) + return -ENOMEM; + + /* Set parameters for the DBG_START_APP_REQ message */ + start_app_req->bootaddr = boot_addr; + start_app_req->boottype = boot_type; + + /* Send the DBG_START_APP_REQ message to LMAC FW */ + return rwnx_send_msg(rwnx_hw, start_app_req, 1, DBG_START_APP_CFM, NULL); +} + +int rwnx_send_dbg_gpio_write_req(struct rwnx_hw *rwnx_hw, u8 gpio_idx, u8 gpio_val) +{ + struct dbg_gpio_write_req *gpio_write_req; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + gpio_write_req = rwnx_msg_zalloc(DBG_GPIO_WRITE_REQ, TASK_DBG, DRV_TASK_ID, + sizeof(struct dbg_gpio_write_req)); + if (!gpio_write_req) + return -ENOMEM; + + gpio_write_req->gpio_idx = gpio_idx; + gpio_write_req->gpio_val = gpio_val; + + return rwnx_send_msg(rwnx_hw, gpio_write_req, 1, DBG_GPIO_WRITE_CFM, NULL); +} + +int rwnx_send_dbg_gpio_read_req(struct rwnx_hw *rwnx_hw, u8_l gpio_idx, struct dbg_gpio_read_cfm *cfm) +{ + struct dbg_gpio_read_req *gpio_read_req; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + gpio_read_req = rwnx_msg_zalloc(DBG_GPIO_READ_REQ, TASK_DBG, DRV_TASK_ID, + sizeof(struct dbg_gpio_read_req)); + if (!gpio_read_req) + return -ENOMEM; + + gpio_read_req->gpio_idx = gpio_idx; + + return rwnx_send_msg(rwnx_hw, gpio_read_req, 1, DBG_GPIO_READ_CFM, cfm); +} + +int rwnx_send_dbg_gpio_init_req(struct rwnx_hw *rwnx_hw, u8_l gpio_idx, u8_l gpio_dir, u8_l gpio_val) +{ + struct dbg_gpio_init_req *gpio_init_req; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + gpio_init_req = rwnx_msg_zalloc(DBG_GPIO_INIT_REQ, TASK_DBG, DRV_TASK_ID, + sizeof(struct dbg_gpio_init_req)); + if (!gpio_init_req) + return -ENOMEM; + + gpio_init_req->gpio_idx = gpio_idx; + gpio_init_req->gpio_dir = gpio_dir; + gpio_init_req->gpio_val = gpio_val; + + return rwnx_send_msg(rwnx_hw, gpio_init_req, 1, DBG_GPIO_INIT_CFM, NULL); +} + +#ifdef CONFIG_WOWLAN +int rwnx_send_set_pkt_filter_req(struct rwnx_hw *rwnx_hw, u8_l *param) +{ + struct mm_set_wakeup_info_req *req; + int i, err; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + /* Build the WAKEUP_INFO_REQ message */ + if (param[2] == 0) + req = rwnx_msg_zalloc(MM_SET_VENDOR_HWCONFIG_REQ, TASK_MM, DRV_TASK_ID, + sizeof(struct mm_set_wakeup_info_req)); + else + req = rwnx_msg_zalloc(MM_SET_VENDOR_HWCONFIG_REQ, TASK_MM, DRV_TASK_ID, + sizeof(struct mm_set_wakeup_info_req) + + 2 * param[2] - 1); + if (!req) + return -ENOMEM; + + req->hwconfig_id = WAKEUP_INFO_REQ; + req->code = (u16_l)param[0]; + req->offset = (u16_l)param[1]; + req->length = (u16_l)param[2]; + AICWFDBG(LOGINFO, + "set_wakeup_info_req: id: 0x%x, offset: 0x%x, length: 0x%x\n", + req->code, req->offset, req->length); + // AICWFDBG(LOG_INFO, "set_wakeup_info_req mask_pattern:\n"); + for (i = 0; i < 2 * req->length; i++) { + req->mask_and_pattern[i] = param[3 + i]; + // AICWFDBG(LOG_INFO, KERN_CONT"%02x ", req->mask_and_pattern[i]); + } + + /* Send the MM_SET_VENDOR_HWCONFIG_CFM 
message to UMAC FW */ + err = rwnx_send_msg(rwnx_hw, req, 1, MM_SET_VENDOR_HWCONFIG_CFM, NULL); + if (err) { + AICWFDBG(LOGINFO, "%s msg_fail\n", __func__); + return err; + } + + return err; +} +#endif + +int rwnx_send_cfg_rssi_req(struct rwnx_hw *rwnx_hw, u8 vif_index, int rssi_thold, u32 rssi_hyst) +{ + struct mm_cfg_rssi_req *req; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + /* Build the MM_CFG_RSSI_REQ message */ + req = rwnx_msg_zalloc(MM_CFG_RSSI_REQ, TASK_MM, DRV_TASK_ID, + sizeof(struct mm_cfg_rssi_req)); + if (!req) + return -ENOMEM; + + if(rwnx_hw->vif_table[vif_index]==NULL) + return 0; + + /* Set parameters for the MM_CFG_RSSI_REQ message */ + req->vif_index = vif_index; + req->rssi_thold = (s8)rssi_thold; + req->rssi_hyst = (u8)rssi_hyst; + + /* Send the MM_CFG_RSSI_REQ message to LMAC FW */ + return rwnx_send_msg(rwnx_hw, req, 1, MM_CFG_RSSI_CFM, NULL); +} + +//#ifdef CONFIG_USB_BT +int rwnx_send_reboot(struct rwnx_hw *rwnx_hw) +{ + int ret = 0; + u32 delay = 2 *1000; //1s + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + ret = rwnx_send_dbg_start_app_req(rwnx_hw, delay, HOST_START_APP_REBOOT); + return ret; +} +//#endif // CONFIG_USB_BT +#ifdef CONFIG_WOWLAN +int rwnx_send_dummy_reboot(struct rwnx_hw *rwnx_hw) +{ + int ret = 0; + u32 delay = 2 *1000; //1s + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + ret = rwnx_send_dbg_start_app_req(rwnx_hw, delay, 6); + return ret; +} +#endif diff --git a/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_msg_tx.h b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_msg_tx.h new file mode 100644 index 0000000..1974095 --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_msg_tx.h @@ -0,0 +1,206 @@ +/** + **************************************************************************************** + * + * @file rwnx_msg_tx.h + * + * @brief TX function declarations + * + * Copyright (C) RivieraWaves 2012-2019 + * + **************************************************************************************** + */ + +#ifndef _RWNX_MSG_TX_H_ +#define _RWNX_MSG_TX_H_ + +#include "rwnx_defs.h" + +#ifdef RF_WRITE_FILE +#define FW_RF_CALIB_FILE "aic_rf_calib.bin" +#endif + + +int rwnx_send_reset(struct rwnx_hw *rwnx_hw); +int rwnx_send_start(struct rwnx_hw *rwnx_hw); +int rwnx_send_version_req(struct rwnx_hw *rwnx_hw, struct mm_version_cfm *cfm); +int rwnx_send_add_if(struct rwnx_hw *rwnx_hw, const unsigned char *mac, + enum nl80211_iftype iftype, bool p2p, struct mm_add_if_cfm *cfm); +int rwnx_send_remove_if(struct rwnx_hw *rwnx_hw, u8 vif_index, bool defer); +int rwnx_send_set_channel(struct rwnx_hw *rwnx_hw, int phy_idx, + struct mm_set_channel_cfm *cfm); +int rwnx_send_key_add(struct rwnx_hw *rwnx_hw, u8 vif_idx, u8 sta_idx, bool pairwise, + u8 *key, u8 key_len, u8 key_idx, u8 cipher_suite, + struct mm_key_add_cfm *cfm); +int rwnx_send_key_del(struct rwnx_hw *rwnx_hw, uint8_t hw_key_idx); +int rwnx_send_bcn(struct rwnx_hw *rwnx_hw,u8 *buf, u8 vif_idx, u16 bcn_len); + +int rwnx_send_bcn_change(struct rwnx_hw *rwnx_hw, u8 vif_idx, u32 bcn_addr, + u16 bcn_len, u16 tim_oft, u16 tim_len, u16 *csa_oft); +int rwnx_send_tim_update(struct rwnx_hw *rwnx_hw, u8 vif_idx, u16 aid, + u8 tx_status); +int rwnx_send_roc(struct rwnx_hw *rwnx_hw, struct rwnx_vif *vif, + struct ieee80211_channel *chan, unsigned int duration, struct mm_remain_on_channel_cfm *roc_cfm); +int rwnx_send_cancel_roc(struct rwnx_hw *rwnx_hw); +int rwnx_send_set_power(struct rwnx_hw *rwnx_hw, u8 vif_idx, s8 pwr, + struct mm_set_power_cfm *cfm); +int rwnx_send_set_edca(struct rwnx_hw *rwnx_hw, u8 hw_queue, u32 
param, + bool uapsd, u8 inst_nbr); +int rwnx_send_tdls_chan_switch_req(struct rwnx_hw *rwnx_hw, struct rwnx_vif *rwnx_vif, + struct rwnx_sta *rwnx_sta, bool sta_initiator, + u8 oper_class, struct cfg80211_chan_def *chandef, + struct tdls_chan_switch_cfm *cfm); +int rwnx_send_tdls_cancel_chan_switch_req(struct rwnx_hw *rwnx_hw, + struct rwnx_vif *rwnx_vif, + struct rwnx_sta *rwnx_sta, + struct tdls_cancel_chan_switch_cfm *cfm); + +#ifdef CONFIG_RWNX_P2P_DEBUGFS +int rwnx_send_p2p_oppps_req(struct rwnx_hw *rwnx_hw, struct rwnx_vif *rwnx_vif, + u8 ctw, struct mm_set_p2p_oppps_cfm *cfm); +int rwnx_send_p2p_noa_req(struct rwnx_hw *rwnx_hw, struct rwnx_vif *rwnx_vif, + int count, int interval, int duration, + bool dyn_noa, struct mm_set_p2p_noa_cfm *cfm); +#endif /* CONFIG_RWNX_P2P_DEBUGFS */ + +#ifdef AICWF_ARP_OFFLOAD +int rwnx_send_arpoffload_en_req(struct rwnx_hw *rwnx_hw, struct rwnx_vif *rwnx_vif, + u32_l ipaddr, u8_l enable); +#endif +int rwnx_send_rf_config_req(struct rwnx_hw *rwnx_hw, u8_l ofst, u8_l sel, u8_l *tbl, u16_l len); +int rwnx_send_rf_calib_req(struct rwnx_hw *rwnx_hw, struct mm_set_rf_calib_cfm *cfm); +int rwnx_send_get_macaddr_req(struct rwnx_hw *rwnx_hw, struct mm_get_mac_addr_cfm *cfm); + +#ifdef CONFIG_RWNX_FULLMAC +int rwnx_send_me_config_req(struct rwnx_hw *rwnx_hw); +int rwnx_send_me_chan_config_req(struct rwnx_hw *rwnx_hw, char *ccode); +int rwnx_send_me_set_control_port_req(struct rwnx_hw *rwnx_hw, bool opened, + u8 sta_idx); +int rwnx_send_me_sta_add(struct rwnx_hw *rwnx_hw, struct station_parameters *params, + const u8 *mac, u8 inst_nbr, struct me_sta_add_cfm *cfm); +int rwnx_send_me_sta_del(struct rwnx_hw *rwnx_hw, u8 sta_idx, bool tdls_sta); +int rwnx_send_me_traffic_ind(struct rwnx_hw *rwnx_hw, u8 sta_idx, bool uapsd, u8 tx_status); +int rwnx_send_me_rc_stats(struct rwnx_hw *rwnx_hw, u8 sta_idx, + struct me_rc_stats_cfm *cfm); +int rwnx_send_me_rc_set_rate(struct rwnx_hw *rwnx_hw, + u8 sta_idx, + u16 rate_idx); +int rwnx_send_me_set_ps_mode(struct rwnx_hw *rwnx_hw, u8 ps_mode); +int rwnx_send_me_set_lp_level(struct rwnx_hw *rwnx_hw, u8 lp_level); +int rwnx_send_sm_connect_req(struct rwnx_hw *rwnx_hw, + struct rwnx_vif *rwnx_vif, + struct cfg80211_connect_params *sme, + struct sm_connect_cfm *cfm); +int rwnx_send_sm_disconnect_req(struct rwnx_hw *rwnx_hw, + struct rwnx_vif *rwnx_vif, + u16 reason); +int rwnx_send_sm_external_auth_required_rsp(struct rwnx_hw *rwnx_hw, + struct rwnx_vif *rwnx_vif, + u16 status); +int rwnx_send_apm_start_req(struct rwnx_hw *rwnx_hw, struct rwnx_vif *vif, + struct cfg80211_ap_settings *settings, + struct apm_start_cfm *cfm, + struct rwnx_ipc_elem_var *elem); +int rwnx_send_apm_stop_req(struct rwnx_hw *rwnx_hw, struct rwnx_vif *vif); +int rwnx_send_scanu_req(struct rwnx_hw *rwnx_hw, struct rwnx_vif *rwnx_vif, + struct cfg80211_scan_request *param); +int rwnx_send_scanu_cancel_req(struct rwnx_hw *rwnx_hw, + struct scan_cancel_cfm *cfm); + +int rwnx_send_apm_start_cac_req(struct rwnx_hw *rwnx_hw, struct rwnx_vif *vif, + struct cfg80211_chan_def *chandef, + struct apm_start_cac_cfm *cfm); +int rwnx_send_apm_stop_cac_req(struct rwnx_hw *rwnx_hw, struct rwnx_vif *vif); +int rwnx_send_tdls_peer_traffic_ind_req(struct rwnx_hw *rwnx_hw, struct rwnx_vif *rwnx_vif); +int rwnx_send_config_monitor_req(struct rwnx_hw *rwnx_hw, + struct cfg80211_chan_def *chandef, + struct me_config_monitor_cfm *cfm); +int rwnx_send_mesh_start_req(struct rwnx_hw *rwnx_hw, struct rwnx_vif *vif, + const struct mesh_config *conf, const struct mesh_setup *setup, + 
struct mesh_start_cfm *cfm); +int rwnx_send_mesh_stop_req(struct rwnx_hw *rwnx_hw, struct rwnx_vif *vif, + struct mesh_stop_cfm *cfm); +int rwnx_send_mesh_update_req(struct rwnx_hw *rwnx_hw, struct rwnx_vif *vif, + u32 mask, const struct mesh_config *p_mconf, struct mesh_update_cfm *cfm); +int rwnx_send_mesh_peer_info_req(struct rwnx_hw *rwnx_hw, struct rwnx_vif *vif, + u8 sta_idx, struct mesh_peer_info_cfm *cfm); +void rwnx_send_mesh_peer_update_ntf(struct rwnx_hw *rwnx_hw, struct rwnx_vif *vif, + u8 sta_idx, u8 mlink_state); +void rwnx_send_mesh_path_create_req(struct rwnx_hw *rwnx_hw, struct rwnx_vif *vif, u8 *tgt_addr); +int rwnx_send_mesh_path_update_req(struct rwnx_hw *rwnx_hw, struct rwnx_vif *vif, const u8 *tgt_addr, + const u8 *p_nhop_addr, struct mesh_path_update_cfm *cfm); +void rwnx_send_mesh_proxy_add_req(struct rwnx_hw *rwnx_hw, struct rwnx_vif *vif, u8 *ext_addr); +#endif /* CONFIG_RWNX_FULLMAC */ + +#ifdef CONFIG_RWNX_BFMER +#ifdef CONFIG_RWNX_FULLMAC +void rwnx_send_bfmer_enable(struct rwnx_hw *rwnx_hw, struct rwnx_sta *rwnx_sta, + const struct ieee80211_vht_cap *vht_cap); +#endif /* CONFIG_RWNX_FULLMAC */ +#ifdef CONFIG_RWNX_MUMIMO_TX +int rwnx_send_mu_group_update_req(struct rwnx_hw *rwnx_hw, struct rwnx_sta *rwnx_sta); +#endif /* CONFIG_RWNX_MUMIMO_TX */ +#endif /* CONFIG_RWNX_BFMER */ + +/* Debug messages */ +int rwnx_send_dbg_trigger_req(struct rwnx_hw *rwnx_hw, char *msg); +int rwnx_send_dbg_mem_read_req(struct rwnx_hw *rwnx_hw, u32 mem_addr, + struct dbg_mem_read_cfm *cfm); +int rwnx_send_dbg_mem_write_req(struct rwnx_hw *rwnx_hw, u32 mem_addr, + u32 mem_data); +int rwnx_send_dbg_mem_mask_write_req(struct rwnx_hw *rwnx_hw, u32 mem_addr, + u32 mem_mask, u32 mem_data); +int rwnx_send_dbg_set_mod_filter_req(struct rwnx_hw *rwnx_hw, u32 filter); +#ifdef CONFIG_RFTEST +int rwnx_send_rftest_req(struct rwnx_hw *rwnx_hw, u32_l cmd, u32_l argc, u8_l *argv, struct dbg_rftest_cmd_cfm *cfm); +#endif +int rwnx_send_dbg_set_sev_filter_req(struct rwnx_hw *rwnx_hw, u32 filter); +int rwnx_send_dbg_get_sys_stat_req(struct rwnx_hw *rwnx_hw, + struct dbg_get_sys_stat_cfm *cfm); +int rwnx_send_dbg_mem_block_write_req(struct rwnx_hw *rwnx_hw, u32 mem_addr, + u32 mem_size, u32 *mem_data); +int rwnx_send_dbg_mem_block_read_req(struct rwnx_hw *rwnx_hw, u32 mem_addr, + u32 mem_size, struct dbg_mem_block_read_cfm *cfm); +int rwnx_send_dbg_start_app_req(struct rwnx_hw *rwnx_hw, u32 boot_addr, + u32 boot_type); +int rwnx_send_dbg_gpio_write_req(struct rwnx_hw *rwnx_hw, u8_l gpio_idx, u8_l gpio_val); +int rwnx_send_dbg_gpio_read_req(struct rwnx_hw *rwnx_hw, u8_l gpio_idx, struct dbg_gpio_read_cfm *cfm); +int rwnx_send_dbg_gpio_init_req(struct rwnx_hw *rwnx_hw, u8_l gpio_idx, u8_l gpio_dir, u8_l gpio_val); +int rwnx_send_cfg_rssi_req(struct rwnx_hw *rwnx_hw, u8 vif_index, int rssi_thold, u32 rssi_hyst); +int rwnx_send_disable_agg_req(struct rwnx_hw *rwnx_hw, u8_l agg_disable, u8_l agg_disable_rx, u8_l sta_idx); +int rwnx_send_coex_req(struct rwnx_hw *rwnx_hw, u8_l disable_coexnull, u8_l enable_nullcts); +int rwnx_send_get_sta_info_req(struct rwnx_hw *rwnx_hw, u8_l sta_idx, struct mm_get_sta_info_cfm *cfm); +int rwnx_send_set_stack_start_req(struct rwnx_hw *rwnx_hw, u8_l on, u8_l efuse_valid, u8_l set_vendor_info, + u8_l fwtrace_redir_en, struct mm_set_stack_start_cfm *cfm); +int rwnx_send_txop_req(struct rwnx_hw *rwnx_hw, uint16_t *txop, u8_l long_nav_en, u8_l cfe_en); +int rwnx_send_set_temp_comp_req(struct rwnx_hw *rwnx_hw, struct mm_set_vendor_swconfig_cfm *cfm); +int 
rwnx_send_vendor_hwconfig_req(struct rwnx_hw *rwnx_hw, uint32_t hwconfig_id, int32_t *param, int32_t *param_out); +int rwnx_send_vendor_swconfig_req(struct rwnx_hw *rwnx_hw, uint32_t swconfig_id, int32_t *param_in, int32_t *param_out); +int rwnx_send_mask_set_ext_flags_req(struct rwnx_hw *rwnx_hw, uint32_t flags_mask, uint32_t flags_val, struct mm_set_vendor_swconfig_cfm *cfm); +int rwnx_send_vendor_hwconfig_req_x2(struct rwnx_hw *rwnx_hw, uint32_t hwconfig_id, int32_t *param, int32_t *param_out); +int rwnx_send_vendor_swconfig_req_x2(struct rwnx_hw *rwnx_hw, uint32_t swconfig_id, int32_t *param_in, int32_t *param_out); + +int rwnx_send_get_fw_version_req(struct rwnx_hw *rwnx_hw, struct mm_get_fw_version_cfm *cfm); +int rwnx_send_txpwr_idx_req(struct rwnx_hw *rwnx_hw); +int rwnx_send_txpwr_ofst_req(struct rwnx_hw *rwnx_hw); +int rwnx_send_txpwr_ofst2x_req(struct rwnx_hw *rwnx_hw); +int rwnx_send_txpwr_ofst2x_v2_req(struct rwnx_hw *rwnx_hw); +int rwnx_send_set_filter(struct rwnx_hw *rwnx_hw, uint32_t filter); +int rwnx_send_txpwr_lvl_req(struct rwnx_hw *rwnx_hw); +int rwnx_send_txpwr_lvl_v3_req(struct rwnx_hw *rwnx_hw); +int rwnx_send_txpwr_lvl_v4_req(struct rwnx_hw *rwnx_hw); +int rwnx_send_txpwr_lvl_adj_req(struct rwnx_hw *rwnx_hw); +#ifdef CONFIG_WOWLAN +int rwnx_send_set_pkt_filter_req(struct rwnx_hw *rwnx_hw, u8_l *param); +int rwnx_send_dummy_reboot(struct rwnx_hw *rwnx_hw); +#endif +#ifdef CONFIG_DYNAMIC_PERPWR +int rwnx_send_txpwr_per_sta_req(struct rwnx_hw *rwnx_hw, struct rwnx_sta *sta); +#endif + + +//#ifdef CONFIG_USB_BT +int rwnx_send_reboot(struct rwnx_hw *rwnx_hw); +//#endif // CONFIG_USB_BT + + +#endif /* _RWNX_MSG_TX_H_ */ diff --git a/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_mu_group.c b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_mu_group.c new file mode 100644 index 0000000..def09a8 --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_mu_group.c @@ -0,0 +1,659 @@ +/** + ****************************************************************************** + * + * @file rwnx_mu_group.c + * + * Copyright (C) RivieraWaves 2016-2019 + * + ****************************************************************************** + */ + +#include "rwnx_defs.h" +#include "rwnx_msg_tx.h" +#include "rwnx_events.h" + + +/** + * rwnx_mu_group_sta_init - Initialize group information for a STA + * + * @sta: Sta to initialize + */ +void rwnx_mu_group_sta_init(struct rwnx_sta *sta, + const struct ieee80211_vht_cap *vht_cap) +{ + sta->group_info.map = 0; + sta->group_info.cnt = 0; + sta->group_info.active.next = LIST_POISON1; + sta->group_info.update.next = LIST_POISON1; + sta->group_info.last_update = 0; + sta->group_info.traffic = 0; + sta->group_info.group = 0; + + if (!vht_cap || + !(vht_cap->vht_cap_info & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)) { + sta->group_info.map = RWNX_SU_GROUP; + } +} + +/** + * rwnx_mu_group_sta_del - Remove a sta from all MU group + * + * @rwnx_hw: main driver data + * @sta: STA to remove + * + * Remove one sta from all the MU groups it belongs to. 
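+ *
+ * For illustration (hypothetical values): a STA whose group_info.map equals
+ * BIT_ULL(3) | BIT_ULL(7) is a member of groups 3 and 7; after this call its
+ * map and cnt are cleared, each of those groups loses one user, and a group
+ * left with a single user is dissolved as well.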
+ */ +void rwnx_mu_group_sta_del(struct rwnx_hw *rwnx_hw, struct rwnx_sta *sta) +{ + struct rwnx_mu_info *mu = &rwnx_hw->mu; + int i, j, group_id; + bool lock_taken; + u64 map; + + lock_taken = (down_interruptible(&mu->lock) == 0); + + group_sta_for_each(sta, group_id, map) { + struct rwnx_mu_group *group = rwnx_mu_group_from_id(mu, group_id); + + for (i = 0; i < CONFIG_USER_MAX; i++) { + if (group->users[i] == sta) { + group->users[i] = NULL; + group->user_cnt --; + /* Don't keep group with only one user */ + if (group->user_cnt == 1) { + for (j = 0; j < CONFIG_USER_MAX; j++) { + if (group->users[j]) { + group->users[j]->group_info.cnt--; + group->users[j]->group_info.map &= ~BIT_ULL(group->group_id); + if (group->users[j]->group_info.group == group_id) + group->users[j]->group_info.group = 0; + group->user_cnt --; + break; + } + } + mu->group_cnt--; + trace_mu_group_delete(group->group_id); + } else { + trace_mu_group_update(group); + } + break; + } + } + + WARN((i == CONFIG_USER_MAX), "sta %d doesn't belongs to group %d", + sta->sta_idx, group_id); + } + + sta->group_info.map = 0; + sta->group_info.cnt = 0; + sta->group_info.traffic = 0; + + if (sta->group_info.active.next != LIST_POISON1) + list_del(&sta->group_info.active); + + if (sta->group_info.update.next != LIST_POISON1) + list_del(&sta->group_info.update); + + if (lock_taken) + up(&mu->lock); +} + +/** + * rwnx_mu_group_sta_get_map - Get the list of group a STA belongs to + * + * @sta: pointer to the sta + * + * @return the list of group a STA belongs to as a bitfield + */ +u64 rwnx_mu_group_sta_get_map(struct rwnx_sta *sta) +{ + if (sta) + return sta->group_info.map; + return 0; +} + +/** + * rwnx_mu_group_sta_get_pos - Get sta position in a group + * + * @rwnx_hw: main driver data + * @sta: pointer to the sta + * @group_id: Group id + * + * @return the positon of @sta in group @group_id or -1 if the sta + * doesn't belongs to the group (or group id is invalid) + */ +int rwnx_mu_group_sta_get_pos(struct rwnx_hw *rwnx_hw, struct rwnx_sta *sta, + int group_id) +{ + struct rwnx_mu_group *group; + int i; + + group = rwnx_mu_group_from_id(&rwnx_hw->mu, group_id); + if (!group) + return -1; + + for (i = 0; i < CONFIG_USER_MAX; i++) { + if (group->users[i] == sta) + return i; + } + + WARN(1, "sta %d doesn't belongs to group %d", + sta->sta_idx, group_id); + return -1; +} + +/** + * rwnx_mu_group_move_head - Move (or add) one element at the top of a list + * + * @list: list pointer + * @elem: element to move (or add) at the top of @list + * + */ +static inline +void rwnx_mu_group_move_head(struct list_head *list, struct list_head *elem) +{ + if (elem->next != LIST_POISON1) { + __list_del_entry(elem); + } + list_add(elem, list); +} + +/** + * rwnx_mu_group_remove_users - Remove all the users of a group + * + * @mu: pointer on MU info + * @group: pointer on group to remove users from + * + * Loop over all users one one group and remove this group from their + * map (and count). + * Each users is also added to the update_sta list, so that group info + * will be resent to fw for this user. 
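+ *
+ * Concretely, for every user of @group this clears BIT_ULL(group->group_id)
+ * from the user's group_info.map, decrements its group_info.cnt and queues
+ * it on mu->update_sta, from where the group work later issues
+ * rwnx_send_mu_group_update_req() for it.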
+ */
+static inline
+void rwnx_mu_group_remove_users(struct rwnx_mu_info *mu,
+                                struct rwnx_mu_group *group)
+{
+    struct rwnx_sta *sta;
+    int i, group_id = group->group_id;
+
+    for (i = 0; i < CONFIG_USER_MAX; i++) {
+        if (group->users[i]) {
+            sta = group->users[i];
+            group->users[i] = NULL;
+            sta->group_info.cnt--;
+            sta->group_info.map &= ~BIT_ULL(group_id);
+            rwnx_mu_group_move_head(&mu->update_sta,
+                                    &sta->group_info.update);
+        }
+    }
+
+    if (group->user_cnt)
+        mu->group_cnt--;
+    group->user_cnt = 0;
+    trace_mu_group_delete(group_id);
+}
+
+/**
+ * rwnx_mu_group_add_users - Add users to a group
+ *
+ * @mu: pointer on MU info
+ * @group: pointer on group to add users in
+ * @nb_user: number of users to add
+ * @users: table of users to add
+ *
+ * Add @nb_user users to @group (which may already have users).
+ * Each new user is added to the first free position.
+ * It is assumed that @group has at least @nb_user free positions. If that is
+ * not the case, it only adds the number of users needed to complete the group.
+ * Each user (effectively added to @group) is also added to the update_sta
+ * list, so that group info will be resent to fw for this user.
+ */
+static inline
+void rwnx_mu_group_add_users(struct rwnx_mu_info *mu,
+                             struct rwnx_mu_group *group,
+                             int nb_user, struct rwnx_sta **users)
+{
+    int i, j, group_id = group->group_id;
+
+    if (!group->user_cnt)
+        mu->group_cnt++;
+
+    j = 0;
+    for (i = 0; i < nb_user; i++) {
+        for (; j < CONFIG_USER_MAX; j++) {
+            if (group->users[j] == NULL) {
+                group->users[j] = users[i];
+                users[i]->group_info.cnt++;
+                users[i]->group_info.map |= BIT_ULL(group_id);
+
+                rwnx_mu_group_move_head(&(mu->update_sta),
+                                        &(users[i]->group_info.update));
+                group->user_cnt++;
+                j++;
+                break;
+            }
+
+            WARN(j == (CONFIG_USER_MAX - 1),
+                 "Too many users for group %d (nb_user=%d)",
+                 group_id, group->user_cnt + nb_user - i);
+        }
+    }
+
+    trace_mu_group_update(group);
+}
+
+
+/**
+ * rwnx_mu_group_create_one - create one group with a specific set of users
+ *
+ * @mu: pointer on MU info
+ * @nb_user: number of users to include in the group (<= CONFIG_USER_MAX)
+ * @users: table of users
+ *
+ * Try to create a new group with a specific set of users.
+ * 1- First it checks if a group containing all these users already exists.
+ *
+ * 2- Then it checks if it is possible to complete a group which already
+ * contains at least one user.
+ *
+ * 3- Finally it creates a new group.
+ * To do so, it takes the last group of the active_groups list, removes all
+ * its current users and adds the new ones.
+ *
+ * In all cases, the selected group is moved to the top of the active_groups
+ * list.
+ *
+ * @return 1 if a new group has been created and 0 otherwise
+ */
+static
+int rwnx_mu_group_create_one(struct rwnx_mu_info *mu, int nb_user,
+                             struct rwnx_sta **users, int *nb_group_left)
+{
+    int i, group_id;
+    struct rwnx_mu_group *group;
+    u64 group_match;
+    u64 group_avail;
+
+    group_match = users[0]->group_info.map;
+    group_avail = users[0]->group_info.map;
+    for (i = 1; i < nb_user; i++) {
+        group_match &= users[i]->group_info.map;
+        group_avail |= users[i]->group_info.map;
+    }
+
+    if (group_match) {
+        /* a group (or more) with all the users already exists */
+        group_id = RWNX_GET_FIRST_GROUP_ID(group_match);
+        group = rwnx_mu_group_from_id(mu, group_id);
+        rwnx_mu_group_move_head(&mu->active_groups, &group->list);
+        return 0;
+    }
+
+#if CONFIG_USER_MAX > 2
+    if (group_avail) {
+        /* check if we can complete a group */
+        struct rwnx_sta *users2[CONFIG_USER_MAX];
+        int nb_user2;
+
+        group_for_each(group_id, group_avail) {
+            group = rwnx_mu_group_from_id(mu, group_id);
+            if (group->user_cnt == CONFIG_USER_MAX)
+                continue;
+
+            nb_user2 = 0;
+            for (i = 0; i < nb_user; i++) {
+                if (!(users[i]->group_info.map & BIT_ULL(group_id))) {
+                    users2[nb_user2] = users[i];
+                    nb_user2++;
+                }
+            }
+
+            if ((group->user_cnt + nb_user2) <= CONFIG_USER_MAX) {
+                rwnx_mu_group_add_users(mu, group, nb_user2, users2);
+                rwnx_mu_group_move_head(&mu->active_groups, &group->list);
+                return 0;
+            }
+        }
+    }
+#endif /* CONFIG_USER_MAX > 2 */
+
+    /* create a new group */
+    group = list_last_entry(&mu->active_groups, struct rwnx_mu_group, list);
+    rwnx_mu_group_remove_users(mu, group);
+    rwnx_mu_group_add_users(mu, group, nb_user, users);
+    rwnx_mu_group_move_head(&mu->active_groups, &group->list);
+    (*nb_group_left)--;
+
+    return 1;
+}
+
+/**
+ * rwnx_mu_group_create - Create new groups containing one specific sta
+ *
+ * @mu: pointer on MU info
+ * @sta: sta to add in each group
+ * @nb_group_left: maximum number of new groups allowed (updated on exit)
+ *
+ * This will try to create "all the possible" groups with a specific sta being
+ * a member of all these groups.
+ * The function simply loops over the @active_sta list (starting from @sta).
+ * When it has (CONFIG_USER_MAX - 1) users it tries to create a new group with
+ * these users (plus @sta).
+ * The loop ends when there are no more users, or no more new groups are
+ * allowed.
+ */
+static
+void rwnx_mu_group_create(struct rwnx_mu_info *mu, struct rwnx_sta *sta,
+                          int *nb_group_left)
+{
+    struct rwnx_sta *user_sta = sta;
+    struct rwnx_sta *users[CONFIG_USER_MAX];
+    int nb_user = 1;
+
+    users[0] = sta;
+    while (*nb_group_left) {
+
+        list_for_each_entry_continue(user_sta, &mu->active_sta, group_info.active) {
+            users[nb_user] = user_sta;
+            if (++nb_user == CONFIG_USER_MAX) {
+                break;
+            }
+        }
+
+        if (nb_user > 1) {
+            if (rwnx_mu_group_create_one(mu, nb_user, users, nb_group_left))
+                (*nb_group_left)--;
+
+            if (nb_user < CONFIG_USER_MAX)
+                break;
+            else
+                nb_user = 1;
+        } else
+            break;
+    }
+}
+
+/**
+ * rwnx_mu_group_work - process function of the "group_work"
+ *
+ * The work is scheduled when several sta (MU beamformee capable) are active.
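+ * (rwnx_mu_set_active_sta() schedules it with an RWNX_MU_GROUP_INTERVAL ms
+ * delay as soon as the @active_sta list holds more than one station.)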
+ * When called, the @active_sta contains the list of the active sta (starting + * from the most recent one), and @active_groups is the list of all possible + * groups ordered so that the first one is the most recently used. + * + * This function will create new groups, starting from group containing the + * most "active" sta. + * For example if the list of sta is : + * sta8 -> sta3 -> sta4 -> sta7 -> sta1 + * and the number of user per group is 3, it will create grooups : + * - sta8 / sta3 / sta4 + * - sta8 / sta7 / sta1 + * - sta3 / sta4 / sta7 + * - sta3 / sta1 + * - sta4 / sta7 / sta1 + * - sta7 / sta1 + * + * To create new group, the least used group are first selected. + * It is only allowed to create NX_MU_GROUP_MAX per iteration. + * + * Once groups have been updated, mu group information is update to the fw. + * To do so it use the @update_sta list to know which sta has been affected. + * As it is necessary to wait for fw confirmation before using this new group + * MU is temporarily disabled during group update + * + * Work is then rescheduled. + * + * At the end of the function, both @active_sta and @update_sta list are empty. + * + * Note: + * - This is still a WIP, and will require more tuning + * - not all combinations are created, to avoid to much processing. + * - reschedule delay should be adaptative + */ +void rwnx_mu_group_work(struct work_struct *ws) +{ + struct delayed_work *dw = container_of(ws, struct delayed_work, work); + struct rwnx_mu_info *mu = container_of(dw, struct rwnx_mu_info, group_work); + struct rwnx_hw *rwnx_hw = container_of(mu, struct rwnx_hw, mu); + struct rwnx_sta *sta, *next; + int nb_group_left = NX_MU_GROUP_MAX; + + if (WARN(!rwnx_hw->mod_params->mutx, + "In group formation work, but mutx disabled")) + return; + + if (down_interruptible(&mu->lock) != 0) + return; + + mu->update_count++; + if (!mu->update_count) + mu->update_count++; + + list_for_each_entry_safe(sta, next, &mu->active_sta, group_info.active) { + if (nb_group_left) + rwnx_mu_group_create(mu, sta, &nb_group_left); + + sta->group_info.last_update = mu->update_count; + list_del(&sta->group_info.active); + } + + if (! list_empty(&mu->update_sta)) { + list_for_each_entry_safe(sta, next, &mu->update_sta, group_info.update) { + rwnx_send_mu_group_update_req(rwnx_hw, sta); + list_del(&sta->group_info.update); + } + } + + mu->next_group_select = jiffies; + rwnx_mu_group_sta_select(rwnx_hw); + up(&mu->lock); + + return; +} + +/** + * rwnx_mu_group_init - Initialize MU groups + * + * @rwnx_hw: main driver data + * + * Initialize all MU group + */ +void rwnx_mu_group_init(struct rwnx_hw *rwnx_hw) +{ + struct rwnx_mu_info *mu = &rwnx_hw->mu; + int i; + + INIT_LIST_HEAD(&mu->active_groups); + INIT_LIST_HEAD(&mu->active_sta); + INIT_LIST_HEAD(&mu->update_sta); + + for (i = 0; i < NX_MU_GROUP_MAX; i++) { + int j; + mu->groups[i].user_cnt = 0; + mu->groups[i].group_id = i + 1; + for (j = 0; j < CONFIG_USER_MAX; j++) { + mu->groups[i].users[j] = NULL; + } + list_add(&mu->groups[i].list, &mu->active_groups); + } + + mu->update_count = 1; + mu->group_cnt = 0; + mu->next_group_select = jiffies; + INIT_DELAYED_WORK(&mu->group_work, rwnx_mu_group_work); + sema_init(&mu->lock, 1); +} + +/** + * rwnx_mu_set_active_sta - mark a STA as active + * + * @rwnx_hw: main driver data + * @sta: pointer to the sta + * @traffic: Number of buffers to add in the sta's traffic counter + * + * If @sta is MU beamformee capable (and MU-MIMO tx is enabled) move the + * sta at the top of the @active_sta list. 
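rwnx_mu_group_move_head() is not part of this hunk, so the sketch below only assumes it behaves like a standard move-to-front on a kernel list; the scheduling condition mirrors the description here (no work already pending, and more than one entry queued):

#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct demo_sched {
    struct list_head active;        /* list head, INIT_LIST_HEAD() at init */
    struct delayed_work work;
};

static void demo_mark_active(struct demo_sched *s, struct list_head *entry)
{
    /* list_move() unlinks @entry (a self-linked, never-added node is
     * handled fine) and re-inserts it right after the head, i.e. at
     * the front of the list */
    list_move(entry, &s->active);

    /* schedule only when at least two entries are queued and no work
     * is already pending */
    if (!delayed_work_pending(&s->work) && !list_is_singular(&s->active))
        schedule_delayed_work(&s->work, msecs_to_jiffies(200));
}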
+ * It also schedule the group_work if not already scheduled and the list + * contains more than one sta. + * + * If a STA was already in the list during the last group update + * (i.e. sta->group_info.last_update == mu->update_count) it is not added + * back to the list until a sta that wasn't active during the last update is + * added. This is to avoid scheduling group update with a list of sta that + * were all already in the list during previous update. + * + * It is called with mu->lock taken. + */ +void rwnx_mu_set_active_sta(struct rwnx_hw *rwnx_hw, struct rwnx_sta *sta, + int traffic) +{ + struct rwnx_mu_info *mu = &rwnx_hw->mu; + + if (!sta || (sta->group_info.map & RWNX_SU_GROUP)) + return; + + sta->group_info.traffic += traffic; + + if ((sta->group_info.last_update != mu->update_count) || + !list_empty(&mu->active_sta)) { + + rwnx_mu_group_move_head(&mu->active_sta, &sta->group_info.active); + + if (!delayed_work_pending(&mu->group_work) && + !list_is_singular(&mu->active_sta)) { + schedule_delayed_work(&mu->group_work, + msecs_to_jiffies(RWNX_MU_GROUP_INTERVAL)); + } + } +} + +/** + * rwnx_mu_set_active_group - mark a MU group as active + * + * @rwnx_hw: main driver data + * @group_id: Group id + * + * move a group at the top of the @active_groups list + */ +void rwnx_mu_set_active_group(struct rwnx_hw *rwnx_hw, int group_id) +{ + struct rwnx_mu_info *mu = &rwnx_hw->mu; + struct rwnx_mu_group *group = rwnx_mu_group_from_id(mu, group_id); + + rwnx_mu_group_move_head(&mu->active_groups, &group->list); +} + + +/** + * rwnx_mu_group_sta_select - Select the best group for MU stas + * + * @rwnx_hw: main driver data + * + * For each MU capable client of AP interfaces this function tries to select + * the best group to use. + * + * In first pass, gather information from all stations to form statistics + * for each group for the previous @RWNX_MU_GROUP_SELECT_INTERVAL interval: + * - number of buffers transmitted + * - number of user + * + * Then groups with more than 2 active users, are assigned after being ordered + * by traffic : + * - group with highest traffic is selected: set this group for all its users + * - update nb_users for all others group (as one sta may be in several groups) + * - select the next group that have still mor than 2 users and assign it. 
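The assignment step described here first aggregates per-group traffic, then walks the groups from busiest to least busy. A standalone sketch of that ordering step, with made-up traffic numbers:

#include <stdio.h>

static void sort_by_traffic(int *order, int cnt, const int *traffic)
{
    int i, swapped = 1;

    while (swapped) {                       /* simple bubble sort, as in the driver */
        swapped = 0;
        for (i = 0; i < cnt - 1; i++) {
            if (traffic[order[i]] < traffic[order[i + 1]]) {
                int tmp = order[i];
                order[i] = order[i + 1];
                order[i + 1] = tmp;
                swapped = 1;
            }
        }
    }
}

int main(void)
{
    int traffic[] = { 0, 120, 40, 300 };    /* indexed by group id */
    int order[] = { 1, 2, 3 };              /* groups with two or more users */
    int i;

    sort_by_traffic(order, 3, traffic);
    for (i = 0; i < 3; i++)
        printf("group %d traffic %d\n", order[i], traffic[order[i]]);
    return 0;
}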
+ * - continue until all group are processed + * + */ +void rwnx_mu_group_sta_select(struct rwnx_hw *rwnx_hw) +{ + struct rwnx_mu_info *mu = &rwnx_hw->mu; + int nb_users[NX_MU_GROUP_MAX + 1]; + int traffic[NX_MU_GROUP_MAX + 1]; + int order[NX_MU_GROUP_MAX + 1]; + struct rwnx_sta *sta; + struct rwnx_vif *vif; + struct list_head *head; + u64 map; + int i, j, update, group_id, tmp, cnt = 0; + + if (!mu->group_cnt || time_before(jiffies, mu->next_group_select)) + return; + + list_for_each_entry(vif, &rwnx_hw->vifs, list) { + + if (RWNX_VIF_TYPE(vif) != NL80211_IFTYPE_AP) + continue; + +#ifdef CONFIG_RWNX_FULLMAC + head = &vif->ap.sta_list; +#else + head = &vif->stations; +#endif /* CONFIG_RWNX_FULLMAC */ + + memset(nb_users, 0, sizeof(nb_users)); + memset(traffic, 0, sizeof(traffic)); + list_for_each_entry(sta, head, list) { + int sta_traffic = sta->group_info.traffic; + + /* reset statistics for next selection */ + sta->group_info.traffic = 0; + if (sta->group_info.group) + trace_mu_group_selection(sta, 0); + sta->group_info.group = 0; + + if (sta->group_info.cnt == 0 || + sta_traffic < RWNX_MU_GROUP_MIN_TRAFFIC) + continue; + + group_sta_for_each(sta, group_id, map) { + nb_users[group_id]++; + traffic[group_id] += sta_traffic; + + /* list group with 2 users or more */ + if (nb_users[group_id] == 2) + order[cnt++] = group_id; + } + } + + /* reorder list of group with more that 2 users */ + update = 1; + while(update) { + update = 0; + for (i = 0; i < cnt - 1; i++) { + if (traffic[order[i]] < traffic[order[i + 1]]) { + tmp = order[i]; + order[i] = order[i + 1]; + order[i + 1] = tmp; + update = 1; + } + } + } + + /* now assign group in traffic order */ + for (i = 0; i < cnt ; i ++) { + struct rwnx_mu_group *group; + group_id = order[i]; + + if (nb_users[group_id] < 2) + continue; + + group = rwnx_mu_group_from_id(mu, group_id); + for (j = 0; j < CONFIG_USER_MAX ; j++) { + if (group->users[j]) { + trace_mu_group_selection(group->users[j], group_id); + group->users[j]->group_info.group = group_id; + + group_sta_for_each(group->users[j], tmp, map) { + if (group_id != tmp) + nb_users[tmp]--; + } + } + } + } + } + + mu->next_group_select = jiffies + + msecs_to_jiffies(RWNX_MU_GROUP_SELECT_INTERVAL); + mu->next_group_select |= 1; +} diff --git a/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_mu_group.h b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_mu_group.h new file mode 100644 index 0000000..338821e --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_mu_group.h @@ -0,0 +1,179 @@ +/** + ****************************************************************************** + * + * @file rwnx_mu_group.h + * + * Copyright (C) RivieraWaves 2016-2019 + * + ****************************************************************************** + */ +#ifndef _RWNX_MU_GROUP_H_ +#define _RWNX_MU_GROUP_H_ + +#include +#include + +struct rwnx_hw; +struct rwnx_sta; + +#ifdef CONFIG_RWNX_MUMIMO_TX + +/** + * struct rwnx_sta_group_info - Group Information for a STA + * + * @active: node for @mu->active_sta list + * @update: node for @mu->update_sta list + * @cnt: Number of groups the STA belongs to + * @map: Bitfield of groups the sta belongs to + * @traffic: Number of buffers sent since previous group selection + * @group: Id of the group selected by previous group selection + * (cf @rwnx_mu_group_sta_select) + */ +struct rwnx_sta_group_info { + struct list_head active; + struct list_head update; + u16 last_update; + int cnt; + u64 map; + int traffic; + u8 group; +}; + +/** + * struct mu_group_info - 
Information about the users of a group + * + * @list: node for mu->active_groups + * @group_id: Group identifier + * @user_cnt: Number of the users in the group + * @users: Pointer to the sta, ordered by user position + */ +struct rwnx_mu_group { + struct list_head list; + int group_id; + int user_cnt; + struct rwnx_sta *users[CONFIG_USER_MAX]; +}; + +/** + * struct rwnx_mu_info - Information about all MU group + * + * @active_groups: List of all possible groups. Ordered from the most recently + * used one to the least one (and possibly never used) + * @active_sta: List of MU beamformee sta that have been active (since previous + * group update). Ordered from the most recently active. + * @update_sta: List of sta whose group information has changed and need to be + * updated at fw level + * @groups: Table of all groups + * @group_work: Work item used to schedule group update + * @update_count: Counter used to identify the last group formation update. + * (cf rwnx_sta_group_info.last_update) + * @lock: Lock taken during group update. If tx happens lock is taken, then tx + * will not used MU. + * @next_group_assign: Next time the group selection should be run + * (ref @rwnx_mu_group_sta_select) + * @group_cnt: Number of group created + */ +struct rwnx_mu_info { + struct list_head active_groups; + struct list_head active_sta; + struct list_head update_sta; + struct rwnx_mu_group groups[NX_MU_GROUP_MAX]; + struct delayed_work group_work; + u16 update_count; + struct semaphore lock; + unsigned long next_group_select; + u8 group_cnt; +}; + +#define RWNX_SU_GROUP BIT_ULL(0) +#define RWNX_MU_GROUP_MASK 0x7ffffffffffffffeULL +#define RWNX_MU_GROUP_INTERVAL 200 /* in ms */ +#define RWNX_MU_GROUP_SELECT_INTERVAL 100 /* in ms */ +// minimum traffic in a RWNX_MU_GROUP_SELECT_INTERVAL to consider the sta +#define RWNX_MU_GROUP_MIN_TRAFFIC 50 /* in number of packet */ + + +#define RWNX_GET_FIRST_GROUP_ID(map) (fls64(map) - 1) + +#define group_sta_for_each(sta, id, map) \ + map = sta->group_info.map & RWNX_MU_GROUP_MASK; \ + for (id = (fls64(map) - 1) ; id > 0 ; \ + map &= ~(u64)BIT_ULL(id), id = (fls64(map) - 1)) + +#define group_for_each(id, map) \ + for (id = (fls64(map) - 1) ; id > 0 ; \ + map &= ~(u64)BIT_ULL(id), id = (fls64(map) - 1)) + +#define RWNX_MUMIMO_INFO_POS_ID(info) (((info) >> 6) & 0x3) +#define RWNX_MUMIMO_INFO_GROUP_ID(info) ((info) & 0x3f) + +static inline +struct rwnx_mu_group *rwnx_mu_group_from_id(struct rwnx_mu_info *mu, int id) +{ + if (id > NX_MU_GROUP_MAX) + return NULL; + + return &mu->groups[id - 1]; +} + + +void rwnx_mu_group_sta_init(struct rwnx_sta *sta, + const struct ieee80211_vht_cap *vht_cap); +void rwnx_mu_group_sta_del(struct rwnx_hw *rwnx_hw, struct rwnx_sta *sta); +u64 rwnx_mu_group_sta_get_map(struct rwnx_sta *sta); +int rwnx_mu_group_sta_get_pos(struct rwnx_hw *rwnx_hw, struct rwnx_sta *sta, + int group_id); + +void rwnx_mu_group_init(struct rwnx_hw *rwnx_hw); + +void rwnx_mu_set_active_sta(struct rwnx_hw *rwnx_hw, struct rwnx_sta *sta, + int traffic); +void rwnx_mu_set_active_group(struct rwnx_hw *rwnx_hw, int group_id); +void rwnx_mu_group_sta_select(struct rwnx_hw *rwnx_hw); + + +#else /* ! 
CONFIG_RWNX_MUMIMO_TX */ + +static inline +void rwnx_mu_group_sta_init(struct rwnx_sta *sta, + const struct ieee80211_vht_cap *vht_cap) +{} + +static inline +void rwnx_mu_group_sta_del(struct rwnx_hw *rwnx_hw, struct rwnx_sta *sta) +{} + +static inline +u64 rwnx_mu_group_sta_get_map(struct rwnx_sta *sta) +{ + return 0; +} + +static inline +int rwnx_mu_group_sta_get_pos(struct rwnx_hw *rwnx_hw, struct rwnx_sta *sta, + int group_id) +{ + return 0; +} + +static inline +void rwnx_mu_group_init(struct rwnx_hw *rwnx_hw) +{} + +static inline +void rwnx_mu_set_active_sta(struct rwnx_hw *rwnx_hw, struct rwnx_sta *sta, + int traffic) +{} + +static inline +void rwnx_mu_set_active_group(struct rwnx_hw *rwnx_hw, int group_id) +{} + +static inline +void rwnx_mu_group_sta_select(struct rwnx_hw *rwnx_hw) +{} + +#endif /* CONFIG_RWNX_MUMIMO_TX */ + +#endif /* _RWNX_MU_GROUP_H_ */ + diff --git a/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_pci.c b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_pci.c new file mode 100644 index 0000000..cd207c8 --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_pci.c @@ -0,0 +1,94 @@ +/** + ****************************************************************************** + * + * @file rwnx_pci.c + * + * Copyright (C) RivieraWaves 2012-2019 + * + ****************************************************************************** + */ +#include +#include + +#include "rwnx_defs.h" +#include "rwnx_dini.h" +#include "rwnx_v7.h" + +#define PCI_VENDOR_ID_DINIGROUP 0x17DF +#define PCI_DEVICE_ID_DINIGROUP_DNV6_F2PCIE 0x1907 + +#define PCI_DEVICE_ID_XILINX_CEVA_VIRTEX7 0x7011 + +static const struct pci_device_id rwnx_pci_ids[] = { + {PCI_DEVICE(PCI_VENDOR_ID_DINIGROUP, PCI_DEVICE_ID_DINIGROUP_DNV6_F2PCIE)}, + {PCI_DEVICE(PCI_VENDOR_ID_XILINX, PCI_DEVICE_ID_XILINX_CEVA_VIRTEX7)}, + {0,} +}; + + +/* Uncomment this for depmod to create module alias */ +/* We don't want this on development platform */ +//MODULE_DEVICE_TABLE(pci, rwnx_pci_ids); + +static int rwnx_pci_probe(struct pci_dev *pci_dev, + const struct pci_device_id *pci_id) +{ + struct rwnx_plat *rwnx_plat = NULL; + void *drvdata; + int ret = -ENODEV; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + if (pci_id->vendor == PCI_VENDOR_ID_DINIGROUP) { + ret = rwnx_dini_platform_init(pci_dev, &rwnx_plat); + } else if (pci_id->vendor == PCI_VENDOR_ID_XILINX) { + ret = rwnx_v7_platform_init(pci_dev, &rwnx_plat); + } + + if (ret) + return ret; + + rwnx_plat->pci_dev = pci_dev; + + ret = rwnx_platform_init(rwnx_plat, &drvdata); + pci_set_drvdata(pci_dev, drvdata); + + if (ret) + rwnx_plat->deinit(rwnx_plat); + + return ret; +} + +static void rwnx_pci_remove(struct pci_dev *pci_dev) +{ + struct rwnx_hw *rwnx_hw; + struct rwnx_plat *rwnx_plat; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + rwnx_hw = pci_get_drvdata(pci_dev); + rwnx_plat = rwnx_hw->plat; + + rwnx_platform_deinit(rwnx_hw); + rwnx_plat->deinit(rwnx_plat); + + pci_set_drvdata(pci_dev, NULL); +} + +static struct pci_driver rwnx_pci_drv = { + .name = KBUILD_MODNAME, + .id_table = rwnx_pci_ids, + .probe = rwnx_pci_probe, + .remove = rwnx_pci_remove +}; + +int rwnx_pci_register_drv(void) +{ + return pci_register_driver(&rwnx_pci_drv); +} + +void rwnx_pci_unregister_drv(void) +{ + pci_unregister_driver(&rwnx_pci_drv); +} + diff --git a/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_pci.h b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_pci.h new file mode 100644 index 0000000..d81578c --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_pci.h @@ -0,0 +1,17 @@ 
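The group_for_each()/group_sta_for_each() macros defined in rwnx_mu_group.h above iterate the set bits of a u64 membership map from highest to lowest via fls64(), skipping bit 0 (the SU group). A userspace sketch of the same walk, with a local stand-in for fls64():

#include <stdint.h>
#include <stdio.h>

static int fls64_demo(uint64_t x)           /* highest set bit, 1-based; 0 if none */
{
    int n = 0;
    while (x) { n++; x >>= 1; }
    return n;
}

int main(void)
{
    uint64_t map = 0x16;                    /* groups 1, 2 and 4 */
    int id;

    for (id = fls64_demo(map) - 1; id > 0;
         map &= ~(1ULL << id), id = fls64_demo(map) - 1)
        printf("group %d\n", id);           /* prints 4, 2, 1; bit 0 is never visited */
    return 0;
}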
+/** + ****************************************************************************** + * + * @file rwnx_pci.h + * + * Copyright (C) RivieraWaves 2012-2019 + * + ****************************************************************************** + */ + +#ifndef _RWNX_PCI_H_ +#define _RWNX_PCI_H_ + +int rwnx_pci_register_drv(void); +void rwnx_pci_unregister_drv(void); + +#endif /* _RWNX_PCI_H_ */ diff --git a/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_platform.c b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_platform.c new file mode 100644 index 0000000..0392d4c --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_platform.c @@ -0,0 +1,3723 @@ +/** + ****************************************************************************** + * + * @file rwnx_platform.c + * + * Copyright (C) RivieraWaves 2012-2019 + * + ****************************************************************************** + */ + +#include +#include +#include +#include + +#include "rwnx_platform.h" +#include "reg_access.h" +#include "hal_desc.h" +#include "rwnx_main.h" +#include "rwnx_pci.h" +#ifndef CONFIG_RWNX_FHOST +#include "ipc_host.h" +#endif /* !CONFIG_RWNX_FHOST */ +#include "rwnx_msg_tx.h" + +#ifdef AICWF_SDIO_SUPPORT +#include "aicwf_sdio.h" +#endif + +#ifdef AICWF_USB_SUPPORT +#include "aicwf_usb.h" +#endif +#include "md5.h" +#include "aicwf_compat_8800dc.h" +#include "aicwf_compat_8800d80.h" +#include "aicwf_compat_8800d80x2.h" +#ifdef CONFIG_USE_FW_REQUEST +#include +#endif + + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0)) +static inline struct inode *file_inode(const struct file *f) +{ + return f->f_dentry->d_inode; +} +#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0)) */ +struct rwnx_plat *g_rwnx_plat = NULL; + +#define FW_PATH_MAX_LEN 200 +extern char aic_fw_path[FW_PATH_MAX_LEN]; + +//Parser state +#define INIT 0 +#define CMD 1 +#define PRINT 2 +#define GET_VALUE 3 + +userconfig_info_t userconfig_info = { + .txpwr_lvl = { + .enable = 1, + .dsss = 9, + .ofdmlowrate_2g4 = 8, + .ofdm64qam_2g4 = 8, + .ofdm256qam_2g4 = 8, + .ofdm1024qam_2g4 = 8, + .ofdmlowrate_5g = 11, + .ofdm64qam_5g = 10, + .ofdm256qam_5g = 9, + .ofdm1024qam_5g = 9 + }, + .txpwr_lvl_v2 = { + .enable = 1, + .pwrlvl_11b_11ag_2g4 = + //1M, 2M, 5M5, 11M, 6M, 9M, 12M, 18M, 24M, 36M, 48M, 54M + { 20, 20, 20, 20, 20, 20, 20, 20, 18, 18, 16, 16}, + .pwrlvl_11n_11ac_2g4 = + //MCS0, MCS1, MCS2, MCS3, MCS4, MCS5, MCS6, MCS7, MCS8, MCS9 + { 20, 20, 20, 20, 18, 18, 16, 16, 16, 16}, + .pwrlvl_11ax_2g4 = + //MCS0, MCS1, MCS2, MCS3, MCS4, MCS5, MCS6, MCS7, MCS8, MCS9, MCS10,MCS11 + { 20, 20, 20, 20, 18, 18, 16, 16, 16, 16, 15, 15}, + }, + .txpwr_lvl_v3 = { + .enable = 1, + .pwrlvl_11b_11ag_2g4 = + //1M, 2M, 5M5, 11M, 6M, 9M, 12M, 18M, 24M, 36M, 48M, 54M + { 20, 20, 20, 20, 20, 20, 20, 20, 18, 18, 16, 16}, + .pwrlvl_11n_11ac_2g4 = + //MCS0, MCS1, MCS2, MCS3, MCS4, MCS5, MCS6, MCS7, MCS8, MCS9 + { 20, 20, 20, 20, 18, 18, 16, 16, 16, 16}, + .pwrlvl_11ax_2g4 = + //MCS0, MCS1, MCS2, MCS3, MCS4, MCS5, MCS6, MCS7, MCS8, MCS9, MCS10,MCS11 + { 20, 20, 20, 20, 18, 18, 16, 16, 16, 16, 15, 15}, + .pwrlvl_11a_5g = + //NA, NA, NA, NA, 6M, 9M, 12M, 18M, 24M, 36M, 48M, 54M + { 0x80, 0x80, 0x80, 0x80, 20, 20, 20, 20, 18, 18, 16, 16}, + .pwrlvl_11n_11ac_5g = + //MCS0, MCS1, MCS2, MCS3, MCS4, MCS5, MCS6, MCS7, MCS8, MCS9 + { 20, 20, 20, 20, 18, 18, 16, 16, 16, 15}, + .pwrlvl_11ax_5g = + //MCS0, MCS1, MCS2, MCS3, MCS4, MCS5, MCS6, MCS7, MCS8, MCS9, MCS10,MCS11 + { 20, 20, 20, 20, 18, 18, 16, 16, 16, 15, 14, 14}, + }, + .txpwr_lvl_v4 = { + .enable 
= 1, + .pwrlvl_11b_11ag_2g4 = + //1M, 2M, 5M5, 11M, 6M, 9M, 12M, 18M, 24M, 36M, 48M, 54M + { 20, 20, 20, 20, 20, 20, 20, 20, 18, 18, 16, 16}, + .pwrlvl_11n_11ac_2g4 = + //MCS0, MCS1, MCS2, MCS3, MCS4, MCS5, MCS6, MCS7, MCS8, MCS9 + { 20, 20, 20, 20, 18, 18, 16, 16, 16, 16}, + .pwrlvl_11ax_2g4 = + //MCS0, MCS1, MCS2, MCS3, MCS4, MCS5, MCS6, MCS7, MCS8, MCS9, MCS10,MCS11 + { 20, 20, 20, 20, 18, 18, 16, 16, 16, 16, 15, 15}, + .pwrlvl_11a_5g = + //6M, 9M, 12M, 18M, 24M, 36M, 48M, 54M + { 20, 20, 20, 20, 18, 18, 16, 16}, + .pwrlvl_11n_11ac_5g = + //MCS0, MCS1, MCS2, MCS3, MCS4, MCS5, MCS6, MCS7, MCS8, MCS9 + { 20, 20, 20, 20, 18, 18, 16, 16, 16, 15}, + .pwrlvl_11ax_5g = + //MCS0, MCS1, MCS2, MCS3, MCS4, MCS5, MCS6, MCS7, MCS8, MCS9, MCS10,MCS11 + { 20, 20, 20, 20, 18, 18, 16, 16, 16, 15, 14, 14}, + }, + .txpwr_loss = { + .loss_enable_2g4 = 0, + .loss_value_2g4 = 0, + .loss_enable_5g = 0, + .loss_value_5g = 0, + }, + .txpwr_ofst = { + .enable = 1, + .chan_1_4 = 0, + .chan_5_9 = 0, + .chan_10_13 = 0, + .chan_36_64 = 0, + .chan_100_120 = 0, + .chan_122_140 = 0, + .chan_142_165 = 0, + }, + .txpwr_ofst2x = { + .enable = 0, + .pwrofst2x_tbl_2g4 = + { // ch1-4, ch5-9, ch10-13 + { 0, 0, 0 }, // 11b + { 0, 0, 0 }, // ofdm_highrate + { 0, 0, 0 }, // ofdm_lowrate + }, + .pwrofst2x_tbl_5g = + { // ch42, ch58, ch106,ch122,ch138,ch155 + { 0, 0, 0, 0, 0, 0 }, // ofdm_lowrate + { 0, 0, 0, 0, 0, 0 }, // ofdm_highrate + { 0, 0, 0, 0, 0, 0 }, // ofdm_midrate + }, + }, + .txpwr_ofst2x_v2 = { + .enable = 0, + .pwrofst_flags = 0, + .pwrofst2x_tbl_2g4_ant0 = + { // 11b, ofdm_highrate, ofdm_lowrate + { 0, 0, 0 }, // ch1-4 + { 0, 0, 0 }, // ch5-9 + { 0, 0, 0 }, // ch10-13 + }, + .pwrofst2x_tbl_2g4_ant1 = + { // 11b, ofdm_highrate, ofdm_lowrate + { 0, 0, 0 }, // ch1-4 + { 0, 0, 0 }, // ch5-9 + { 0, 0, 0 }, // ch10-13 + }, + .pwrofst2x_tbl_5g_ant0 = + { // ofdm_highrate, ofdm_lowrate, ofdm_midrate + { 0, 0, 0 }, // ch42 + { 0, 0, 0 }, // ch58 + { 0, 0, 0 }, // ch106 + { 0, 0, 0 }, // ch122 + { 0, 0, 0 }, // ch138 + { 0, 0, 0 }, // ch155 + }, + .pwrofst2x_tbl_5g_ant1 = + { // ofdm_highrate, ofdm_lowrate, ofdm_midrate + { 0, 0, 0 }, // ch42 + { 0, 0, 0 }, // ch58 + { 0, 0, 0 }, // ch106 + { 0, 0, 0 }, // ch122 + { 0, 0, 0 }, // ch138 + { 0, 0, 0 }, // ch155 + }, + .pwrofst2x_tbl_6g_ant0 = { 0, }, // ofdm_highrate: 6e_ch7 ~ 6e_ch229 + .pwrofst2x_tbl_6g_ant1 = { 0, }, // ofdm_highrate: 6e_ch7 ~ 6e_ch229 + }, + .xtal_cap = { + .enable = 0, + .xtal_cap = 24, + .xtal_cap_fine = 31, + }, +}; + +typedef struct { + char ccode[3]; + Regions_code region; +} reg_table; + +/* If the region conflicts with the kernel, the actual authentication standard prevails */ +reg_table reg_tables[] = { + {.ccode = "CN", .region = REGIONS_SRRC}, + {.ccode = "US", .region = REGIONS_FCC}, + {.ccode = "DE", .region = REGIONS_ETSI}, + {.ccode = "00", .region = REGIONS_DEFAULT}, + {.ccode = "WW", .region = REGIONS_DEFAULT}, + {.ccode = "XX", .region = REGIONS_DEFAULT}, + {.ccode = "JP", .region = REGIONS_JP}, + {.ccode = "AD", .region = REGIONS_ETSI}, + {.ccode = "AE", .region = REGIONS_ETSI}, + {.ccode = "AF", .region = REGIONS_ETSI}, + {.ccode = "AI", .region = REGIONS_ETSI}, + {.ccode = "AL", .region = REGIONS_ETSI}, + {.ccode = "AM", .region = REGIONS_ETSI}, + {.ccode = "AN", .region = REGIONS_ETSI}, + {.ccode = "AR", .region = REGIONS_FCC}, + {.ccode = "AS", .region = REGIONS_FCC}, + {.ccode = "AT", .region = REGIONS_ETSI}, + {.ccode = "AU", .region = REGIONS_ETSI}, + {.ccode = "AW", .region = REGIONS_ETSI}, + {.ccode = "AZ", .region = 
REGIONS_ETSI}, + {.ccode = "BA", .region = REGIONS_ETSI}, + {.ccode = "BB", .region = REGIONS_FCC}, + {.ccode = "BD", .region = REGIONS_JP}, + {.ccode = "BE", .region = REGIONS_ETSI}, + {.ccode = "BF", .region = REGIONS_FCC}, + {.ccode = "BG", .region = REGIONS_ETSI}, + {.ccode = "BH", .region = REGIONS_ETSI}, + {.ccode = "BL", .region = REGIONS_ETSI}, + {.ccode = "BM", .region = REGIONS_FCC}, + {.ccode = "BN", .region = REGIONS_JP}, + {.ccode = "BO", .region = REGIONS_JP}, + {.ccode = "BR", .region = REGIONS_FCC}, + {.ccode = "BS", .region = REGIONS_FCC}, + {.ccode = "BT", .region = REGIONS_ETSI}, + {.ccode = "BW", .region = REGIONS_ETSI}, + {.ccode = "BY", .region = REGIONS_ETSI}, + {.ccode = "BZ", .region = REGIONS_JP}, + {.ccode = "CA", .region = REGIONS_FCC}, + {.ccode = "CF", .region = REGIONS_FCC}, + {.ccode = "CH", .region = REGIONS_ETSI}, + {.ccode = "CI", .region = REGIONS_FCC}, + {.ccode = "CL", .region = REGIONS_ETSI}, + {.ccode = "CO", .region = REGIONS_FCC}, + {.ccode = "CR", .region = REGIONS_FCC}, + {.ccode = "CX", .region = REGIONS_FCC}, + {.ccode = "CY", .region = REGIONS_ETSI}, + {.ccode = "CZ", .region = REGIONS_ETSI}, + {.ccode = "DK", .region = REGIONS_ETSI}, + {.ccode = "DM", .region = REGIONS_FCC}, + {.ccode = "DO", .region = REGIONS_FCC}, + {.ccode = "DZ", .region = REGIONS_JP}, + {.ccode = "EC", .region = REGIONS_FCC}, + {.ccode = "EE", .region = REGIONS_ETSI}, + {.ccode = "EG", .region = REGIONS_ETSI}, + {.ccode = "ES", .region = REGIONS_ETSI}, + {.ccode = "ET", .region = REGIONS_ETSI}, + {.ccode = "FI", .region = REGIONS_ETSI}, + {.ccode = "FM", .region = REGIONS_FCC}, + {.ccode = "FR", .region = REGIONS_ETSI}, + {.ccode = "GB", .region = REGIONS_ETSI}, + {.ccode = "GD", .region = REGIONS_FCC}, + {.ccode = "GE", .region = REGIONS_ETSI}, + {.ccode = "GF", .region = REGIONS_ETSI}, + {.ccode = "GH", .region = REGIONS_FCC}, + {.ccode = "GI", .region = REGIONS_ETSI}, + {.ccode = "GL", .region = REGIONS_ETSI}, + {.ccode = "GP", .region = REGIONS_ETSI}, + {.ccode = "GR", .region = REGIONS_ETSI}, + {.ccode = "GT", .region = REGIONS_FCC}, + {.ccode = "GU", .region = REGIONS_FCC}, + {.ccode = "GY", .region = REGIONS_DEFAULT}, + {.ccode = "HK", .region = REGIONS_ETSI}, + {.ccode = "HN", .region = REGIONS_FCC}, + {.ccode = "HR", .region = REGIONS_ETSI}, + {.ccode = "HT", .region = REGIONS_FCC}, + {.ccode = "HU", .region = REGIONS_ETSI}, + {.ccode = "ID", .region = REGIONS_ETSI}, + {.ccode = "IE", .region = REGIONS_ETSI}, + {.ccode = "IL", .region = REGIONS_ETSI}, + {.ccode = "IN", .region = REGIONS_ETSI}, + {.ccode = "IQ", .region = REGIONS_ETSI}, + {.ccode = "IR", .region = REGIONS_JP}, + {.ccode = "IS", .region = REGIONS_ETSI}, + {.ccode = "IT", .region = REGIONS_ETSI}, + {.ccode = "JM", .region = REGIONS_FCC}, + {.ccode = "JO", .region = REGIONS_ETSI}, + {.ccode = "KE", .region = REGIONS_ETSI}, + {.ccode = "KG", .region = REGIONS_ETSI}, + {.ccode = "KH", .region = REGIONS_ETSI}, + {.ccode = "KN", .region = REGIONS_ETSI}, + {.ccode = "KP", .region = REGIONS_JP}, + {.ccode = "KR", .region = REGIONS_ETSI}, + {.ccode = "KW", .region = REGIONS_ETSI}, + {.ccode = "KY", .region = REGIONS_FCC}, + {.ccode = "KZ", .region = REGIONS_DEFAULT}, + {.ccode = "LB", .region = REGIONS_ETSI}, + {.ccode = "LC", .region = REGIONS_ETSI}, + {.ccode = "LI", .region = REGIONS_ETSI}, + {.ccode = "LK", .region = REGIONS_FCC}, + {.ccode = "LS", .region = REGIONS_ETSI}, + {.ccode = "LT", .region = REGIONS_ETSI}, + {.ccode = "LU", .region = REGIONS_ETSI}, + {.ccode = "LV", .region = REGIONS_ETSI}, + 
{.ccode = "LY", .region = REGIONS_ETSI}, + {.ccode = "MA", .region = REGIONS_ETSI}, + {.ccode = "MC", .region = REGIONS_ETSI}, + {.ccode = "MD", .region = REGIONS_ETSI}, + {.ccode = "ME", .region = REGIONS_ETSI}, + {.ccode = "MF", .region = REGIONS_ETSI}, + {.ccode = "MH", .region = REGIONS_FCC}, + {.ccode = "MK", .region = REGIONS_ETSI}, + {.ccode = "MN", .region = REGIONS_ETSI}, + {.ccode = "MO", .region = REGIONS_ETSI}, + {.ccode = "MP", .region = REGIONS_FCC}, + {.ccode = "MQ", .region = REGIONS_ETSI}, + {.ccode = "MR", .region = REGIONS_ETSI}, + {.ccode = "MT", .region = REGIONS_ETSI}, + {.ccode = "MU", .region = REGIONS_FCC}, + {.ccode = "MV", .region = REGIONS_ETSI}, + {.ccode = "MW", .region = REGIONS_ETSI}, + {.ccode = "MX", .region = REGIONS_FCC}, + {.ccode = "MY", .region = REGIONS_FCC}, + {.ccode = "NA", .region = REGIONS_ETSI}, + {.ccode = "NG", .region = REGIONS_ETSI}, + {.ccode = "NI", .region = REGIONS_FCC}, + {.ccode = "NL", .region = REGIONS_ETSI}, + {.ccode = "NO", .region = REGIONS_ETSI}, + {.ccode = "NP", .region = REGIONS_JP}, + {.ccode = "NZ", .region = REGIONS_ETSI}, + {.ccode = "OM", .region = REGIONS_ETSI}, + {.ccode = "PA", .region = REGIONS_FCC}, + {.ccode = "PE", .region = REGIONS_FCC}, + {.ccode = "PF", .region = REGIONS_ETSI}, + {.ccode = "PG", .region = REGIONS_FCC}, + {.ccode = "PH", .region = REGIONS_FCC}, + {.ccode = "PK", .region = REGIONS_ETSI}, + {.ccode = "PL", .region = REGIONS_ETSI}, + {.ccode = "PM", .region = REGIONS_ETSI}, + {.ccode = "PR", .region = REGIONS_FCC}, + {.ccode = "PT", .region = REGIONS_ETSI}, + {.ccode = "PW", .region = REGIONS_FCC}, + {.ccode = "PY", .region = REGIONS_FCC}, + {.ccode = "QA", .region = REGIONS_ETSI}, + {.ccode = "RE", .region = REGIONS_ETSI}, + {.ccode = "RO", .region = REGIONS_ETSI}, + {.ccode = "RS", .region = REGIONS_ETSI}, + {.ccode = "RU", .region = REGIONS_ETSI}, + {.ccode = "RW", .region = REGIONS_FCC}, + {.ccode = "SA", .region = REGIONS_ETSI}, + {.ccode = "SE", .region = REGIONS_ETSI}, + {.ccode = "SG", .region = REGIONS_ETSI}, + {.ccode = "SI", .region = REGIONS_ETSI}, + {.ccode = "SK", .region = REGIONS_ETSI}, + {.ccode = "SM", .region = REGIONS_ETSI}, + {.ccode = "SN", .region = REGIONS_FCC}, + {.ccode = "SR", .region = REGIONS_ETSI}, + {.ccode = "SV", .region = REGIONS_FCC}, + {.ccode = "SY", .region = REGIONS_DEFAULT}, + {.ccode = "TC", .region = REGIONS_FCC}, + {.ccode = "TD", .region = REGIONS_ETSI}, + {.ccode = "TG", .region = REGIONS_ETSI}, + {.ccode = "TH", .region = REGIONS_FCC}, + {.ccode = "TJ", .region = REGIONS_ETSI}, + {.ccode = "TM", .region = REGIONS_ETSI}, + {.ccode = "TN", .region = REGIONS_ETSI}, + {.ccode = "TR", .region = REGIONS_ETSI}, + {.ccode = "TT", .region = REGIONS_FCC}, + {.ccode = "TW", .region = REGIONS_FCC}, + {.ccode = "UA", .region = REGIONS_ETSI}, + {.ccode = "UG", .region = REGIONS_FCC}, + {.ccode = "UY", .region = REGIONS_FCC}, + {.ccode = "UZ", .region = REGIONS_ETSI}, + {.ccode = "VC", .region = REGIONS_ETSI}, + {.ccode = "VE", .region = REGIONS_FCC}, + {.ccode = "VI", .region = REGIONS_FCC}, + {.ccode = "VN", .region = REGIONS_JP}, + {.ccode = "VU", .region = REGIONS_FCC}, + {.ccode = "WF", .region = REGIONS_ETSI}, + {.ccode = "YE", .region = REGIONS_DEFAULT}, + {.ccode = "YT", .region = REGIONS_ETSI}, + {.ccode = "ZA", .region = REGIONS_ETSI}, + {.ccode = "ZM", .region = REGIONS_ETSI}, + {.ccode = "ZW", .region = REGIONS_ETSI}, +}; + +uint8_t get_ccode_region(char * ccode) +{ + int i, cnt; + AICWFDBG(LOGDEBUG, "%s ccode:%s\r\n", __func__, ccode); + + cnt = 
sizeof(reg_tables) / sizeof(reg_tables[0]); + + for (i = 0; i < cnt; i++) { + if (reg_tables[i].ccode[0] == ccode[0] && + reg_tables[i].ccode[1] == ccode[1]) { + AICWFDBG(LOGDEBUG, "region: %d\r\n", reg_tables[i].region); + return reg_tables[i].region; + } + } + AICWFDBG(LOGDEBUG, "use default region\r\n"); + return REGIONS_DEFAULT; +} + +u8 get_region_index(char * name) +{ + if (strncmp(name, "SRRC", 4) == 0) + return REGIONS_SRRC; + else if (strncmp(name, "FCC", 3) == 0) + return REGIONS_FCC; + else if (strncmp(name, "ETSI", 4) == 0) + return REGIONS_ETSI; + else if (strncmp(name, "JP", 2) == 0) + return REGIONS_JP; + else if (strncmp(name, "UNSET", 5) == 0) + return REGIONS_DEFAULT; + + return REGIONS_DEFAULT; +} + + +#ifdef CONFIG_POWER_LIMIT +#define POWER_LIMIT_INVALID_VAL POWER_LEVEL_INVALID_VAL + +#define POWER_LIMIT_CC_MATCHED_BIT (0x1U << 0) + +#define MAX_2_4G_BW_NUM 2 +#define MAX_5G_BW_NUM 3 +#define MAX_REGION_NUM 5 + + +typedef struct +{ + u8_l ch_cnt_2g4[MAX_2_4G_BW_NUM]; + u8_l ch_cnt_5g[MAX_5G_BW_NUM]; + u8_l ch_num_2g4[MAX_2_4G_BW_NUM][MAC_DOMAINCHANNEL_24G_MAX]; + u8_l ch_num_5g[MAX_5G_BW_NUM][MAC_DOMAINCHANNEL_5G_MAX]; + s8_l max_pwr_2g4[MAX_2_4G_BW_NUM][MAC_DOMAINCHANNEL_24G_MAX]; + s8_l max_pwr_5g[MAX_5G_BW_NUM][MAC_DOMAINCHANNEL_5G_MAX]; +} txpwr_lmt_info_t; + +typedef struct +{ + u32_l flags; + txpwr_lmt_info_t txpwr_lmt[MAX_REGION_NUM]; +} powerlimit_info_t; + +powerlimit_info_t powerlimit_info = {0,}; +#endif + +#ifndef CONFIG_ROM_PATCH_EN +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0)) +static inline struct inode *file_inode(const struct file *f) +{ + return f->f_dentry->d_inode; +} +#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0)) */ + + +#endif/* !CONFIG_ROM_PATCH_EN */ + + + +#ifdef CONFIG_RWNX_TL4 +/** + * rwnx_plat_tl4_fw_upload() - Load the requested FW into embedded side. + * + * @rwnx_plat: pointer to platform structure + * @fw_addr: Virtual address where the fw must be loaded + * @filename: Name of the fw. 
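A short usage sketch for get_ccode_region() above: only the first two characters of the country code are compared, and anything not in reg_tables falls back to REGIONS_DEFAULT (illustrative calls, results follow the table entries):

uint8_t region;

region = get_ccode_region("DE");    /* listed in reg_tables -> REGIONS_ETSI   */
region = get_ccode_region("ZZ");    /* not listed           -> REGIONS_DEFAULT */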
+ * + * Load a fw, stored as a hex file, into the specified address + */ +static int rwnx_plat_tl4_fw_upload(struct rwnx_plat *rwnx_plat, u8* fw_addr, + char *filename) +{ + struct device *dev = rwnx_platform_get_dev(rwnx_plat); + const struct firmware *fw; + int err = 0; + u32 *dst; + u8 const *file_data; + char typ0, typ1; + u32 addr0, addr1; + u32 dat0, dat1; + int remain; + + err = request_firmware(&fw, filename, dev); + if (err) { + return err; + } + file_data = fw->data; + remain = fw->size; + + /* Copy the file on the Embedded side */ + dev_dbg(dev, "\n### Now copy %s firmware, @ = %p\n", filename, fw_addr); + + /* Walk through all the lines of the configuration file */ + while (remain >= 16) { + u32 data, offset; + + if (sscanf(file_data, "%c:%08X %04X", &typ0, &addr0, &dat0) != 3) + break; + if ((addr0 & 0x01) != 0) { + addr0 = addr0 - 1; + dat0 = 0; + } else { + file_data += 16; + remain -= 16; + } + if ((remain < 16) || + (sscanf(file_data, "%c:%08X %04X", &typ1, &addr1, &dat1) != 3) || + (typ1 != typ0) || (addr1 != (addr0 + 1))) { + typ1 = typ0; + addr1 = addr0 + 1; + dat1 = 0; + } else { + file_data += 16; + remain -= 16; + } + + if (typ0 == 'C') { + offset = 0x00200000; + if ((addr1 % 4) == 3) + offset += 2*(addr1 - 3); + else + offset += 2*(addr1 + 1); + + data = dat1 | (dat0 << 16); + } else { + offset = 2*(addr1 - 1); + data = dat0 | (dat1 << 16); + } + dst = (u32 *)(fw_addr + offset); + *dst = data; + } + + release_firmware(fw); + + return err; +} +#endif + +#if 0 +/** + * rwnx_plat_bin_fw_upload() - Load the requested binary FW into embedded side. + * + * @rwnx_plat: pointer to platform structure + * @fw_addr: Virtual address where the fw must be loaded + * @filename: Name of the fw. + * + * Load a fw, stored as a binary file, into the specified address + */ +static int rwnx_plat_bin_fw_upload(struct rwnx_plat *rwnx_plat, u8* fw_addr, + char *filename) +{ + const struct firmware *fw; + struct device *dev = rwnx_platform_get_dev(rwnx_plat); + int err = 0; + unsigned int i, size; + u32 *src, *dst; + + err = request_firmware(&fw, filename, dev); + if (err) { + return err; + } + + /* Copy the file on the Embedded side */ + dev_dbg(dev, "\n### Now copy %s firmware, @ = %p\n", filename, fw_addr); + + src = (u32 *)fw->data; + dst = (u32 *)fw_addr; + size = (unsigned int)fw->size; + + /* check potential platform bug on multiple stores vs memcpy */ + for (i = 0; i < size; i += 4) { + *dst++ = *src++; + } + + release_firmware(fw); + + return err; +} +#endif + +#define MD5(x) x[0],x[1],x[2],x[3],x[4],x[5],x[6],x[7],x[8],x[9],x[10],x[11],x[12],x[13],x[14],x[15] +#define MD5PINRT "file md5:%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\r\n" + +static int rwnx_load_firmware(u32 **fw_buf, const char *name, struct device *device) +{ + +#ifdef CONFIG_USE_FW_REQUEST + const struct firmware *fw = NULL; + u32 *dst = NULL; + void *buffer=NULL; + MD5_CTX md5; + unsigned char decrypt[16]; + int size = 0; + int ret = 0; + + AICWFDBG(LOGINFO, "%s: request firmware = %s \n", __func__ ,name); + + ret = request_firmware(&fw, name, NULL); + + if (ret < 0) { + AICWFDBG(LOGERROR, "Load %s fail\n", name); + release_firmware(fw); + return -1; + } + + size = fw->size; + dst = (u32 *)fw->data; + + if (size <= 0) { + AICWFDBG(LOGERROR, "wrong size of firmware file\n"); + release_firmware(fw); + return -1; + } + + buffer = vmalloc(size); + memset(buffer, 0, size); + memcpy(buffer, dst, size); + + *fw_buf = buffer; + + MD5Init(&md5); + MD5Update(&md5, (unsigned char *)buffer, size); + 
MD5Final(&md5, decrypt); + AICWFDBG(LOGINFO, MD5PINRT, MD5(decrypt)); + + release_firmware(fw); + + return size; +#else + void *buffer = NULL; + char *path = NULL; + struct file *fp = NULL; + int size = 0, len = 0;// i = 0; + ssize_t rdlen = 0; + //u32 *src = NULL, *dst = NULL; + MD5_CTX md5; + unsigned char decrypt[16]; + + /* get the firmware path */ + path = __getname(); + if (!path) { + *fw_buf = NULL; + return -1; + } + + len = snprintf(path, FW_PATH_MAX_LEN, "%s/%s", aic_fw_path, name); + + //len = snprintf(path, FW_PATH_MAX_LEN, "%s", name); + if (len >= FW_PATH_MAX_LEN) { + AICWFDBG(LOGERROR, "%s: %s file's path too long\n", __func__, name); + *fw_buf = NULL; + __putname(path); + return -1; + } + + AICWFDBG(LOGINFO, "%s :firmware path = %s \n", __func__, path); + + /* open the firmware file */ + fp = filp_open(path, O_RDONLY, 0); + if (IS_ERR_OR_NULL(fp)) { + AICWFDBG(LOGERROR, "%s: %s file failed to open\n", __func__, name); + *fw_buf = NULL; + __putname(path); + fp = NULL; + return -1; + } + + size = i_size_read(file_inode(fp)); + if (size <= 0) { + AICWFDBG(LOGERROR, "%s: %s file size invalid %d\n", __func__, name, size); + *fw_buf = NULL; + __putname(path); + filp_close(fp, NULL); + fp = NULL; + return -1; + } + + /* start to read from firmware file */ + buffer = vmalloc(size); + if (!buffer) { + *fw_buf = NULL; + __putname(path); + filp_close(fp, NULL); + fp = NULL; + return -1; + } + + #if LINUX_VERSION_CODE > KERNEL_VERSION(4, 13, 16) + rdlen = kernel_read(fp, buffer, size, &fp->f_pos); + #else + rdlen = kernel_read(fp, fp->f_pos, buffer, size); + #endif + + if (size != rdlen) { + AICWFDBG(LOGERROR, "%s: %s file rdlen invalid %d\n", __func__, name, (int)rdlen); + *fw_buf = NULL; + __putname(path); + filp_close(fp, NULL); + fp = NULL; + vfree(buffer); + buffer = NULL; + return -1; + } + if (rdlen > 0) { + fp->f_pos += rdlen; + } + +#if 0 + /*start to transform the data format*/ + src = (u32 *)buffer; + dst = (u32 *)vmalloc(size); + + if (!dst) { + *fw_buf = NULL; + __putname(path); + filp_close(fp, NULL); + fp = NULL; + vfree(buffer); + buffer = NULL; + return -1; + } + + for (i = 0; i < (size/4); i++) { + dst[i] = src[i]; + } +#endif + + __putname(path); + filp_close(fp, NULL); + fp = NULL; + //vfree(buffer); + //buffer = NULL; + *fw_buf = (u32*)buffer; + + MD5Init(&md5); + MD5Update(&md5, (unsigned char *)buffer, size); + MD5Final(&md5, decrypt); + + AICWFDBG(LOGINFO, MD5PINRT, MD5(decrypt)); + + return size; +#endif +} + +static void rwnx_restore_firmware(u32 **fw_buf) +{ + vfree(*fw_buf); + *fw_buf = NULL; +} + + +/* buffer is allocated by kzalloc */ +int rwnx_request_firmware_common(struct rwnx_hw *rwnx_hw, u32** buffer, const char *filename) +{ + int size; + + AICWFDBG(LOGINFO, "### Load file %s\n", filename); + + size = rwnx_load_firmware(buffer, filename, NULL); + + return size; +} + +void rwnx_release_firmware_common(u32** buffer) +{ + rwnx_restore_firmware(buffer); +} + + + +/** + * rwnx_plat_bin_fw_upload_2() - Load the requested binary FW into embedded side. + * + * @rwnx_hw: Main driver data + * @fw_addr: Address where the fw must be loaded + * @filename: Name of the fw. 
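When CONFIG_USE_FW_REQUEST is not set, the loader goes through the VFS directly: filp_open(), a version-dependent kernel_read(), and a vmalloc()'d buffer. A condensed sketch of that path, with error handling trimmed and names chosen for illustration:

#include <linux/fs.h>
#include <linux/err.h>
#include <linux/vmalloc.h>
#include <linux/version.h>

static int demo_read_file(const char *path, void **out)
{
    struct file *fp;
    loff_t size;
    ssize_t rdlen;
    void *buf;

    fp = filp_open(path, O_RDONLY, 0);
    if (IS_ERR(fp))
        return PTR_ERR(fp);

    size = i_size_read(file_inode(fp));
    buf = vmalloc(size);
    if (!buf) {
        filp_close(fp, NULL);
        return -ENOMEM;
    }

#if LINUX_VERSION_CODE > KERNEL_VERSION(4, 13, 16)
    rdlen = kernel_read(fp, buf, size, &fp->f_pos);     /* new signature */
#else
    rdlen = kernel_read(fp, fp->f_pos, buf, size);      /* pre-4.14 signature */
#endif
    filp_close(fp, NULL);

    if (rdlen != size) {
        vfree(buf);
        return -EIO;
    }
    *out = buf;
    return (int)size;
}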
+ * + * Load a fw, stored as a binary file, into the specified address + */ + +int rwnx_plat_bin_fw_upload_2(struct rwnx_hw *rwnx_hw, u32 fw_addr, + char *filename) +{ + int err = 0; + unsigned int i = 0, size; +// u32 *src; + u32 *dst = NULL; + + /* Copy the file on the Embedded side */ + AICWFDBG(LOGINFO, "### Upload %s firmware, @ = %x\n", filename, fw_addr); + + size = rwnx_request_firmware_common(rwnx_hw, &dst, filename); + if (!dst) { + AICWFDBG(LOGERROR, "No such file or directory\n"); + return -1; + } + if (size <= 0) { + AICWFDBG(LOGERROR, "wrong size of firmware file\n"); + dst = NULL; + err = -1; + return -1; + } + + AICWFDBG(LOGINFO, "size=%d, dst[0]=%x\n", size, dst[0]); + if (size > 512) { + for (; i < (size - 512); i += 512) { + //printk("wr blk 0: %p -> %x\r\n", dst + i / 4, fw_addr + i); + err = rwnx_send_dbg_mem_block_write_req(rwnx_hw, fw_addr + i, 512, dst + i / 4); + if (err) { + AICWFDBG(LOGERROR, "bin upload fail: %x, err:%d\r\n", fw_addr + i, err); + break; + } + } + } + if (!err && (i < size)) { + //printk("wr blk 1: %p -> %x\r\n", dst + i / 4, fw_addr + i); + err = rwnx_send_dbg_mem_block_write_req(rwnx_hw, fw_addr + i, size - i, dst + i / 4); + if (err) { + AICWFDBG(LOGERROR, "bin upload fail: %x, err:%d\r\n", fw_addr + i, err); + } + } + + if (dst) { + rwnx_release_firmware_common(&dst); + } + + return err; +} + + + +#ifndef CONFIG_ROM_PATCH_EN +#if defined(CONFIG_PLATFORM_ALLWINNER) || defined(CONFIG_NANOPI_M4) +#if 0 +static int aic_load_firmware(u32 ** fw_buf, const char *name, + struct device *device) +{ + void *buffer=NULL; + char *path=NULL; + struct file *fp=NULL; + int size = 0, len=0, i=0; + ssize_t rdlen=0; + u32 *src=NULL, *dst = NULL; + RWNX_DBG(RWNX_FN_ENTRY_STR); + + /* get the firmware path */ + path = __getname(); + if (!path){ + *fw_buf=NULL; + return -1; + } + + len = snprintf(path, FW_PATH_MAX_LEN, "%s/%s",aic_fw_path, name); + if (len >= FW_PATH_MAX_LEN) { + printk("%s: %s file's path too long\n", __func__, name); + *fw_buf=NULL; + __putname(path); + return -1; + } + + printk("%s :firmware path = %s \n", __func__ ,path); + + + /* open the firmware file */ + fp=filp_open(path, O_RDONLY, 0); + if(IS_ERR(fp) || (!fp)){ + printk("%s: %s file failed to open\n", __func__, name); + if(IS_ERR(fp)) + printk("is_Err\n"); + *fw_buf=NULL; + __putname(path); + fp=NULL; + return -1; + } + + size = i_size_read(file_inode(fp)); + if(size<=0){ + printk("%s: %s file size invalid %d\n", __func__, name, size); + *fw_buf=NULL; + __putname(path); + filp_close(fp,NULL); + fp=NULL; + return -1; + } + + /* start to read from firmware file */ + buffer = kzalloc(size, GFP_KERNEL); + if(!buffer){ + *fw_buf=NULL; + __putname(path); + filp_close(fp,NULL); + fp=NULL; + return -1; + } + + + #if LINUX_VERSION_CODE > KERNEL_VERSION(4, 13, 16) + rdlen = kernel_read(fp, buffer, size, &fp->f_pos); + #else + rdlen = kernel_read(fp, fp->f_pos, buffer, size); + #endif + + if(size != rdlen){ + printk("%s: %s file rdlen invalid %ld\n", __func__, name, (long int)rdlen); + *fw_buf=NULL; + __putname(path); + filp_close(fp,NULL); + fp=NULL; + kfree(buffer); + buffer=NULL; + return -1; + } + if(rdlen > 0){ + fp->f_pos += rdlen; + } + + + /*start to transform the data format*/ + src = (u32*)buffer; + printk("malloc dst\n"); + dst = (u32*)kzalloc(size,GFP_KERNEL); + + if(!dst){ + *fw_buf=NULL; + __putname(path); + filp_close(fp,NULL); + fp=NULL; + kfree(buffer); + buffer=NULL; + return -1; + } + + for(i=0;i<(size/4);i++){ + dst[i] = src[i]; + } + + __putname(path); + filp_close(fp,NULL); + 
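rwnx_plat_bin_fw_upload_2() above streams the image to the target in 512-byte blocks followed by the remainder. A standalone sketch of that chunking, where write_block() stands in for rwnx_send_dbg_mem_block_write_req():

#include <stdint.h>

static int demo_upload(uint32_t addr, const uint32_t *buf, unsigned int size,
                       int (*write_block)(uint32_t, unsigned int, const uint32_t *))
{
    unsigned int i = 0;
    int err = 0;

    if (size > 512)                                   /* full 512-byte blocks first */
        for (; i < size - 512 && !err; i += 512)
            err = write_block(addr + i, 512, buf + i / 4);

    if (!err && i < size)                             /* then whatever is left */
        err = write_block(addr + i, size - i, buf + i / 4);

    return err;
}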
fp=NULL; + kfree(buffer); + buffer=NULL; + *fw_buf = dst; + + return size; + +} +#endif +#endif +#endif + + +#ifndef CONFIG_ROM_PATCH_EN +#if defined(CONFIG_PLATFORM_ALLWINNER) || defined(CONFIG_NANOPI_M4) +#if 0 +static int rwnx_plat_bin_fw_upload_android(struct rwnx_hw *rwnx_hw, u32 fw_addr, + char *filename) +{ + struct device *dev = rwnx_platform_get_dev(rwnx_hw->plat); + unsigned int i=0; + int size; + u32 *dst=NULL; + int err=0; + + + /* load aic firmware */ + size = aic_load_firmware(&dst, filename, dev); + if(size<=0){ + printk("wrong size of firmware file\n"); + kfree(dst); + dst = NULL; + return -1; + } + + + /* Copy the file on the Embedded side */ + printk("\n### Upload %s firmware, @ = %x size=%d\n", filename, fw_addr, size); + + if (size > 1024) {// > 1KB data + for (i = 0; i < (size - 1024); i += 1024) {//each time write 1KB + err = rwnx_send_dbg_mem_block_write_req(rwnx_hw, fw_addr + i, 1024, dst + i / 4); + if (err) { + printk("bin upload fail: %x, err:%d\r\n", fw_addr + i, err); + break; + } + } + } + + if (!err && (i < size)) {// <1KB data + err = rwnx_send_dbg_mem_block_write_req(rwnx_hw, fw_addr + i, size - i, dst + i / 4); + if (err) { + printk("bin upload fail: %x, err:%d\r\n", fw_addr + i, err); + } + } + + if (dst) { + kfree(dst); + dst = NULL; + } + + return err; +} +#endif +#endif +#endif + + + +#if 0 +#ifndef CONFIG_RWNX_TL4 +#define IHEX_REC_DATA 0 +#define IHEX_REC_EOF 1 +#define IHEX_REC_EXT_SEG_ADD 2 +#define IHEX_REC_START_SEG_ADD 3 +#define IHEX_REC_EXT_LIN_ADD 4 +#define IHEX_REC_START_LIN_ADD 5 + +/** + * rwnx_plat_ihex_fw_upload() - Load the requested intel hex 8 FW into embedded side. + * + * @rwnx_plat: pointer to platform structure + * @fw_addr: Virtual address where the fw must be loaded + * @filename: Name of the fw. + * + * Load a fw, stored as a ihex file, into the specified address. 
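Each Intel HEX record handled below has the form ":LLAAAATT<data>CC", where LL is the byte count, AAAA the address, TT the record type, and CC a checksum that makes the byte sum of the record zero. A small userspace sketch that parses and verifies one data record:

#include <stdint.h>
#include <stdio.h>

static uint8_t rd8(const char *s)
{
    unsigned int v;
    sscanf(s, "%2x", &v);
    return (uint8_t)v;
}

int main(void)
{
    const char *rec = ":0300300002337A1E";   /* data record: 3 bytes at 0x0030 */
    uint8_t len = rd8(rec + 1);
    uint16_t addr = (rd8(rec + 3) << 8) | rd8(rec + 5);
    uint8_t type = rd8(rec + 7);
    uint8_t sum = 0;
    int i;

    for (i = 0; i < len + 5; i++)            /* len + addr + type + data + csum */
        sum += rd8(rec + 1 + 2 * i);

    printf("len=%u addr=0x%04x type=%u checksum %s\n",
           len, addr, type, sum == 0 ? "ok" : "bad");
    return 0;
}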
+ */ +static int rwnx_plat_ihex_fw_upload(struct rwnx_plat *rwnx_plat, u8* fw_addr, + char *filename) +{ + const struct firmware *fw; + struct device *dev = rwnx_platform_get_dev(rwnx_plat); + u8 const *src, *end; + u32 *dst; + u16 haddr, segaddr, addr; + u32 hwaddr; + u8 load_fw, byte_count, checksum, csum, rec_type; + int err, rec_idx; + char hex_buff[9]; + + err = request_firmware(&fw, filename, dev); + if (err) { + return err; + } + + /* Copy the file on the Embedded side */ + dev_dbg(dev, "\n### Now copy %s firmware, @ = %p\n", filename, fw_addr); + + src = fw->data; + end = src + (unsigned int)fw->size; + haddr = 0; + segaddr = 0; + load_fw = 1; + err = -EINVAL; + rec_idx = 0; + hwaddr = 0; + +#define IHEX_READ8(_val, _cs) { \ + hex_buff[2] = 0; \ + strncpy(hex_buff, src, 2); \ + if (kstrtou8(hex_buff, 16, &_val)) \ + goto end; \ + src += 2; \ + if (_cs) \ + csum += _val; \ + } + +#define IHEX_READ16(_val) { \ + hex_buff[4] = 0; \ + strncpy(hex_buff, src, 4); \ + if (kstrtou16(hex_buff, 16, &_val)) \ + goto end; \ + src += 4; \ + csum += (_val & 0xff) + (_val >> 8); \ + } + +#define IHEX_READ32(_val) { \ + hex_buff[8] = 0; \ + strncpy(hex_buff, src, 8); \ + if (kstrtouint(hex_buff, 16, &_val)) \ + goto end; \ + src += 8; \ + csum += (_val & 0xff) + ((_val >> 8) & 0xff) + \ + ((_val >> 16) & 0xff) + (_val >> 24); \ + } + +#define IHEX_READ32_PAD(_val, _nb) { \ + memset(hex_buff, '0', 8); \ + hex_buff[8] = 0; \ + strncpy(hex_buff, src, (2 * _nb)); \ + if (kstrtouint(hex_buff, 16, &_val)) \ + goto end; \ + src += (2 * _nb); \ + csum += (_val & 0xff) + ((_val >> 8) & 0xff) + \ + ((_val >> 16) & 0xff) + (_val >> 24); \ +} + + /* loop until end of file is read*/ + while (load_fw) { + rec_idx++; + csum = 0; + + /* Find next colon start code */ + while (*src != ':') { + src++; + if ((src + 3) >= end) /* 3 = : + rec_len */ + goto end; + } + src++; + + /* Read record len */ + IHEX_READ8(byte_count, 1); + if ((src + (byte_count * 2) + 8) >= end) /* 8 = rec_addr + rec_type + chksum */ + goto end; + + /* Read record addr */ + IHEX_READ16(addr); + + /* Read record type */ + IHEX_READ8(rec_type, 1); + + switch(rec_type) { + case IHEX_REC_DATA: + { + /* Update destination address */ + dst = (u32 *) (fw_addr + hwaddr + addr); + + while (byte_count) { + u32 val; + if (byte_count >= 4) { + IHEX_READ32(val); + byte_count -= 4; + } else { + IHEX_READ32_PAD(val, byte_count); + byte_count = 0; + } + *dst++ = __swab32(val); + } + break; + } + case IHEX_REC_EOF: + { + load_fw = 0; + err = 0; + break; + } + case IHEX_REC_EXT_SEG_ADD: /* Extended Segment Address */ + { + IHEX_READ16(segaddr); + hwaddr = (haddr << 16) + (segaddr << 4); + break; + } + case IHEX_REC_EXT_LIN_ADD: /* Extended Linear Address */ + { + IHEX_READ16(haddr); + hwaddr = (haddr << 16) + (segaddr << 4); + break; + } + case IHEX_REC_START_LIN_ADD: /* Start Linear Address */ + { + u32 val; + IHEX_READ32(val); /* need to read for checksum */ + break; + } + case IHEX_REC_START_SEG_ADD: + default: + { + dev_err(dev, "ihex: record type %d not supported\n", rec_type); + load_fw = 0; + } + } + + /* Read and compare checksum */ + IHEX_READ8(checksum, 0); + if (checksum != (u8)(~csum + 1)) + goto end; + } + +#undef IHEX_READ8 +#undef IHEX_READ16 +#undef IHEX_READ32 +#undef IHEX_READ32_PAD + + end: + release_firmware(fw); + + if (err) + dev_err(dev, "%s: Invalid ihex record around line %d\n", filename, rec_idx); + + return err; +} +#endif /* CONFIG_RWNX_TL4 */ + +#ifndef CONFIG_RWNX_SDM +/** + * rwnx_plat_get_rf() - Retrun the RF used in the platform 
+ * + * @rwnx_plat: pointer to platform structure + */ +static u32 rwnx_plat_get_rf(struct rwnx_plat *rwnx_plat) +{ + u32 ver; + ver = RWNX_REG_READ(rwnx_plat, RWNX_ADDR_SYSTEM, MDM_HDMCONFIG_ADDR); + + ver = __MDM_PHYCFG_FROM_VERS(ver); + WARN(((ver != MDM_PHY_CONFIG_TRIDENT) && + (ver != MDM_PHY_CONFIG_ELMA) && + (ver != MDM_PHY_CONFIG_KARST)), + "bad phy version 0x%08x\n", ver); + + return ver; +} + +/** + * rwnx_plat_stop_agcfsm() - Stop a AGC state machine + * + * @rwnx_plat: pointer to platform structure + * @agg_reg: Address of the agccntl register (within RWNX_ADDR_SYSTEM) + * @agcctl: Updated with value of the agccntl rgister before stop + * @memclk: Updated with value of the clock register before stop + * @agc_ver: Version of the AGC load procedure + * @clkctrladdr: Indicates which AGC clock register should be accessed + */ +static void rwnx_plat_stop_agcfsm(struct rwnx_plat *rwnx_plat, int agc_reg, + u32 *agcctl, u32 *memclk, u8 agc_ver, + u32 clkctrladdr) +{ + /* First read agcctnl and clock registers */ + *memclk = RWNX_REG_READ(rwnx_plat, RWNX_ADDR_SYSTEM, clkctrladdr); + + /* Stop state machine : xxAGCCNTL0[AGCFSMRESET]=1 */ + *agcctl = RWNX_REG_READ(rwnx_plat, RWNX_ADDR_SYSTEM, agc_reg); + RWNX_REG_WRITE((*agcctl) | BIT(12), rwnx_plat, RWNX_ADDR_SYSTEM, agc_reg); + + /* Force clock */ + if (agc_ver > 0) { + /* CLKGATEFCTRL0[AGCCLKFORCE]=1 */ + RWNX_REG_WRITE((*memclk) | BIT(29), rwnx_plat, RWNX_ADDR_SYSTEM, + clkctrladdr); + } else { + /* MEMCLKCTRL0[AGCMEMCLKCTRL]=0 */ + RWNX_REG_WRITE((*memclk) & ~BIT(3), rwnx_plat, RWNX_ADDR_SYSTEM, + clkctrladdr); + } +} + + +/** + * rwnx_plat_start_agcfsm() - Restart a AGC state machine + * + * @rwnx_plat: pointer to platform structure + * @agg_reg: Address of the agccntl register (within RWNX_ADDR_SYSTEM) + * @agcctl: value of the agccntl register to restore + * @memclk: value of the clock register to restore + * @agc_ver: Version of the AGC load procedure + * @clkctrladdr: Indicates which AGC clock register should be accessed + */ +static void rwnx_plat_start_agcfsm(struct rwnx_plat *rwnx_plat, int agc_reg, + u32 agcctl, u32 memclk, u8 agc_ver, + u32 clkctrladdr) +{ + + /* Release clock */ + if (agc_ver > 0) + /* CLKGATEFCTRL0[AGCCLKFORCE]=0 */ + RWNX_REG_WRITE(memclk & ~BIT(29), rwnx_plat, RWNX_ADDR_SYSTEM, + clkctrladdr); + else + /* MEMCLKCTRL0[AGCMEMCLKCTRL]=1 */ + RWNX_REG_WRITE(memclk | BIT(3), rwnx_plat, RWNX_ADDR_SYSTEM, + clkctrladdr); + + /* Restart state machine: xxAGCCNTL0[AGCFSMRESET]=0 */ + RWNX_REG_WRITE(agcctl & ~BIT(12), rwnx_plat, RWNX_ADDR_SYSTEM, agc_reg); +} +#endif + +/** + * rwnx_plat_fcu_load() - Load FCU (Fith Chain Unit) ucode + * + * @rwnx_hw: main driver data + * + * c.f Modem UM (AGC/CCA initialization) + */ +static int rwnx_plat_fcu_load(struct rwnx_hw *rwnx_hw) +{ + int ret=0; +#ifndef CONFIG_RWNX_SDM + struct rwnx_plat *rwnx_plat = rwnx_hw->plat; + u32 agcctl, memclk; + +#ifndef CONFIG_RWNX_FHOST + /* By default, we consider that there is only one RF in the system */ + rwnx_hw->phy.cnt = 1; +#endif // CONFIG_RWNX_FHOST + + if (rwnx_plat_get_rf(rwnx_plat) != MDM_PHY_CONFIG_ELMA) + /* No FCU for PHYs other than Elma */ + return 0; + + agcctl = RWNX_REG_READ(rwnx_plat, RWNX_ADDR_SYSTEM, RIU_RWNXAGCCNTL_ADDR); + if (!__RIU_FCU_PRESENT(agcctl)) + /* No FCU present in this version */ + return 0; + +#ifndef CONFIG_RWNX_FHOST + /* FCU is present */ + #ifdef USE_5G + rwnx_hw->phy.cnt = 2; + rwnx_hw->phy.sec_chan.band = NL80211_BAND_5GHZ; + rwnx_hw->phy.sec_chan.type = PHY_CHNL_BW_20; + 
rwnx_hw->phy.sec_chan.prim20_freq = 5500; + rwnx_hw->phy.sec_chan.center_freq1 = 5500; + rwnx_hw->phy.sec_chan.center_freq2 = 0; + #endif +#endif // CONFIG_RWNX_FHOST + + rwnx_plat_stop_agcfsm(rwnx_plat, FCU_RWNXFCAGCCNTL_ADDR, &agcctl, &memclk, 0, + MDM_MEMCLKCTRL0_ADDR); + + ret = rwnx_plat_bin_fw_upload(rwnx_plat, + RWNX_ADDR(rwnx_plat, RWNX_ADDR_SYSTEM, PHY_FCU_UCODE_ADDR), + RWNX_FCU_FW_NAME); + + rwnx_plat_start_agcfsm(rwnx_plat, FCU_RWNXFCAGCCNTL_ADDR, agcctl, memclk, 0, + MDM_MEMCLKCTRL0_ADDR); +#endif + + return ret; +} + +/** + * rwnx_is_new_agc_load() - Return is new agc clock register should be used + * + * @rwnx_plat: platform data + * @rf: rf in used + * + * c.f Modem UM (AGC/CCA initialization) + */ +#ifndef CONFIG_RWNX_SDM +static u8 rwnx_get_agc_load_version(struct rwnx_plat *rwnx_plat, u32 rf, u32 *clkctrladdr) +{ + u8 agc_load_ver = 0; + u32 agc_ver; + u32 regval; + + /* Trident and Elma PHY use old method */ + if (rf != MDM_PHY_CONFIG_KARST) { + *clkctrladdr = MDM_MEMCLKCTRL0_ADDR; + return 0; + } + + /* Get the FPGA signature */ + regval = RWNX_REG_READ(rwnx_plat, RWNX_ADDR_SYSTEM, SYSCTRL_SIGNATURE_ADDR); + + if (__FPGA_TYPE(regval) == 0xC0CA) + *clkctrladdr = CRM_CLKGATEFCTRL0_ADDR; + else + *clkctrladdr = MDM_CLKGATEFCTRL0_ADDR; + + /* Read RIU version register */ + agc_ver = RWNX_REG_READ(rwnx_plat, RWNX_ADDR_SYSTEM, RIU_RWNXVERSION_ADDR); + agc_load_ver = __RIU_AGCLOAD_FROM_VERS(agc_ver); + + return agc_load_ver; +} +#endif /* CONFIG_RWNX_SDM */ + +/** + * rwnx_plat_agc_load() - Load AGC ucode + * + * @rwnx_plat: platform data + * c.f Modem UM (AGC/CCA initialization) + */ +static int rwnx_plat_agc_load(struct rwnx_plat *rwnx_plat) +{ + int ret = 0; +#ifndef CONFIG_RWNX_SDM + u32 agc = 0, agcctl, memclk; + u32 clkctrladdr; + u32 rf = rwnx_plat_get_rf(rwnx_plat); + u8 agc_ver; + + switch (rf) { + case MDM_PHY_CONFIG_TRIDENT: + agc = AGC_RWNXAGCCNTL_ADDR; + break; + case MDM_PHY_CONFIG_ELMA: + case MDM_PHY_CONFIG_KARST: + agc = RIU_RWNXAGCCNTL_ADDR; + break; + default: + return -1; + } + + agc_ver = rwnx_get_agc_load_version(rwnx_plat, rf, &clkctrladdr); + + rwnx_plat_stop_agcfsm(rwnx_plat, agc, &agcctl, &memclk, agc_ver, clkctrladdr); + + ret = rwnx_plat_bin_fw_upload(rwnx_plat, + RWNX_ADDR(rwnx_plat, RWNX_ADDR_SYSTEM, PHY_AGC_UCODE_ADDR), + RWNX_AGC_FW_NAME); + + if (!ret && (agc_ver == 1)) { + /* Run BIST to ensure that the AGC RAM was correctly loaded */ + RWNX_REG_WRITE(BIT(28), rwnx_plat, RWNX_ADDR_SYSTEM, + RIU_RWNXDYNAMICCONFIG_ADDR); + while (RWNX_REG_READ(rwnx_plat, RWNX_ADDR_SYSTEM, + RIU_RWNXDYNAMICCONFIG_ADDR) & BIT(28)); + + if (!(RWNX_REG_READ(rwnx_plat, RWNX_ADDR_SYSTEM, + RIU_AGCMEMBISTSTAT_ADDR) & BIT(0))) { + dev_err(rwnx_platform_get_dev(rwnx_plat), + "AGC RAM not loaded correctly 0x%08x\n", + RWNX_REG_READ(rwnx_plat, RWNX_ADDR_SYSTEM, + RIU_AGCMEMSIGNATURESTAT_ADDR)); + ret = -EIO; + } + } + + rwnx_plat_start_agcfsm(rwnx_plat, agc, agcctl, memclk, agc_ver, clkctrladdr); + +#endif + return ret; +} + +/** + * rwnx_ldpc_load() - Load LDPC RAM + * + * @rwnx_hw: Main driver data + * c.f Modem UM (LDPC initialization) + */ +static int rwnx_ldpc_load(struct rwnx_hw *rwnx_hw) +{ +#ifndef CONFIG_RWNX_SDM + struct rwnx_plat *rwnx_plat = rwnx_hw->plat; + u32 rf = rwnx_plat_get_rf(rwnx_plat); + u32 phy_feat = RWNX_REG_READ(rwnx_plat, RWNX_ADDR_SYSTEM, MDM_HDMCONFIG_ADDR); + + if ((rf != MDM_PHY_CONFIG_KARST) || + (phy_feat & (MDM_LDPCDEC_BIT | MDM_LDPCENC_BIT)) != + (MDM_LDPCDEC_BIT | MDM_LDPCENC_BIT)) { + goto disable_ldpc; + } + + if 
(rwnx_plat_bin_fw_upload(rwnx_plat, + RWNX_ADDR(rwnx_plat, RWNX_ADDR_SYSTEM, PHY_LDPC_RAM_ADDR), + RWNX_LDPC_RAM_NAME)) { + goto disable_ldpc; + } + + return 0; + + disable_ldpc: + rwnx_hw->mod_params->ldpc_on = false; + +#endif /* CONFIG_RWNX_SDM */ + return 0; +} + +/** + * rwnx_plat_lmac_load() - Load FW code + * + * @rwnx_plat: platform data + */ +static int rwnx_plat_lmac_load(struct rwnx_plat *rwnx_plat) +{ + int ret; + + #ifdef CONFIG_RWNX_TL4 + ret = rwnx_plat_tl4_fw_upload(rwnx_plat, + RWNX_ADDR(rwnx_plat, RWNX_ADDR_CPU, RAM_LMAC_FW_ADDR), + RWNX_MAC_FW_NAME); + #else + ret = rwnx_plat_ihex_fw_upload(rwnx_plat, + RWNX_ADDR(rwnx_plat, RWNX_ADDR_CPU, RAM_LMAC_FW_ADDR), + RWNX_MAC_FW_NAME); + if (ret == -ENOENT) + { + ret = rwnx_plat_bin_fw_upload(rwnx_plat, + RWNX_ADDR(rwnx_plat, RWNX_ADDR_CPU, RAM_LMAC_FW_ADDR), + RWNX_MAC_FW_NAME2); + } + #endif + + return ret; +} +#endif + +#ifndef CONFIG_ROM_PATCH_EN +/** + * rwnx_plat_fmac_load() - Load FW code + * + * @rwnx_hw: Main driver data + */ +#if 0 +static int rwnx_plat_fmac_load(struct rwnx_hw *rwnx_hw) +{ + int ret; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + #if defined(CONFIG_NANOPI_M4) || defined(CONFIG_PLATFORM_ALLWINNER) + ret = rwnx_plat_bin_fw_upload_android(rwnx_hw, RAM_FMAC_FW_ADDR, RWNX_MAC_FW_NAME2); + #else + ret = rwnx_plat_bin_fw_upload_2(rwnx_hw, + RAM_FMAC_FW_ADDR, + RWNX_MAC_FW_NAME2); + #endif + return ret; +} +#endif +#endif /* !CONFIG_ROM_PATCH_EN */ + +#if 0 +/** + * rwnx_plat_mpif_sel() - Select the MPIF according to the FPGA signature + * + * @rwnx_plat: platform data + */ +static void rwnx_plat_mpif_sel(struct rwnx_plat *rwnx_plat) +{ +#ifndef CONFIG_RWNX_SDM + u32 regval; + u32 type; + + /* Get the FPGA signature */ + regval = RWNX_REG_READ(rwnx_plat, RWNX_ADDR_SYSTEM, SYSCTRL_SIGNATURE_ADDR); + type = __FPGA_TYPE(regval); + + /* Check if we need to switch to the old MPIF or not */ + if ((type != 0xCAFE) && (type != 0XC0CA) && (regval & 0xF) < 0x3) + { + /* A old FPGA A is used, so configure the FPGA B to use the old MPIF */ + RWNX_REG_WRITE(0x3, rwnx_plat, RWNX_ADDR_SYSTEM, FPGAB_MPIF_SEL_ADDR); + } +#endif +} +#endif +#if (defined(CONFIG_DPD) && !defined(CONFIG_FORCE_DPD_CALIB)) +int is_file_exist(char* name) +{ + char *path = NULL; + struct file *fp = NULL; + int len; + + path = __getname(); + if (!path) { + AICWFDBG(LOGINFO, "%s getname fail\n", __func__); + return -1; + } + + len = snprintf(path, FW_PATH_MAX_LEN, "%s/%s", aic_fw_path, name); + + fp = filp_open(path, O_RDONLY, 0); + if (IS_ERR(fp)) { + __putname(path); + fp = NULL; + return 0; + } else { + __putname(path); + filp_close(fp, NULL); + fp = NULL; + return 1; + } +} +#endif//CONFIG_DPD && !CONFIG_FORCE_DPD_CALIB +/** + * rwnx_plat_patch_load() - Load patch code + * + * @rwnx_hw: Main driver data + */ +#ifdef CONFIG_ROM_PATCH_EN +static int rwnx_plat_patch_load(struct rwnx_hw *rwnx_hw) +{ + int ret = 0; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + if(rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800DC || + rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800DW){ +#ifndef ANDROID_PLATFORM + sprintf(aic_fw_path, "%s/%s", aic_fw_path, "aic8800DC"); +#endif + AICWFDBG(LOGINFO, "testmode=%d\n", testmode); + if (chip_sub_id == 0) { + if (testmode == FW_NORMAL_MODE) { + ret = aicwf_plat_patch_load_8800dc(rwnx_hw); + if (ret) { + AICWFDBG(LOGINFO, "load patch bin fail: %d\n", ret); + return ret; + } + aicwf_patch_config_8800dc(rwnx_hw); + } else if (testmode == FW_RFTEST_MODE) { + ret = aicwf_plat_rftest_load_8800dc(rwnx_hw); + if (ret) { + AICWFDBG(LOGINFO, "load rftest bin 
fail: %d\n", ret); + return ret; + } + } + } else if (chip_sub_id >= 1) { + if (testmode == FW_NORMAL_MODE) { + ret = aicwf_plat_patch_load_8800dc(rwnx_hw); + if (ret) { + AICWFDBG(LOGINFO, "load patch bin fail: %d\n", ret); + return ret; + } + aicwf_patch_config_8800dc(rwnx_hw); + #ifdef CONFIG_DPD + #ifdef CONFIG_FORCE_DPD_CALIB + if (1) { + AICWFDBG(LOGINFO, "dpd calib & write\n"); + ret = aicwf_dpd_calib_8800dc(rwnx_hw, &dpd_res); + if (ret) { + AICWFDBG(LOGINFO, "dpd calib fail: %d\n", ret); + return ret; + } + } + #else + if (is_file_exist(FW_DPDRESULT_NAME_8800DC) == 1) { + AICWFDBG(LOGINFO, "dpd bin load\n"); + ret = aicwf_dpd_result_load_8800dc(rwnx_hw, &dpd_res); + if (ret) { + AICWFDBG(LOGINFO, "load dpd bin fail: %d\n", ret); + return ret; + } + ret = aicwf_dpd_result_apply_8800dc(rwnx_hw, &dpd_res); + if (ret) { + AICWFDBG(LOGINFO, "apply dpd bin fail: %d\n", ret); + return ret; + } + } + #endif + else + #elif defined(CONFIG_LOFT_CALIB) + if (1) { + AICWFDBG(LOGINFO, "loft calib\n"); + ret = aicwf_loft_calib_8800dc(rwnx_hw, &loft_res_local); + if (ret) { + AICWFDBG(LOGINFO, "loft calib fail: %d\n", ret); + return ret; + } + } else + #endif + { + ret = aicwf_misc_ram_init_8800dc(rwnx_hw); + if (ret) { + AICWFDBG(LOGINFO, "misc ram init fail: %d\n", ret); + return ret; + } + } + } else if (testmode == FW_RFTEST_MODE) { + #ifdef CONFIG_DPD + #ifdef CONFIG_FORCE_DPD_CALIB + if (1) { + AICWFDBG(LOGINFO, "patch load\n"); + ret = aicwf_plat_patch_load_8800dc(rwnx_hw); + if (ret) { + AICWFDBG(LOGINFO, "load patch bin fail: %d\n", ret); + return ret; + } + //aicwf_patch_config_8800dc(rwnx_hw); + AICWFDBG(LOGINFO, "dpd calib & write\n"); + ret = aicwf_dpd_calib_8800dc(rwnx_hw, &dpd_res); + if (ret) { + AICWFDBG(LOGINFO, "dpd calib fail: %d\n", ret); + return ret; + } + } + #endif/*CONFIG_FORCE_DPD_CALIB*/ + #elif defined(CONFIG_LOFT_CALIB) + { + AICWFDBG(LOGINFO, "patch load\n"); + ret = aicwf_plat_patch_load_8800dc(rwnx_hw); + if (ret) { + AICWFDBG(LOGINFO, "load patch bin fail: %d\n", ret); + return ret; + } + AICWFDBG(LOGINFO, "loft calib\n"); + ret = aicwf_loft_calib_8800dc(rwnx_hw, &loft_res_local); + if (ret) { + AICWFDBG(LOGINFO, "loft calib fail: %d\n", ret); + return ret; + } + } + #endif/*CONFIG_DPD*/ + AICWFDBG(LOGINFO, "%s load rftest bin\n", __func__); + ret = aicwf_plat_rftest_load_8800dc(rwnx_hw); + if (ret) { + AICWFDBG(LOGINFO, "load rftest bin fail: %d\n", ret); + return ret; + } + /* Note: apply dpd_res after rftest running */ + } else if (testmode == FW_DPDCALIB_MODE) { + #if (defined(CONFIG_DPD) && !defined(CONFIG_FORCE_DPD_CALIB)) + if (is_file_exist(FW_DPDRESULT_NAME_8800DC) == 0) { + AICWFDBG(LOGINFO, "patch load\n"); + ret = aicwf_plat_patch_load_8800dc(rwnx_hw); + if (ret) { + AICWFDBG(LOGINFO, "load patch bin fail: %d\n", ret); + return ret; + } + //aicwf_patch_config_8800dc(rwnx_hw); + AICWFDBG(LOGINFO, "dpd calib & write\n"); + ret = aicwf_dpd_calib_8800dc(rwnx_hw, &dpd_res); + if (ret) { + AICWFDBG(LOGINFO, "dpd calib fail: %d\n", ret); + return ret; + } + ret = aicwf_dpd_result_write_8800dc((void *)&dpd_res, DPD_RESULT_SIZE_8800DC); + if (ret) { + AICWFDBG(LOGINFO, "file write fail: %d\n", ret); + return ret; + } + } + #endif + return 1; // exit calib mode + } + } + } + + return ret; +} +#endif + + +/** + * rwnx_platform_reset() - Reset the platform + * + * @rwnx_plat: platform data + */ +static int rwnx_platform_reset(struct rwnx_plat *rwnx_plat) +{ + u32 regval; + +#if defined(AICWF_USB_SUPPORT) || defined(AICWF_SDIO_SUPPORT) + return 0; +#endif + + /* 
the doc states that SOFT implies FPGA_B_RESET + * adding FPGA_B_RESET is clearer */ + RWNX_REG_WRITE(SOFT_RESET | FPGA_B_RESET, rwnx_plat, + RWNX_ADDR_SYSTEM, SYSCTRL_MISC_CNTL_ADDR); + msleep(100); + + regval = RWNX_REG_READ(rwnx_plat, RWNX_ADDR_SYSTEM, SYSCTRL_MISC_CNTL_ADDR); + + if (regval & SOFT_RESET) { + dev_err(rwnx_platform_get_dev(rwnx_plat), "reset: failed\n"); + return -EIO; + } + + RWNX_REG_WRITE(regval & ~FPGA_B_RESET, rwnx_plat, + RWNX_ADDR_SYSTEM, SYSCTRL_MISC_CNTL_ADDR); + msleep(100); + return 0; +} + +/** + * rwmx_platform_save_config() - Save hardware config before reload + * + * @rwnx_plat: Pointer to platform data + * + * Return configuration registers values. + */ +static void* rwnx_term_save_config(struct rwnx_plat *rwnx_plat) +{ + const u32 *reg_list; + u32 *reg_value, *res; + int i, size = 0; + + if (rwnx_plat->get_config_reg) { + size = rwnx_plat->get_config_reg(rwnx_plat, ®_list); + } + + if (size <= 0) + return NULL; + + res = kmalloc(sizeof(u32) * size, GFP_KERNEL); + if (!res) + return NULL; + + reg_value = res; + for (i = 0; i < size; i++) { + *reg_value++ = RWNX_REG_READ(rwnx_plat, RWNX_ADDR_SYSTEM, + *reg_list++); + } + + return res; +} + +#if 0 +/** + * rwmx_platform_restore_config() - Restore hardware config after reload + * + * @rwnx_plat: Pointer to platform data + * @reg_value: Pointer of value to restore + * (obtained with rwmx_platform_save_config()) + * + * Restore configuration registers value. + */ +static void rwnx_term_restore_config(struct rwnx_plat *rwnx_plat, + u32 *reg_value) +{ + const u32 *reg_list; + int i, size = 0; + + if (!reg_value || !rwnx_plat->get_config_reg) + return; + + size = rwnx_plat->get_config_reg(rwnx_plat, ®_list); + + for (i = 0; i < size; i++) { + RWNX_REG_WRITE(*reg_value++, rwnx_plat, RWNX_ADDR_SYSTEM, + *reg_list++); + } +} +#endif + +#ifndef CONFIG_RWNX_FHOST +#if 0 +static int rwnx_check_fw_compatibility(struct rwnx_hw *rwnx_hw) +{ + struct ipc_shared_env_tag *shared = rwnx_hw->ipc_env->shared; + #ifdef CONFIG_RWNX_FULLMAC + struct wiphy *wiphy = rwnx_hw->wiphy; + #endif //CONFIG_RWNX_FULLMAC + #ifdef CONFIG_RWNX_OLD_IPC + int ipc_shared_version = 10; + #else //CONFIG_RWNX_OLD_IPC + int ipc_shared_version = 11; + #endif //CONFIG_RWNX_OLD_IPC + int res = 0; + + if(shared->comp_info.ipc_shared_version != ipc_shared_version) + { + wiphy_err(wiphy, "Different versions of IPC shared version between driver and FW (%d != %d)\n ", + ipc_shared_version, shared->comp_info.ipc_shared_version); + res = -1; + } + + if(shared->comp_info.radarbuf_cnt != IPC_RADARBUF_CNT) + { + wiphy_err(wiphy, "Different number of host buffers available for Radar events handling "\ + "between driver and FW (%d != %d)\n", IPC_RADARBUF_CNT, + shared->comp_info.radarbuf_cnt); + res = -1; + } + + if(shared->comp_info.unsuprxvecbuf_cnt != IPC_UNSUPRXVECBUF_CNT) + { + wiphy_err(wiphy, "Different number of host buffers available for unsupported Rx vectors "\ + "handling between driver and FW (%d != %d)\n", IPC_UNSUPRXVECBUF_CNT, + shared->comp_info.unsuprxvecbuf_cnt); + res = -1; + } + + #ifdef CONFIG_RWNX_FULLMAC + if(shared->comp_info.rxdesc_cnt != IPC_RXDESC_CNT) + { + wiphy_err(wiphy, "Different number of shared descriptors available for Data RX handling "\ + "between driver and FW (%d != %d)\n", IPC_RXDESC_CNT, + shared->comp_info.rxdesc_cnt); + res = -1; + } + #endif /* CONFIG_RWNX_FULLMAC */ + + if(shared->comp_info.rxbuf_cnt != IPC_RXBUF_CNT) + { + wiphy_err(wiphy, "Different number of host buffers available for Data Rx handling "\ + "between 
driver and FW (%d != %d)\n", IPC_RXBUF_CNT, + shared->comp_info.rxbuf_cnt); + res = -1; + } + + if(shared->comp_info.msge2a_buf_cnt != IPC_MSGE2A_BUF_CNT) + { + wiphy_err(wiphy, "Different number of host buffers available for Emb->App MSGs "\ + "sending between driver and FW (%d != %d)\n", IPC_MSGE2A_BUF_CNT, + shared->comp_info.msge2a_buf_cnt); + res = -1; + } + + if(shared->comp_info.dbgbuf_cnt != IPC_DBGBUF_CNT) + { + wiphy_err(wiphy, "Different number of host buffers available for debug messages "\ + "sending between driver and FW (%d != %d)\n", IPC_DBGBUF_CNT, + shared->comp_info.dbgbuf_cnt); + res = -1; + } + + if(shared->comp_info.bk_txq != NX_TXDESC_CNT0) + { + wiphy_err(wiphy, "Driver and FW have different sizes of BK TX queue (%d != %d)\n", + NX_TXDESC_CNT0, shared->comp_info.bk_txq); + res = -1; + } + + if(shared->comp_info.be_txq != NX_TXDESC_CNT1) + { + wiphy_err(wiphy, "Driver and FW have different sizes of BE TX queue (%d != %d)\n", + NX_TXDESC_CNT1, shared->comp_info.be_txq); + res = -1; + } + + if(shared->comp_info.vi_txq != NX_TXDESC_CNT2) + { + wiphy_err(wiphy, "Driver and FW have different sizes of VI TX queue (%d != %d)\n", + NX_TXDESC_CNT2, shared->comp_info.vi_txq); + res = -1; + } + + if(shared->comp_info.vo_txq != NX_TXDESC_CNT3) + { + wiphy_err(wiphy, "Driver and FW have different sizes of VO TX queue (%d != %d)\n", + NX_TXDESC_CNT3, shared->comp_info.vo_txq); + res = -1; + } + + #if NX_TXQ_CNT == 5 + if(shared->comp_info.bcn_txq != NX_TXDESC_CNT4) + { + wiphy_err(wiphy, "Driver and FW have different sizes of BCN TX queue (%d != %d)\n", + NX_TXDESC_CNT4, shared->comp_info.bcn_txq); + res = -1; + } + #else + if (shared->comp_info.bcn_txq > 0) + { + wiphy_err(wiphy, "BCMC enabled in firmware but disabled in driver\n"); + res = -1; + } + #endif /* NX_TXQ_CNT == 5 */ + + if(shared->comp_info.ipc_shared_size != sizeof(ipc_shared_env)) + { + wiphy_err(wiphy, "Different sizes of IPC shared between driver and FW (%zd != %d)\n", + sizeof(ipc_shared_env), shared->comp_info.ipc_shared_size); + res = -1; + } + + if(shared->comp_info.msg_api != MSG_API_VER) + { + wiphy_warn(wiphy, "WARNING: Different supported message API versions between "\ + "driver and FW (%d != %d)\n", MSG_API_VER, shared->comp_info.msg_api); + } + + return res; +} +#endif +#endif /* !CONFIG_RWNX_FHOST */ + +int rwnx_atoi2(char *value, int c_len) +{ + int len = 0; + int i = 0; + int result = 0; + int flag = 1; + + if (value[0] == '-') { + flag = -1; + value++; + } + len = c_len; + + for (i = 0;i < len ;i++) { + result = result * 10; + if (value[i] >= 48 && value[i] <= 57) { + result += value[i] - 48; + } else { + result = 0; + break; + } + } + + return result * flag; +} + + +int rwnx_atoi(char *value) +{ + int len = 0; + int i = 0; + int result = 0; + int flag = 1; + + if (value[0] == '-') { + flag = -1; + value++; + } + len = strlen(value); + + for (i = 0;i < len ;i++) { + result = result * 10; + if (value[i] >= 48 && value[i] <= 57) { + result += value[i] - 48; + } else { + result = 0; + break; + } + } + + return result * flag; +} + +void get_userconfig_txpwr_lvl_in_fdrv(txpwr_lvl_conf_t *txpwr_lvl) +{ + txpwr_lvl->enable = userconfig_info.txpwr_lvl.enable; + txpwr_lvl->dsss = userconfig_info.txpwr_lvl.dsss; + txpwr_lvl->ofdmlowrate_2g4 = userconfig_info.txpwr_lvl.ofdmlowrate_2g4; + txpwr_lvl->ofdm64qam_2g4 = userconfig_info.txpwr_lvl.ofdm64qam_2g4; + txpwr_lvl->ofdm256qam_2g4 = userconfig_info.txpwr_lvl.ofdm256qam_2g4; + txpwr_lvl->ofdm1024qam_2g4 = userconfig_info.txpwr_lvl.ofdm1024qam_2g4; + 
txpwr_lvl->ofdmlowrate_5g = userconfig_info.txpwr_lvl.ofdmlowrate_5g; + txpwr_lvl->ofdm64qam_5g = userconfig_info.txpwr_lvl.ofdm64qam_5g; + txpwr_lvl->ofdm256qam_5g = userconfig_info.txpwr_lvl.ofdm256qam_5g; + txpwr_lvl->ofdm1024qam_5g = userconfig_info.txpwr_lvl.ofdm1024qam_5g; + + AICWFDBG(LOGINFO, "%s:enable:%d\r\n", __func__, txpwr_lvl->enable); + AICWFDBG(LOGINFO, "%s:dsss:%d\r\n", __func__, txpwr_lvl->dsss); + AICWFDBG(LOGINFO, "%s:ofdmlowrate_2g4:%d\r\n", __func__, txpwr_lvl->ofdmlowrate_2g4); + AICWFDBG(LOGINFO, "%s:ofdm64qam_2g4:%d\r\n", __func__, txpwr_lvl->ofdm64qam_2g4); + AICWFDBG(LOGINFO, "%s:ofdm256qam_2g4:%d\r\n", __func__, txpwr_lvl->ofdm256qam_2g4); + AICWFDBG(LOGINFO, "%s:ofdm1024qam_2g4:%d\r\n", __func__, txpwr_lvl->ofdm1024qam_2g4); + AICWFDBG(LOGINFO, "%s:ofdmlowrate_5g:%d\r\n", __func__, txpwr_lvl->ofdmlowrate_5g); + AICWFDBG(LOGINFO, "%s:ofdm64qam_5g:%d\r\n", __func__, txpwr_lvl->ofdm64qam_5g); + AICWFDBG(LOGINFO, "%s:ofdm256qam_5g:%d\r\n", __func__, txpwr_lvl->ofdm256qam_5g); + AICWFDBG(LOGINFO, "%s:ofdm1024qam_5g:%d\r\n", __func__, txpwr_lvl->ofdm1024qam_5g); +} + +void get_userconfig_txpwr_lvl_v2_in_fdrv(txpwr_lvl_conf_v2_t *txpwr_lvl_v2) +{ + *txpwr_lvl_v2 = userconfig_info.txpwr_lvl_v2; + + AICWFDBG(LOGINFO, "%s:enable:%d\r\n", __func__, txpwr_lvl_v2->enable); + AICWFDBG(LOGINFO, "%s:lvl_11b_11ag_1m_2g4:%d\r\n", __func__, txpwr_lvl_v2->pwrlvl_11b_11ag_2g4[0]); + AICWFDBG(LOGINFO, "%s:lvl_11b_11ag_2m_2g4:%d\r\n", __func__, txpwr_lvl_v2->pwrlvl_11b_11ag_2g4[1]); + AICWFDBG(LOGINFO, "%s:lvl_11b_11ag_5m5_2g4:%d\r\n", __func__, txpwr_lvl_v2->pwrlvl_11b_11ag_2g4[2]); + AICWFDBG(LOGINFO, "%s:lvl_11b_11ag_11m_2g4:%d\r\n", __func__, txpwr_lvl_v2->pwrlvl_11b_11ag_2g4[3]); + AICWFDBG(LOGINFO, "%s:lvl_11b_11ag_6m_2g4:%d\r\n", __func__, txpwr_lvl_v2->pwrlvl_11b_11ag_2g4[4]); + AICWFDBG(LOGINFO, "%s:lvl_11b_11ag_9m_2g4:%d\r\n", __func__, txpwr_lvl_v2->pwrlvl_11b_11ag_2g4[5]); + AICWFDBG(LOGINFO, "%s:lvl_11b_11ag_12m_2g4:%d\r\n", __func__, txpwr_lvl_v2->pwrlvl_11b_11ag_2g4[6]); + AICWFDBG(LOGINFO, "%s:lvl_11b_11ag_18m_2g4:%d\r\n", __func__, txpwr_lvl_v2->pwrlvl_11b_11ag_2g4[7]); + AICWFDBG(LOGINFO, "%s:lvl_11b_11ag_24m_2g4:%d\r\n", __func__, txpwr_lvl_v2->pwrlvl_11b_11ag_2g4[8]); + AICWFDBG(LOGINFO, "%s:lvl_11b_11ag_36m_2g4:%d\r\n", __func__, txpwr_lvl_v2->pwrlvl_11b_11ag_2g4[9]); + AICWFDBG(LOGINFO, "%s:lvl_11b_11ag_48m_2g4:%d\r\n", __func__, txpwr_lvl_v2->pwrlvl_11b_11ag_2g4[10]); + AICWFDBG(LOGINFO, "%s:lvl_11b_11ag_54m_2g4:%d\r\n", __func__, txpwr_lvl_v2->pwrlvl_11b_11ag_2g4[11]); + AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs0_2g4:%d\r\n",__func__, txpwr_lvl_v2->pwrlvl_11n_11ac_2g4[0]); + AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs1_2g4:%d\r\n",__func__, txpwr_lvl_v2->pwrlvl_11n_11ac_2g4[1]); + AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs2_2g4:%d\r\n",__func__, txpwr_lvl_v2->pwrlvl_11n_11ac_2g4[2]); + AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs3_2g4:%d\r\n",__func__, txpwr_lvl_v2->pwrlvl_11n_11ac_2g4[3]); + AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs4_2g4:%d\r\n",__func__, txpwr_lvl_v2->pwrlvl_11n_11ac_2g4[4]); + AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs5_2g4:%d\r\n",__func__, txpwr_lvl_v2->pwrlvl_11n_11ac_2g4[5]); + AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs6_2g4:%d\r\n",__func__, txpwr_lvl_v2->pwrlvl_11n_11ac_2g4[6]); + AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs7_2g4:%d\r\n",__func__, txpwr_lvl_v2->pwrlvl_11n_11ac_2g4[7]); + AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs8_2g4:%d\r\n",__func__, txpwr_lvl_v2->pwrlvl_11n_11ac_2g4[8]); + AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs9_2g4:%d\r\n",__func__, 
txpwr_lvl_v2->pwrlvl_11n_11ac_2g4[9]); + AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs0_2g4:%d\r\n", __func__, txpwr_lvl_v2->pwrlvl_11ax_2g4[0]); + AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs1_2g4:%d\r\n", __func__, txpwr_lvl_v2->pwrlvl_11ax_2g4[1]); + AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs2_2g4:%d\r\n", __func__, txpwr_lvl_v2->pwrlvl_11ax_2g4[2]); + AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs3_2g4:%d\r\n", __func__, txpwr_lvl_v2->pwrlvl_11ax_2g4[3]); + AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs4_2g4:%d\r\n", __func__, txpwr_lvl_v2->pwrlvl_11ax_2g4[4]); + AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs5_2g4:%d\r\n", __func__, txpwr_lvl_v2->pwrlvl_11ax_2g4[5]); + AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs6_2g4:%d\r\n", __func__, txpwr_lvl_v2->pwrlvl_11ax_2g4[6]); + AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs7_2g4:%d\r\n", __func__, txpwr_lvl_v2->pwrlvl_11ax_2g4[7]); + AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs8_2g4:%d\r\n", __func__, txpwr_lvl_v2->pwrlvl_11ax_2g4[8]); + AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs9_2g4:%d\r\n", __func__, txpwr_lvl_v2->pwrlvl_11ax_2g4[9]); + AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs10_2g4:%d\r\n", __func__, txpwr_lvl_v2->pwrlvl_11ax_2g4[10]); + AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs11_2g4:%d\r\n", __func__, txpwr_lvl_v2->pwrlvl_11ax_2g4[11]); +} + +void get_userconfig_txpwr_lvl_v3_in_fdrv(txpwr_lvl_conf_v3_t *txpwr_lvl_v3) +{ + *txpwr_lvl_v3 = userconfig_info.txpwr_lvl_v3; + + AICWFDBG(LOGINFO, "%s:enable:%d\r\n", __func__, txpwr_lvl_v3->enable); + AICWFDBG(LOGINFO, "%s:lvl_11b_11ag_1m_2g4:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11b_11ag_2g4[0]); + AICWFDBG(LOGINFO, "%s:lvl_11b_11ag_2m_2g4:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11b_11ag_2g4[1]); + AICWFDBG(LOGINFO, "%s:lvl_11b_11ag_5m5_2g4:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11b_11ag_2g4[2]); + AICWFDBG(LOGINFO, "%s:lvl_11b_11ag_11m_2g4:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11b_11ag_2g4[3]); + AICWFDBG(LOGINFO, "%s:lvl_11b_11ag_6m_2g4:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11b_11ag_2g4[4]); + AICWFDBG(LOGINFO, "%s:lvl_11b_11ag_9m_2g4:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11b_11ag_2g4[5]); + AICWFDBG(LOGINFO, "%s:lvl_11b_11ag_12m_2g4:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11b_11ag_2g4[6]); + AICWFDBG(LOGINFO, "%s:lvl_11b_11ag_18m_2g4:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11b_11ag_2g4[7]); + AICWFDBG(LOGINFO, "%s:lvl_11b_11ag_24m_2g4:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11b_11ag_2g4[8]); + AICWFDBG(LOGINFO, "%s:lvl_11b_11ag_36m_2g4:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11b_11ag_2g4[9]); + AICWFDBG(LOGINFO, "%s:lvl_11b_11ag_48m_2g4:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11b_11ag_2g4[10]); + AICWFDBG(LOGINFO, "%s:lvl_11b_11ag_54m_2g4:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11b_11ag_2g4[11]); + AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs0_2g4:%d\r\n",__func__, txpwr_lvl_v3->pwrlvl_11n_11ac_2g4[0]); + AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs1_2g4:%d\r\n",__func__, txpwr_lvl_v3->pwrlvl_11n_11ac_2g4[1]); + AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs2_2g4:%d\r\n",__func__, txpwr_lvl_v3->pwrlvl_11n_11ac_2g4[2]); + AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs3_2g4:%d\r\n",__func__, txpwr_lvl_v3->pwrlvl_11n_11ac_2g4[3]); + AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs4_2g4:%d\r\n",__func__, txpwr_lvl_v3->pwrlvl_11n_11ac_2g4[4]); + AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs5_2g4:%d\r\n",__func__, txpwr_lvl_v3->pwrlvl_11n_11ac_2g4[5]); + AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs6_2g4:%d\r\n",__func__, txpwr_lvl_v3->pwrlvl_11n_11ac_2g4[6]); + AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs7_2g4:%d\r\n",__func__, txpwr_lvl_v3->pwrlvl_11n_11ac_2g4[7]); + AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs8_2g4:%d\r\n",__func__, 
txpwr_lvl_v3->pwrlvl_11n_11ac_2g4[8]); + AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs9_2g4:%d\r\n",__func__, txpwr_lvl_v3->pwrlvl_11n_11ac_2g4[9]); + AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs0_2g4:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11ax_2g4[0]); + AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs1_2g4:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11ax_2g4[1]); + AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs2_2g4:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11ax_2g4[2]); + AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs3_2g4:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11ax_2g4[3]); + AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs4_2g4:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11ax_2g4[4]); + AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs5_2g4:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11ax_2g4[5]); + AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs6_2g4:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11ax_2g4[6]); + AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs7_2g4:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11ax_2g4[7]); + AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs8_2g4:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11ax_2g4[8]); + AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs9_2g4:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11ax_2g4[9]); + AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs10_2g4:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11ax_2g4[10]); + AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs11_2g4:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11ax_2g4[11]); + + AICWFDBG(LOGINFO, "%s:lvl_11a_1m_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11a_5g[0]); + AICWFDBG(LOGINFO, "%s:lvl_11a_2m_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11a_5g[1]); + AICWFDBG(LOGINFO, "%s:lvl_11a_5m5_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11a_5g[2]); + AICWFDBG(LOGINFO, "%s:lvl_11a_11m_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11a_5g[3]); + AICWFDBG(LOGINFO, "%s:lvl_11a_6m_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11a_5g[4]); + AICWFDBG(LOGINFO, "%s:lvl_11a_9m_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11a_5g[5]); + AICWFDBG(LOGINFO, "%s:lvl_11a_12m_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11a_5g[6]); + AICWFDBG(LOGINFO, "%s:lvl_11a_18m_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11a_5g[7]); + AICWFDBG(LOGINFO, "%s:lvl_11a_24m_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11a_5g[8]); + AICWFDBG(LOGINFO, "%s:lvl_11a_36m_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11a_5g[9]); + AICWFDBG(LOGINFO, "%s:lvl_11a_48m_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11a_5g[10]); + AICWFDBG(LOGINFO, "%s:lvl_11a_54m_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11a_5g[11]); + AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs0_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11n_11ac_5g[0]); + AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs1_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11n_11ac_5g[1]); + AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs2_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11n_11ac_5g[2]); + AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs3_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11n_11ac_5g[3]); + AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs4_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11n_11ac_5g[4]); + AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs5_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11n_11ac_5g[5]); + AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs6_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11n_11ac_5g[6]); + AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs7_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11n_11ac_5g[7]); + AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs8_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11n_11ac_5g[8]); + AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs9_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11n_11ac_5g[9]); + AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs0_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11ax_5g[0]); + AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs1_5g:%d\r\n", 
__func__, txpwr_lvl_v3->pwrlvl_11ax_5g[1]); + AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs2_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11ax_5g[2]); + AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs3_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11ax_5g[3]); + AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs4_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11ax_5g[4]); + AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs5_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11ax_5g[5]); + AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs6_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11ax_5g[6]); + AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs7_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11ax_5g[7]); + AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs8_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11ax_5g[8]); + AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs9_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11ax_5g[9]); + AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs10_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11ax_5g[10]); + AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs11_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11ax_5g[11]); +} + +void get_userconfig_txpwr_lvl_v4_in_fdrv(txpwr_lvl_conf_v4_t *txpwr_lvl_v4) +{ + *txpwr_lvl_v4 = userconfig_info.txpwr_lvl_v4; + + AICWFDBG(LOGINFO, "%s:enable:%d\r\n", __func__, txpwr_lvl_v4->enable); + AICWFDBG(LOGINFO, "%s:lvl_11b_11ag_1m_2g4:%d\r\n", __func__, txpwr_lvl_v4->pwrlvl_11b_11ag_2g4[0]); +#if 0//comment verbose printing + AICWFDBG(LOGINFO, "%s:lvl_11b_11ag_2m_2g4:%d\r\n", __func__, txpwr_lvl_v4->pwrlvl_11b_11ag_2g4[1]); + AICWFDBG(LOGINFO, "%s:lvl_11b_11ag_5m5_2g4:%d\r\n", __func__, txpwr_lvl_v4->pwrlvl_11b_11ag_2g4[2]); + AICWFDBG(LOGINFO, "%s:lvl_11b_11ag_11m_2g4:%d\r\n", __func__, txpwr_lvl_v4->pwrlvl_11b_11ag_2g4[3]); + AICWFDBG(LOGINFO, "%s:lvl_11b_11ag_6m_2g4:%d\r\n", __func__, txpwr_lvl_v4->pwrlvl_11b_11ag_2g4[4]); + AICWFDBG(LOGINFO, "%s:lvl_11b_11ag_9m_2g4:%d\r\n", __func__, txpwr_lvl_v4->pwrlvl_11b_11ag_2g4[5]); + AICWFDBG(LOGINFO, "%s:lvl_11b_11ag_12m_2g4:%d\r\n", __func__, txpwr_lvl_v4->pwrlvl_11b_11ag_2g4[6]); + AICWFDBG(LOGINFO, "%s:lvl_11b_11ag_18m_2g4:%d\r\n", __func__, txpwr_lvl_v4->pwrlvl_11b_11ag_2g4[7]); + AICWFDBG(LOGINFO, "%s:lvl_11b_11ag_24m_2g4:%d\r\n", __func__, txpwr_lvl_v4->pwrlvl_11b_11ag_2g4[8]); + AICWFDBG(LOGINFO, "%s:lvl_11b_11ag_36m_2g4:%d\r\n", __func__, txpwr_lvl_v4->pwrlvl_11b_11ag_2g4[9]); + AICWFDBG(LOGINFO, "%s:lvl_11b_11ag_48m_2g4:%d\r\n", __func__, txpwr_lvl_v4->pwrlvl_11b_11ag_2g4[10]); + AICWFDBG(LOGINFO, "%s:lvl_11b_11ag_54m_2g4:%d\r\n", __func__, txpwr_lvl_v4->pwrlvl_11b_11ag_2g4[11]); + AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs0_2g4:%d\r\n",__func__, txpwr_lvl_v4->pwrlvl_11n_11ac_2g4[0]); + AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs1_2g4:%d\r\n",__func__, txpwr_lvl_v4->pwrlvl_11n_11ac_2g4[1]); + AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs2_2g4:%d\r\n",__func__, txpwr_lvl_v4->pwrlvl_11n_11ac_2g4[2]); + AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs3_2g4:%d\r\n",__func__, txpwr_lvl_v4->pwrlvl_11n_11ac_2g4[3]); + AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs4_2g4:%d\r\n",__func__, txpwr_lvl_v4->pwrlvl_11n_11ac_2g4[4]); + AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs5_2g4:%d\r\n",__func__, txpwr_lvl_v4->pwrlvl_11n_11ac_2g4[5]); + AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs6_2g4:%d\r\n",__func__, txpwr_lvl_v4->pwrlvl_11n_11ac_2g4[6]); + AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs7_2g4:%d\r\n",__func__, txpwr_lvl_v4->pwrlvl_11n_11ac_2g4[7]); + AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs8_2g4:%d\r\n",__func__, txpwr_lvl_v4->pwrlvl_11n_11ac_2g4[8]); + AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs9_2g4:%d\r\n",__func__, txpwr_lvl_v4->pwrlvl_11n_11ac_2g4[9]); + AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs0_2g4:%d\r\n", __func__, 
txpwr_lvl_v4->pwrlvl_11ax_2g4[0]); + AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs1_2g4:%d\r\n", __func__, txpwr_lvl_v4->pwrlvl_11ax_2g4[1]); + AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs2_2g4:%d\r\n", __func__, txpwr_lvl_v4->pwrlvl_11ax_2g4[2]); + AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs3_2g4:%d\r\n", __func__, txpwr_lvl_v4->pwrlvl_11ax_2g4[3]); + AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs4_2g4:%d\r\n", __func__, txpwr_lvl_v4->pwrlvl_11ax_2g4[4]); + AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs5_2g4:%d\r\n", __func__, txpwr_lvl_v4->pwrlvl_11ax_2g4[5]); + AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs6_2g4:%d\r\n", __func__, txpwr_lvl_v4->pwrlvl_11ax_2g4[6]); + AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs7_2g4:%d\r\n", __func__, txpwr_lvl_v4->pwrlvl_11ax_2g4[7]); + AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs8_2g4:%d\r\n", __func__, txpwr_lvl_v4->pwrlvl_11ax_2g4[8]); + AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs9_2g4:%d\r\n", __func__, txpwr_lvl_v4->pwrlvl_11ax_2g4[9]); + AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs10_2g4:%d\r\n", __func__, txpwr_lvl_v4->pwrlvl_11ax_2g4[10]); + AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs11_2g4:%d\r\n", __func__, txpwr_lvl_v4->pwrlvl_11ax_2g4[11]); + + AICWFDBG(LOGINFO, "%s:lvl_11a_6m_5g:%d\r\n", __func__, txpwr_lvl_v4->pwrlvl_11a_5g[0]); + AICWFDBG(LOGINFO, "%s:lvl_11a_9m_5g:%d\r\n", __func__, txpwr_lvl_v4->pwrlvl_11a_5g[1]); + AICWFDBG(LOGINFO, "%s:lvl_11a_12m_5g:%d\r\n", __func__, txpwr_lvl_v4->pwrlvl_11a_5g[2]); + AICWFDBG(LOGINFO, "%s:lvl_11a_18m_5g:%d\r\n", __func__, txpwr_lvl_v4->pwrlvl_11a_5g[3]); + AICWFDBG(LOGINFO, "%s:lvl_11a_24m_5g:%d\r\n", __func__, txpwr_lvl_v4->pwrlvl_11a_5g[4]); + AICWFDBG(LOGINFO, "%s:lvl_11a_36m_5g:%d\r\n", __func__, txpwr_lvl_v4->pwrlvl_11a_5g[5]); + AICWFDBG(LOGINFO, "%s:lvl_11a_48m_5g:%d\r\n", __func__, txpwr_lvl_v4->pwrlvl_11a_5g[6]); + AICWFDBG(LOGINFO, "%s:lvl_11a_54m_5g:%d\r\n", __func__, txpwr_lvl_v4->pwrlvl_11a_5g[7]); + AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs0_5g:%d\r\n", __func__, txpwr_lvl_v4->pwrlvl_11n_11ac_5g[0]); + AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs1_5g:%d\r\n", __func__, txpwr_lvl_v4->pwrlvl_11n_11ac_5g[1]); + AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs2_5g:%d\r\n", __func__, txpwr_lvl_v4->pwrlvl_11n_11ac_5g[2]); + AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs3_5g:%d\r\n", __func__, txpwr_lvl_v4->pwrlvl_11n_11ac_5g[3]); + AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs4_5g:%d\r\n", __func__, txpwr_lvl_v4->pwrlvl_11n_11ac_5g[4]); + AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs5_5g:%d\r\n", __func__, txpwr_lvl_v4->pwrlvl_11n_11ac_5g[5]); + AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs6_5g:%d\r\n", __func__, txpwr_lvl_v4->pwrlvl_11n_11ac_5g[6]); + AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs7_5g:%d\r\n", __func__, txpwr_lvl_v4->pwrlvl_11n_11ac_5g[7]); + AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs8_5g:%d\r\n", __func__, txpwr_lvl_v4->pwrlvl_11n_11ac_5g[8]); + AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs9_5g:%d\r\n", __func__, txpwr_lvl_v4->pwrlvl_11n_11ac_5g[9]); + AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs0_5g:%d\r\n", __func__, txpwr_lvl_v4->pwrlvl_11ax_5g[0]); + AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs1_5g:%d\r\n", __func__, txpwr_lvl_v4->pwrlvl_11ax_5g[1]); + AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs2_5g:%d\r\n", __func__, txpwr_lvl_v4->pwrlvl_11ax_5g[2]); + AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs3_5g:%d\r\n", __func__, txpwr_lvl_v4->pwrlvl_11ax_5g[3]); + AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs4_5g:%d\r\n", __func__, txpwr_lvl_v4->pwrlvl_11ax_5g[4]); + AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs5_5g:%d\r\n", __func__, txpwr_lvl_v4->pwrlvl_11ax_5g[5]); + AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs6_5g:%d\r\n", __func__, txpwr_lvl_v4->pwrlvl_11ax_5g[6]); + AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs7_5g:%d\r\n", 
__func__, txpwr_lvl_v4->pwrlvl_11ax_5g[7]); + AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs8_5g:%d\r\n", __func__, txpwr_lvl_v4->pwrlvl_11ax_5g[8]); + AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs9_5g:%d\r\n", __func__, txpwr_lvl_v4->pwrlvl_11ax_5g[9]); + AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs10_5g:%d\r\n", __func__, txpwr_lvl_v4->pwrlvl_11ax_5g[10]); + AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs11_5g:%d\r\n", __func__, txpwr_lvl_v4->pwrlvl_11ax_5g[11]); +#endif +} + +void get_userconfig_txpwr_lvl_adj_in_fdrv(txpwr_lvl_adj_conf_t *txpwr_lvl_adj) +{ + *txpwr_lvl_adj = userconfig_info.txpwr_lvl_adj; + + AICWFDBG(LOGINFO, "%s:enable:%d\r\n", __func__, txpwr_lvl_adj->enable); + AICWFDBG(LOGINFO, "%s:lvl_adj_2g4_chan_1_4:%d\r\n", __func__, txpwr_lvl_adj->pwrlvl_adj_tbl_2g4[0]); + AICWFDBG(LOGINFO, "%s:lvl_adj_2g4_chan_5_9:%d\r\n", __func__, txpwr_lvl_adj->pwrlvl_adj_tbl_2g4[1]); + AICWFDBG(LOGINFO, "%s:lvl_adj_2g4_chan_10_13:%d\r\n", __func__, txpwr_lvl_adj->pwrlvl_adj_tbl_2g4[2]); + + AICWFDBG(LOGINFO, "%s:lvl_adj_5g_chan_42:%d\r\n", __func__, txpwr_lvl_adj->pwrlvl_adj_tbl_5g[0]); + AICWFDBG(LOGINFO, "%s:lvl_adj_5g_chan_58:%d\r\n", __func__, txpwr_lvl_adj->pwrlvl_adj_tbl_5g[1]); + AICWFDBG(LOGINFO, "%s:lvl_adj_5g_chan_106:%d\r\n", __func__, txpwr_lvl_adj->pwrlvl_adj_tbl_5g[2]); + AICWFDBG(LOGINFO, "%s:lvl_adj_5g_chan_122:%d\r\n", __func__, txpwr_lvl_adj->pwrlvl_adj_tbl_5g[3]); + AICWFDBG(LOGINFO, "%s:lvl_adj_5g_chan_138:%d\r\n", __func__, txpwr_lvl_adj->pwrlvl_adj_tbl_5g[4]); + AICWFDBG(LOGINFO, "%s:lvl_adj_5g_chan_155:%d\r\n", __func__, txpwr_lvl_adj->pwrlvl_adj_tbl_5g[5]); +} + + +void get_userconfig_txpwr_ofst_in_fdrv(txpwr_ofst_conf_t *txpwr_ofst) +{ + txpwr_ofst->enable = userconfig_info.txpwr_ofst.enable; + txpwr_ofst->chan_1_4 = userconfig_info.txpwr_ofst.chan_1_4; + txpwr_ofst->chan_5_9 = userconfig_info.txpwr_ofst.chan_5_9; + txpwr_ofst->chan_10_13 = userconfig_info.txpwr_ofst.chan_10_13; + txpwr_ofst->chan_36_64 = userconfig_info.txpwr_ofst.chan_36_64; + txpwr_ofst->chan_100_120 = userconfig_info.txpwr_ofst.chan_100_120; + txpwr_ofst->chan_122_140 = userconfig_info.txpwr_ofst.chan_122_140; + txpwr_ofst->chan_142_165 = userconfig_info.txpwr_ofst.chan_142_165; + + AICWFDBG(LOGINFO, "%s:enable :%d\r\n", __func__, txpwr_ofst->enable); + AICWFDBG(LOGINFO, "%s:chan_1_4 :%d\r\n", __func__, txpwr_ofst->chan_1_4); + AICWFDBG(LOGINFO, "%s:chan_5_9 :%d\r\n", __func__, txpwr_ofst->chan_5_9); + AICWFDBG(LOGINFO, "%s:chan_10_13 :%d\r\n", __func__, txpwr_ofst->chan_10_13); + AICWFDBG(LOGINFO, "%s:chan_36_64 :%d\r\n", __func__, txpwr_ofst->chan_36_64); + AICWFDBG(LOGINFO, "%s:chan_100_120:%d\r\n", __func__, txpwr_ofst->chan_100_120); + AICWFDBG(LOGINFO, "%s:chan_122_140:%d\r\n", __func__, txpwr_ofst->chan_122_140); + AICWFDBG(LOGINFO, "%s:chan_142_165:%d\r\n", __func__, txpwr_ofst->chan_142_165); +} + +void get_userconfig_txpwr_ofst2x_in_fdrv(txpwr_ofst2x_conf_t *txpwr_ofst2x) +{ + int type, ch_grp; + *txpwr_ofst2x = userconfig_info.txpwr_ofst2x; + AICWFDBG(LOGINFO, "%s:enable :%d\r\n", __func__, txpwr_ofst2x->enable); + AICWFDBG(LOGINFO, "pwrofst2x 2.4g: [0]:11b, [1]:ofdm_highrate, [2]:ofdm_lowrate\n" + " chan=" "\t1-4" "\t5-9" "\t10-13"); + for (type = 0; type < 3; type++) { + AICWFDBG(LOGINFO, "\n [%d] =", type); + for (ch_grp = 0; ch_grp < 3; ch_grp++) { + AICWFDBG(LOGINFO, "\t%d", txpwr_ofst2x->pwrofst2x_tbl_2g4[type][ch_grp]); + } + } + AICWFDBG(LOGINFO, "\npwrofst2x 5g: [0]:ofdm_lowrate, [1]:ofdm_highrate, [2]:ofdm_midrate\n" + " chan=" "\t36-50" "\t51-64" "\t98-114" "\t115-130" "\t131-146" "\t147-166"); + for (type = 0; type 
< 3; type++) { + AICWFDBG(LOGINFO, "\n [%d] =", type); + for (ch_grp = 0; ch_grp < 6; ch_grp++) { + AICWFDBG(LOGINFO, "\t%d", txpwr_ofst2x->pwrofst2x_tbl_5g[type][ch_grp]); + } + } + AICWFDBG(LOGINFO, "\n"); +} + +void get_userconfig_txpwr_ofst2x_v2_in_fdrv(txpwr_ofst2x_conf_v2_t *txpwr_ofst2x_v2) +{ + int type, ch_grp; + *txpwr_ofst2x_v2 = userconfig_info.txpwr_ofst2x_v2; + AICWFDBG(LOGINFO, "%s:enable :%d\r\n", __func__, txpwr_ofst2x_v2->enable); + AICWFDBG(LOGINFO, "pwrofst2x 2.4g(ant0/ant1): [0]:11b, [1]:ofdm_highrate\n" + " chan=" "\t1-4" "\t5-9" "\t10-13"); + for (type = 0; type < 2; type++) { + AICWFDBG(LOGINFO, "\n [%d] =", type); + for (ch_grp = 0; ch_grp < 3; ch_grp++) { + AICWFDBG(LOGINFO, "\t%d/%d", txpwr_ofst2x_v2->pwrofst2x_tbl_2g4_ant0[ch_grp][type], txpwr_ofst2x_v2->pwrofst2x_tbl_2g4_ant1[ch_grp][type]); + } + } + AICWFDBG(LOGINFO, "\npwrofst2x 5g: [0]:ofdm_highrate\n" + " chan=" "\t36-50" "\t51-64" "\t98-114" "\t115-130" "\t131-146" "\t147-166"); + for (type = 0; type < 1; type++) { + AICWFDBG(LOGINFO, "\n [%d] =", type); + for (ch_grp = 0; ch_grp < 6; ch_grp++) { + AICWFDBG(LOGINFO, "\t%d/%d", txpwr_ofst2x_v2->pwrofst2x_tbl_5g_ant0[ch_grp][type], txpwr_ofst2x_v2->pwrofst2x_tbl_5g_ant1[ch_grp][type]); + } + } + AICWFDBG(LOGINFO, "\n"); +} +void get_userconfig_txpwr_loss(txpwr_loss_conf_t *txpwr_loss) +{ + txpwr_loss->loss_enable_2g4 = userconfig_info.txpwr_loss.loss_enable_2g4; + txpwr_loss->loss_value_2g4 = userconfig_info.txpwr_loss.loss_value_2g4; + txpwr_loss->loss_enable_5g = userconfig_info.txpwr_loss.loss_enable_5g; + txpwr_loss->loss_value_5g = userconfig_info.txpwr_loss.loss_value_5g; + + AICWFDBG(LOGDEBUG, "%s:loss_enable_2g4: %d, val_2g4: %d, loss_enable_5g: %d, val_5g: %d\r\n", __func__, + txpwr_loss->loss_enable_2g4, txpwr_loss->loss_value_2g4, + txpwr_loss->loss_enable_5g, txpwr_loss->loss_value_5g); +} +void set_txpwr_loss_ofst(s8_l value) +{ + userconfig_info.txpwr_loss.loss_enable_2g4 = 1; + userconfig_info.txpwr_loss.loss_value_2g4 = -value; + userconfig_info.txpwr_loss.loss_enable_5g = 1; + userconfig_info.txpwr_loss.loss_value_5g = -value; + AICWFDBG(LOGINFO, "%s:value:%d\r\n", __func__, value); +} + +void get_userconfig_xtal_cap(xtal_cap_conf_t *xtal_cap) +{ + *xtal_cap = userconfig_info.xtal_cap; + + AICWFDBG(LOGINFO, "%s:enable :%d\r\n", __func__, xtal_cap->enable); + AICWFDBG(LOGINFO, "%s:xtal_cap :%d\r\n", __func__, xtal_cap->xtal_cap); + AICWFDBG(LOGINFO, "%s:xtal_cap_fine:%d\r\n", __func__, xtal_cap->xtal_cap_fine); +} + +void rwnx_plat_nvram_set_value(char *command, char *value) +{ + //TODO send command + AICWFDBG(LOGINFO, "%s:command=%s value=%s\n", __func__, command, value); + if (!strcmp(command, "enable")) { + userconfig_info.txpwr_lvl.enable = rwnx_atoi(value); + userconfig_info.txpwr_lvl_v2.enable = rwnx_atoi(value); + userconfig_info.txpwr_lvl_v3.enable = rwnx_atoi(value); + } else if (!strcmp(command, "dsss")) { + userconfig_info.txpwr_lvl.dsss = rwnx_atoi(value); + } else if (!strcmp(command, "ofdmlowrate_2g4")) { + userconfig_info.txpwr_lvl.ofdmlowrate_2g4 = rwnx_atoi(value); + } else if (!strcmp(command, "ofdm64qam_2g4")) { + userconfig_info.txpwr_lvl.ofdm64qam_2g4 = rwnx_atoi(value); + } else if (!strcmp(command, "ofdm256qam_2g4")) { + userconfig_info.txpwr_lvl.ofdm256qam_2g4 = rwnx_atoi(value); + } else if (!strcmp(command, "ofdm1024qam_2g4")) { + userconfig_info.txpwr_lvl.ofdm1024qam_2g4 = rwnx_atoi(value); + } else if (!strcmp(command, "ofdmlowrate_5g")) { + userconfig_info.txpwr_lvl.ofdmlowrate_5g = rwnx_atoi(value); + } else if 
(!strcmp(command, "ofdm64qam_5g")) { + userconfig_info.txpwr_lvl.ofdm64qam_5g = rwnx_atoi(value); + } else if (!strcmp(command, "ofdm256qam_5g")) { + userconfig_info.txpwr_lvl.ofdm256qam_5g = rwnx_atoi(value); + } else if (!strcmp(command, "ofdm1024qam_5g")) { + userconfig_info.txpwr_lvl.ofdm1024qam_5g = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11b_11ag_1m_2g4")) { + userconfig_info.txpwr_lvl_v2.pwrlvl_11b_11ag_2g4[0] = rwnx_atoi(value); + userconfig_info.txpwr_lvl_v3.pwrlvl_11b_11ag_2g4[0] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11b_11ag_2m_2g4")) { + userconfig_info.txpwr_lvl_v2.pwrlvl_11b_11ag_2g4[1] = rwnx_atoi(value); + userconfig_info.txpwr_lvl_v3.pwrlvl_11b_11ag_2g4[1] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11b_11ag_5m5_2g4")) { + userconfig_info.txpwr_lvl_v2.pwrlvl_11b_11ag_2g4[2] = rwnx_atoi(value); + userconfig_info.txpwr_lvl_v3.pwrlvl_11b_11ag_2g4[2] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11b_11ag_11m_2g4")) { + userconfig_info.txpwr_lvl_v2.pwrlvl_11b_11ag_2g4[3] = rwnx_atoi(value); + userconfig_info.txpwr_lvl_v3.pwrlvl_11b_11ag_2g4[3] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11b_11ag_6m_2g4")) { + userconfig_info.txpwr_lvl_v2.pwrlvl_11b_11ag_2g4[4] = rwnx_atoi(value); + userconfig_info.txpwr_lvl_v3.pwrlvl_11b_11ag_2g4[4] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11b_11ag_9m_2g4")) { + userconfig_info.txpwr_lvl_v2.pwrlvl_11b_11ag_2g4[5] = rwnx_atoi(value); + userconfig_info.txpwr_lvl_v3.pwrlvl_11b_11ag_2g4[5] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11b_11ag_12m_2g4")) { + userconfig_info.txpwr_lvl_v2.pwrlvl_11b_11ag_2g4[6] = rwnx_atoi(value); + userconfig_info.txpwr_lvl_v3.pwrlvl_11b_11ag_2g4[6] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11b_11ag_18m_2g4")) { + userconfig_info.txpwr_lvl_v2.pwrlvl_11b_11ag_2g4[7] = rwnx_atoi(value); + userconfig_info.txpwr_lvl_v3.pwrlvl_11b_11ag_2g4[7] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11b_11ag_24m_2g4")) { + userconfig_info.txpwr_lvl_v2.pwrlvl_11b_11ag_2g4[8] = rwnx_atoi(value); + userconfig_info.txpwr_lvl_v3.pwrlvl_11b_11ag_2g4[8] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11b_11ag_36m_2g4")) { + userconfig_info.txpwr_lvl_v2.pwrlvl_11b_11ag_2g4[9] = rwnx_atoi(value); + userconfig_info.txpwr_lvl_v3.pwrlvl_11b_11ag_2g4[9] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11b_11ag_48m_2g4")) { + userconfig_info.txpwr_lvl_v2.pwrlvl_11b_11ag_2g4[10] = rwnx_atoi(value); + userconfig_info.txpwr_lvl_v3.pwrlvl_11b_11ag_2g4[10] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11b_11ag_54m_2g4")) { + userconfig_info.txpwr_lvl_v2.pwrlvl_11b_11ag_2g4[11] = rwnx_atoi(value); + userconfig_info.txpwr_lvl_v3.pwrlvl_11b_11ag_2g4[11] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11n_11ac_mcs0_2g4")) { + userconfig_info.txpwr_lvl_v2.pwrlvl_11n_11ac_2g4[0] = rwnx_atoi(value); + userconfig_info.txpwr_lvl_v3.pwrlvl_11n_11ac_2g4[0] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11n_11ac_mcs1_2g4")) { + userconfig_info.txpwr_lvl_v2.pwrlvl_11n_11ac_2g4[1] = rwnx_atoi(value); + userconfig_info.txpwr_lvl_v3.pwrlvl_11n_11ac_2g4[1] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11n_11ac_mcs2_2g4")) { + userconfig_info.txpwr_lvl_v2.pwrlvl_11n_11ac_2g4[2] = rwnx_atoi(value); + userconfig_info.txpwr_lvl_v3.pwrlvl_11n_11ac_2g4[2] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11n_11ac_mcs3_2g4")) { + userconfig_info.txpwr_lvl_v2.pwrlvl_11n_11ac_2g4[3] = rwnx_atoi(value); + 
userconfig_info.txpwr_lvl_v3.pwrlvl_11n_11ac_2g4[3] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11n_11ac_mcs4_2g4")) { + userconfig_info.txpwr_lvl_v2.pwrlvl_11n_11ac_2g4[4] = rwnx_atoi(value); + userconfig_info.txpwr_lvl_v3.pwrlvl_11n_11ac_2g4[4] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11n_11ac_mcs5_2g4")) { + userconfig_info.txpwr_lvl_v2.pwrlvl_11n_11ac_2g4[5] = rwnx_atoi(value); + userconfig_info.txpwr_lvl_v3.pwrlvl_11n_11ac_2g4[5] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11n_11ac_mcs6_2g4")) { + userconfig_info.txpwr_lvl_v2.pwrlvl_11n_11ac_2g4[6] = rwnx_atoi(value); + userconfig_info.txpwr_lvl_v3.pwrlvl_11n_11ac_2g4[6] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11n_11ac_mcs7_2g4")) { + userconfig_info.txpwr_lvl_v2.pwrlvl_11n_11ac_2g4[7] = rwnx_atoi(value); + userconfig_info.txpwr_lvl_v3.pwrlvl_11n_11ac_2g4[7] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11n_11ac_mcs8_2g4")) { + userconfig_info.txpwr_lvl_v2.pwrlvl_11n_11ac_2g4[8] = rwnx_atoi(value); + userconfig_info.txpwr_lvl_v3.pwrlvl_11n_11ac_2g4[8] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11n_11ac_mcs9_2g4")) { + userconfig_info.txpwr_lvl_v2.pwrlvl_11n_11ac_2g4[9] = rwnx_atoi(value); + userconfig_info.txpwr_lvl_v3.pwrlvl_11n_11ac_2g4[9] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11ax_mcs0_2g4")) { + userconfig_info.txpwr_lvl_v2.pwrlvl_11ax_2g4[0] = rwnx_atoi(value); + userconfig_info.txpwr_lvl_v3.pwrlvl_11ax_2g4[0] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11ax_mcs1_2g4")) { + userconfig_info.txpwr_lvl_v2.pwrlvl_11ax_2g4[1] = rwnx_atoi(value); + userconfig_info.txpwr_lvl_v3.pwrlvl_11ax_2g4[1] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11ax_mcs2_2g4")) { + userconfig_info.txpwr_lvl_v2.pwrlvl_11ax_2g4[2] = rwnx_atoi(value); + userconfig_info.txpwr_lvl_v3.pwrlvl_11ax_2g4[2] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11ax_mcs3_2g4")) { + userconfig_info.txpwr_lvl_v2.pwrlvl_11ax_2g4[3] = rwnx_atoi(value); + userconfig_info.txpwr_lvl_v3.pwrlvl_11ax_2g4[3] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11ax_mcs4_2g4")) { + userconfig_info.txpwr_lvl_v2.pwrlvl_11ax_2g4[4] = rwnx_atoi(value); + userconfig_info.txpwr_lvl_v3.pwrlvl_11ax_2g4[4] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11ax_mcs5_2g4")) { + userconfig_info.txpwr_lvl_v2.pwrlvl_11ax_2g4[5] = rwnx_atoi(value); + userconfig_info.txpwr_lvl_v3.pwrlvl_11ax_2g4[5] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11ax_mcs6_2g4")) { + userconfig_info.txpwr_lvl_v2.pwrlvl_11ax_2g4[6] = rwnx_atoi(value); + userconfig_info.txpwr_lvl_v3.pwrlvl_11ax_2g4[6] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11ax_mcs7_2g4")) { + userconfig_info.txpwr_lvl_v2.pwrlvl_11ax_2g4[7] = rwnx_atoi(value); + userconfig_info.txpwr_lvl_v3.pwrlvl_11ax_2g4[7] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11ax_mcs8_2g4")) { + userconfig_info.txpwr_lvl_v2.pwrlvl_11ax_2g4[8] = rwnx_atoi(value); + userconfig_info.txpwr_lvl_v3.pwrlvl_11ax_2g4[8] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11ax_mcs9_2g4")) { + userconfig_info.txpwr_lvl_v2.pwrlvl_11ax_2g4[9] = rwnx_atoi(value); + userconfig_info.txpwr_lvl_v3.pwrlvl_11ax_2g4[9] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11ax_mcs10_2g4")) { + userconfig_info.txpwr_lvl_v2.pwrlvl_11ax_2g4[10] = rwnx_atoi(value); + userconfig_info.txpwr_lvl_v3.pwrlvl_11ax_2g4[10] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11ax_mcs11_2g4")) { + userconfig_info.txpwr_lvl_v2.pwrlvl_11ax_2g4[11] = 
rwnx_atoi(value); + userconfig_info.txpwr_lvl_v3.pwrlvl_11ax_2g4[11] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11a_1m_5g")) { + userconfig_info.txpwr_lvl_v3.pwrlvl_11a_5g[0] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11a_2m_5g")) { + userconfig_info.txpwr_lvl_v3.pwrlvl_11a_5g[1] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11a_5m5_5g")) { + userconfig_info.txpwr_lvl_v3.pwrlvl_11a_5g[2] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11a_11m_5g")) { + userconfig_info.txpwr_lvl_v3.pwrlvl_11a_5g[3] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11a_6m_5g")) { + userconfig_info.txpwr_lvl_v3.pwrlvl_11a_5g[4] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11a_9m_5g")) { + userconfig_info.txpwr_lvl_v3.pwrlvl_11a_5g[5] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11a_12m_5g")) { + userconfig_info.txpwr_lvl_v3.pwrlvl_11a_5g[6] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11a_18m_5g")) { + userconfig_info.txpwr_lvl_v3.pwrlvl_11a_5g[7] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11a_24m_5g")) { + userconfig_info.txpwr_lvl_v3.pwrlvl_11a_5g[8] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11a_36m_5g")) { + userconfig_info.txpwr_lvl_v3.pwrlvl_11a_5g[9] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11a_48m_5g")) { + userconfig_info.txpwr_lvl_v3.pwrlvl_11a_5g[10] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11a_54m_5g")) { + userconfig_info.txpwr_lvl_v3.pwrlvl_11a_5g[11] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11n_11ac_mcs0_5g")) { + userconfig_info.txpwr_lvl_v3.pwrlvl_11n_11ac_5g[0] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11n_11ac_mcs1_5g")) { + userconfig_info.txpwr_lvl_v3.pwrlvl_11n_11ac_5g[1] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11n_11ac_mcs2_5g")) { + userconfig_info.txpwr_lvl_v3.pwrlvl_11n_11ac_5g[2] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11n_11ac_mcs3_5g")) { + userconfig_info.txpwr_lvl_v3.pwrlvl_11n_11ac_5g[3] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11n_11ac_mcs4_5g")) { + userconfig_info.txpwr_lvl_v3.pwrlvl_11n_11ac_5g[4] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11n_11ac_mcs5_5g")) { + userconfig_info.txpwr_lvl_v3.pwrlvl_11n_11ac_5g[5] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11n_11ac_mcs6_5g")) { + userconfig_info.txpwr_lvl_v3.pwrlvl_11n_11ac_5g[6] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11n_11ac_mcs7_5g")) { + userconfig_info.txpwr_lvl_v3.pwrlvl_11n_11ac_5g[7] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11n_11ac_mcs8_5g")) { + userconfig_info.txpwr_lvl_v3.pwrlvl_11n_11ac_5g[8] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11n_11ac_mcs9_5g")) { + userconfig_info.txpwr_lvl_v3.pwrlvl_11n_11ac_5g[9] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11ax_mcs0_5g")) { + userconfig_info.txpwr_lvl_v3.pwrlvl_11ax_5g[0] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11ax_mcs1_5g")) { + userconfig_info.txpwr_lvl_v3.pwrlvl_11ax_5g[1] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11ax_mcs2_5g")) { + userconfig_info.txpwr_lvl_v3.pwrlvl_11ax_5g[2] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11ax_mcs3_5g")) { + userconfig_info.txpwr_lvl_v3.pwrlvl_11ax_5g[3] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11ax_mcs4_5g")) { + userconfig_info.txpwr_lvl_v3.pwrlvl_11ax_5g[4] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11ax_mcs5_5g")) { + userconfig_info.txpwr_lvl_v3.pwrlvl_11ax_5g[5] = rwnx_atoi(value); + } 
else if (!strcmp(command, "lvl_11ax_mcs6_5g")) { + userconfig_info.txpwr_lvl_v3.pwrlvl_11ax_5g[6] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11ax_mcs7_5g")) { + userconfig_info.txpwr_lvl_v3.pwrlvl_11ax_5g[7] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11ax_mcs8_5g")) { + userconfig_info.txpwr_lvl_v3.pwrlvl_11ax_5g[8] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11ax_mcs9_5g")) { + userconfig_info.txpwr_lvl_v3.pwrlvl_11ax_5g[9] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11ax_mcs10_5g")) { + userconfig_info.txpwr_lvl_v3.pwrlvl_11ax_5g[10] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11ax_mcs11_5g")) { + userconfig_info.txpwr_lvl_v3.pwrlvl_11ax_5g[11] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_adj_enable")) { + userconfig_info.txpwr_lvl_adj.enable = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_adj_2g4_chan_1_4")) { + userconfig_info.txpwr_lvl_adj.pwrlvl_adj_tbl_2g4[0] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_adj_2g4_chan_5_9")) { + userconfig_info.txpwr_lvl_adj.pwrlvl_adj_tbl_2g4[1] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_adj_2g4_chan_10_13")) { + userconfig_info.txpwr_lvl_adj.pwrlvl_adj_tbl_2g4[2] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_adj_5g_chan_42")) { + userconfig_info.txpwr_lvl_adj.pwrlvl_adj_tbl_5g[0] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_adj_5g_chan_58")) { + userconfig_info.txpwr_lvl_adj.pwrlvl_adj_tbl_5g[1] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_adj_5g_chan_106")) { + userconfig_info.txpwr_lvl_adj.pwrlvl_adj_tbl_5g[2] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_adj_5g_chan_122")) { + userconfig_info.txpwr_lvl_adj.pwrlvl_adj_tbl_5g[3] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_adj_5g_chan_138")) { + userconfig_info.txpwr_lvl_adj.pwrlvl_adj_tbl_5g[4] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_adj_5g_chan_155")) { + userconfig_info.txpwr_lvl_adj.pwrlvl_adj_tbl_5g[5] = rwnx_atoi(value); + } else if (!strcmp(command, "loss_enable_2g4")) { + userconfig_info.txpwr_loss.loss_enable_2g4 = rwnx_atoi(value); + } else if (!strcmp(command, "loss_value_2g4")) { + userconfig_info.txpwr_loss.loss_value_2g4 = rwnx_atoi(value); + } else if (!strcmp(command, "loss_enable_5g")) { + userconfig_info.txpwr_loss.loss_enable_5g = rwnx_atoi(value); + } else if (!strcmp(command, "loss_value_5g")) { + userconfig_info.txpwr_loss.loss_value_5g = rwnx_atoi(value); + } else if (!strcmp(command, "ofst_enable")) { + userconfig_info.txpwr_ofst.enable = rwnx_atoi(value); + userconfig_info.txpwr_ofst2x.enable = rwnx_atoi(value); + } else if (!strcmp(command, "ofst_chan_1_4")) { + userconfig_info.txpwr_ofst.chan_1_4 = rwnx_atoi(value); + } else if (!strcmp(command, "ofst_chan_5_9")) { + userconfig_info.txpwr_ofst.chan_5_9 = rwnx_atoi(value); + } else if (!strcmp(command, "ofst_chan_10_13")) { + userconfig_info.txpwr_ofst.chan_10_13 = rwnx_atoi(value); + } else if (!strcmp(command, "ofst_chan_36_64")) { + userconfig_info.txpwr_ofst.chan_36_64 = rwnx_atoi(value); + } else if (!strcmp(command, "ofst_chan_100_120")) { + userconfig_info.txpwr_ofst.chan_100_120 = rwnx_atoi(value); + } else if (!strcmp(command, "ofst_chan_122_140")) { + userconfig_info.txpwr_ofst.chan_122_140 = rwnx_atoi(value); + } else if (!strcmp(command, "ofst_chan_142_165")) { + userconfig_info.txpwr_ofst.chan_142_165 = rwnx_atoi(value); + } else if (!strcmp(command, "ofst_2g4_11b_chan_1_4")) { + userconfig_info.txpwr_ofst2x.pwrofst2x_tbl_2g4[0][0] = rwnx_atoi(value); + 
} else if (!strcmp(command, "ofst_2g4_11b_chan_5_9")) { + userconfig_info.txpwr_ofst2x.pwrofst2x_tbl_2g4[0][1] = rwnx_atoi(value); + } else if (!strcmp(command, "ofst_2g4_11b_chan_10_13")) { + userconfig_info.txpwr_ofst2x.pwrofst2x_tbl_2g4[0][2] = rwnx_atoi(value); + } else if (!strcmp(command, "ofst_2g4_ofdm_highrate_chan_1_4")) { + userconfig_info.txpwr_ofst2x.pwrofst2x_tbl_2g4[1][0] = rwnx_atoi(value); + } else if (!strcmp(command, "ofst_2g4_ofdm_highrate_chan_5_9")) { + userconfig_info.txpwr_ofst2x.pwrofst2x_tbl_2g4[1][1] = rwnx_atoi(value); + } else if (!strcmp(command, "ofst_2g4_ofdm_highrate_chan_10_13")) { + userconfig_info.txpwr_ofst2x.pwrofst2x_tbl_2g4[1][2] = rwnx_atoi(value); + } else if (!strcmp(command, "ofst_2g4_ofdm_lowrate_chan_1_4")) { + userconfig_info.txpwr_ofst2x.pwrofst2x_tbl_2g4[2][0] = rwnx_atoi(value); + } else if (!strcmp(command, "ofst_2g4_ofdm_lowrate_chan_5_9")) { + userconfig_info.txpwr_ofst2x.pwrofst2x_tbl_2g4[2][1] = rwnx_atoi(value); + } else if (!strcmp(command, "ofst_2g4_ofdm_lowrate_chan_10_13")) { + userconfig_info.txpwr_ofst2x.pwrofst2x_tbl_2g4[2][2] = rwnx_atoi(value); + } else if (!strcmp(command, "ofst_5g_ofdm_lowrate_chan_42")) { + userconfig_info.txpwr_ofst2x.pwrofst2x_tbl_5g[0][0] = rwnx_atoi(value); + } else if (!strcmp(command, "ofst_5g_ofdm_lowrate_chan_58")) { + userconfig_info.txpwr_ofst2x.pwrofst2x_tbl_5g[0][1] = rwnx_atoi(value); + } else if (!strcmp(command, "ofst_5g_ofdm_lowrate_chan_106")) { + userconfig_info.txpwr_ofst2x.pwrofst2x_tbl_5g[0][2] = rwnx_atoi(value); + } else if (!strcmp(command, "ofst_5g_ofdm_lowrate_chan_122")) { + userconfig_info.txpwr_ofst2x.pwrofst2x_tbl_5g[0][3] = rwnx_atoi(value); + } else if (!strcmp(command, "ofst_5g_ofdm_lowrate_chan_138")) { + userconfig_info.txpwr_ofst2x.pwrofst2x_tbl_5g[0][4] = rwnx_atoi(value); + } else if (!strcmp(command, "ofst_5g_ofdm_lowrate_chan_155")) { + userconfig_info.txpwr_ofst2x.pwrofst2x_tbl_5g[0][5] = rwnx_atoi(value); + } else if (!strcmp(command, "ofst_5g_ofdm_highrate_chan_42")) { + userconfig_info.txpwr_ofst2x.pwrofst2x_tbl_5g[1][0] = rwnx_atoi(value); + } else if (!strcmp(command, "ofst_5g_ofdm_highrate_chan_58")) { + userconfig_info.txpwr_ofst2x.pwrofst2x_tbl_5g[1][1] = rwnx_atoi(value); + } else if (!strcmp(command, "ofst_5g_ofdm_highrate_chan_106")) { + userconfig_info.txpwr_ofst2x.pwrofst2x_tbl_5g[1][2] = rwnx_atoi(value); + } else if (!strcmp(command, "ofst_5g_ofdm_highrate_chan_122")) { + userconfig_info.txpwr_ofst2x.pwrofst2x_tbl_5g[1][3] = rwnx_atoi(value); + } else if (!strcmp(command, "ofst_5g_ofdm_highrate_chan_138")) { + userconfig_info.txpwr_ofst2x.pwrofst2x_tbl_5g[1][4] = rwnx_atoi(value); + } else if (!strcmp(command, "ofst_5g_ofdm_highrate_chan_155")) { + userconfig_info.txpwr_ofst2x.pwrofst2x_tbl_5g[1][5] = rwnx_atoi(value); + } else if (!strcmp(command, "ofst_5g_ofdm_midrate_chan_42")) { + userconfig_info.txpwr_ofst2x.pwrofst2x_tbl_5g[2][0] = rwnx_atoi(value); + } else if (!strcmp(command, "ofst_5g_ofdm_midrate_chan_58")) { + userconfig_info.txpwr_ofst2x.pwrofst2x_tbl_5g[2][1] = rwnx_atoi(value); + } else if (!strcmp(command, "ofst_5g_ofdm_midrate_chan_106")) { + userconfig_info.txpwr_ofst2x.pwrofst2x_tbl_5g[2][2] = rwnx_atoi(value); + } else if (!strcmp(command, "ofst_5g_ofdm_midrate_chan_122")) { + userconfig_info.txpwr_ofst2x.pwrofst2x_tbl_5g[2][3] = rwnx_atoi(value); + } else if (!strcmp(command, "ofst_5g_ofdm_midrate_chan_138")) { + userconfig_info.txpwr_ofst2x.pwrofst2x_tbl_5g[2][4] = rwnx_atoi(value); + } else if (!strcmp(command, 
"ofst_5g_ofdm_midrate_chan_155")) { + userconfig_info.txpwr_ofst2x.pwrofst2x_tbl_5g[2][5] = rwnx_atoi(value); + } else if (!strcmp(command, "xtal_enable")) { + userconfig_info.xtal_cap.enable = rwnx_atoi(value); + } else if (!strcmp(command, "xtal_cap")) { + userconfig_info.xtal_cap.xtal_cap = rwnx_atoi(value); + } else if (!strcmp(command, "xtal_cap_fine")) { + userconfig_info.xtal_cap.xtal_cap_fine = rwnx_atoi(value); + } else { + AICWFDBG(LOGERROR, "invalid cmd: %s\n", command); + } + +} +void rwnx_plat_nvram_set_value_8800d80x2(char *command, char *value) +{ + //TODO send command + AICWFDBG(LOGINFO, "%s:command=%s value=%s\n", __func__, command, value); + if (!strcmp(command, "enable")) { + userconfig_info.txpwr_lvl_v4.enable = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11b_11ag_1m_2g4")) { + userconfig_info.txpwr_lvl_v4.pwrlvl_11b_11ag_2g4[0] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11b_11ag_2m_2g4")) { + userconfig_info.txpwr_lvl_v4.pwrlvl_11b_11ag_2g4[1] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11b_11ag_5m5_2g4")) { + userconfig_info.txpwr_lvl_v4.pwrlvl_11b_11ag_2g4[2] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11b_11ag_11m_2g4")) { + userconfig_info.txpwr_lvl_v4.pwrlvl_11b_11ag_2g4[3] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11b_11ag_6m_2g4")) { + userconfig_info.txpwr_lvl_v4.pwrlvl_11b_11ag_2g4[4] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11b_11ag_9m_2g4")) { + userconfig_info.txpwr_lvl_v4.pwrlvl_11b_11ag_2g4[5] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11b_11ag_12m_2g4")) { + userconfig_info.txpwr_lvl_v4.pwrlvl_11b_11ag_2g4[6] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11b_11ag_18m_2g4")) { + userconfig_info.txpwr_lvl_v4.pwrlvl_11b_11ag_2g4[7] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11b_11ag_24m_2g4")) { + userconfig_info.txpwr_lvl_v4.pwrlvl_11b_11ag_2g4[8] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11b_11ag_36m_2g4")) { + userconfig_info.txpwr_lvl_v4.pwrlvl_11b_11ag_2g4[9] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11b_11ag_48m_2g4")) { + userconfig_info.txpwr_lvl_v4.pwrlvl_11b_11ag_2g4[10] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11b_11ag_54m_2g4")) { + userconfig_info.txpwr_lvl_v4.pwrlvl_11b_11ag_2g4[11] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11n_11ac_mcs0_2g4")) { + userconfig_info.txpwr_lvl_v4.pwrlvl_11n_11ac_2g4[0] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11n_11ac_mcs1_2g4")) { + userconfig_info.txpwr_lvl_v4.pwrlvl_11n_11ac_2g4[1] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11n_11ac_mcs2_2g4")) { + userconfig_info.txpwr_lvl_v4.pwrlvl_11n_11ac_2g4[2] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11n_11ac_mcs3_2g4")) { + userconfig_info.txpwr_lvl_v4.pwrlvl_11n_11ac_2g4[3] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11n_11ac_mcs4_2g4")) { + userconfig_info.txpwr_lvl_v4.pwrlvl_11n_11ac_2g4[4] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11n_11ac_mcs5_2g4")) { + userconfig_info.txpwr_lvl_v4.pwrlvl_11n_11ac_2g4[5] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11n_11ac_mcs6_2g4")) { + userconfig_info.txpwr_lvl_v4.pwrlvl_11n_11ac_2g4[6] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11n_11ac_mcs7_2g4")) { + userconfig_info.txpwr_lvl_v4.pwrlvl_11n_11ac_2g4[7] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11n_11ac_mcs8_2g4")) { + userconfig_info.txpwr_lvl_v4.pwrlvl_11n_11ac_2g4[8] = rwnx_atoi(value); + } else if (!strcmp(command, 
"lvl_11n_11ac_mcs9_2g4")) { + userconfig_info.txpwr_lvl_v4.pwrlvl_11n_11ac_2g4[9] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11ax_mcs0_2g4")) { + userconfig_info.txpwr_lvl_v4.pwrlvl_11ax_2g4[0] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11ax_mcs1_2g4")) { + userconfig_info.txpwr_lvl_v4.pwrlvl_11ax_2g4[1] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11ax_mcs2_2g4")) { + userconfig_info.txpwr_lvl_v4.pwrlvl_11ax_2g4[2] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11ax_mcs3_2g4")) { + userconfig_info.txpwr_lvl_v4.pwrlvl_11ax_2g4[3] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11ax_mcs4_2g4")) { + userconfig_info.txpwr_lvl_v4.pwrlvl_11ax_2g4[4] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11ax_mcs5_2g4")) { + userconfig_info.txpwr_lvl_v4.pwrlvl_11ax_2g4[5] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11ax_mcs6_2g4")) { + userconfig_info.txpwr_lvl_v4.pwrlvl_11ax_2g4[6] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11ax_mcs7_2g4")) { + userconfig_info.txpwr_lvl_v4.pwrlvl_11ax_2g4[7] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11ax_mcs8_2g4")) { + userconfig_info.txpwr_lvl_v4.pwrlvl_11ax_2g4[8] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11ax_mcs9_2g4")) { + userconfig_info.txpwr_lvl_v4.pwrlvl_11ax_2g4[9] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11ax_mcs10_2g4")) { + userconfig_info.txpwr_lvl_v4.pwrlvl_11ax_2g4[10] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11ax_mcs11_2g4")) { + userconfig_info.txpwr_lvl_v4.pwrlvl_11ax_2g4[11] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11a_6m_5g")) { + userconfig_info.txpwr_lvl_v4.pwrlvl_11a_5g[0] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11a_9m_5g")) { + userconfig_info.txpwr_lvl_v4.pwrlvl_11a_5g[1] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11a_12m_5g")) { + userconfig_info.txpwr_lvl_v4.pwrlvl_11a_5g[2] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11a_18m_5g")) { + userconfig_info.txpwr_lvl_v4.pwrlvl_11a_5g[3] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11a_24m_5g")) { + userconfig_info.txpwr_lvl_v4.pwrlvl_11a_5g[4] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11a_36m_5g")) { + userconfig_info.txpwr_lvl_v4.pwrlvl_11a_5g[5] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11a_48m_5g")) { + userconfig_info.txpwr_lvl_v4.pwrlvl_11a_5g[6] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11a_54m_5g")) { + userconfig_info.txpwr_lvl_v4.pwrlvl_11a_5g[7] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11n_11ac_mcs0_5g")) { + userconfig_info.txpwr_lvl_v4.pwrlvl_11n_11ac_5g[0] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11n_11ac_mcs1_5g")) { + userconfig_info.txpwr_lvl_v4.pwrlvl_11n_11ac_5g[1] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11n_11ac_mcs2_5g")) { + userconfig_info.txpwr_lvl_v4.pwrlvl_11n_11ac_5g[2] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11n_11ac_mcs3_5g")) { + userconfig_info.txpwr_lvl_v4.pwrlvl_11n_11ac_5g[3] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11n_11ac_mcs4_5g")) { + userconfig_info.txpwr_lvl_v4.pwrlvl_11n_11ac_5g[4] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11n_11ac_mcs5_5g")) { + userconfig_info.txpwr_lvl_v4.pwrlvl_11n_11ac_5g[5] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11n_11ac_mcs6_5g")) { + userconfig_info.txpwr_lvl_v4.pwrlvl_11n_11ac_5g[6] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11n_11ac_mcs7_5g")) { + 
userconfig_info.txpwr_lvl_v4.pwrlvl_11n_11ac_5g[7] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11n_11ac_mcs8_5g")) { + userconfig_info.txpwr_lvl_v4.pwrlvl_11n_11ac_5g[8] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11n_11ac_mcs9_5g")) { + userconfig_info.txpwr_lvl_v4.pwrlvl_11n_11ac_5g[9] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11ax_mcs0_5g")) { + userconfig_info.txpwr_lvl_v4.pwrlvl_11ax_5g[0] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11ax_mcs1_5g")) { + userconfig_info.txpwr_lvl_v4.pwrlvl_11ax_5g[1] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11ax_mcs2_5g")) { + userconfig_info.txpwr_lvl_v4.pwrlvl_11ax_5g[2] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11ax_mcs3_5g")) { + userconfig_info.txpwr_lvl_v4.pwrlvl_11ax_5g[3] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11ax_mcs4_5g")) { + userconfig_info.txpwr_lvl_v4.pwrlvl_11ax_5g[4] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11ax_mcs5_5g")) { + userconfig_info.txpwr_lvl_v4.pwrlvl_11ax_5g[5] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11ax_mcs6_5g")) { + userconfig_info.txpwr_lvl_v4.pwrlvl_11ax_5g[6] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11ax_mcs7_5g")) { + userconfig_info.txpwr_lvl_v4.pwrlvl_11ax_5g[7] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11ax_mcs8_5g")) { + userconfig_info.txpwr_lvl_v4.pwrlvl_11ax_5g[8] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11ax_mcs9_5g")) { + userconfig_info.txpwr_lvl_v4.pwrlvl_11ax_5g[9] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11ax_mcs10_5g")) { + userconfig_info.txpwr_lvl_v4.pwrlvl_11ax_5g[10] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_11ax_mcs11_5g")) { + userconfig_info.txpwr_lvl_v4.pwrlvl_11ax_5g[11] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_adj_enable")) { + userconfig_info.txpwr_lvl_adj.enable = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_adj_2g4_chan_1_4")) { + userconfig_info.txpwr_lvl_adj.pwrlvl_adj_tbl_2g4[0] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_adj_2g4_chan_5_9")) { + userconfig_info.txpwr_lvl_adj.pwrlvl_adj_tbl_2g4[1] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_adj_2g4_chan_10_13")) { + userconfig_info.txpwr_lvl_adj.pwrlvl_adj_tbl_2g4[2] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_adj_5g_chan_42")) { + userconfig_info.txpwr_lvl_adj.pwrlvl_adj_tbl_5g[0] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_adj_5g_chan_58")) { + userconfig_info.txpwr_lvl_adj.pwrlvl_adj_tbl_5g[1] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_adj_5g_chan_106")) { + userconfig_info.txpwr_lvl_adj.pwrlvl_adj_tbl_5g[2] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_adj_5g_chan_122")) { + userconfig_info.txpwr_lvl_adj.pwrlvl_adj_tbl_5g[3] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_adj_5g_chan_138")) { + userconfig_info.txpwr_lvl_adj.pwrlvl_adj_tbl_5g[4] = rwnx_atoi(value); + } else if (!strcmp(command, "lvl_adj_5g_chan_155")) { + userconfig_info.txpwr_lvl_adj.pwrlvl_adj_tbl_5g[5] = rwnx_atoi(value); + } else if (!strcmp(command, "loss_enable_2g4")) { + userconfig_info.txpwr_loss.loss_enable_2g4 = rwnx_atoi(value); + } else if (!strcmp(command, "loss_value_2g4")) { + userconfig_info.txpwr_loss.loss_value_2g4 = rwnx_atoi(value); + } else if (!strcmp(command, "loss_enable_5g")) { + userconfig_info.txpwr_loss.loss_enable_5g = rwnx_atoi(value); + } else if (!strcmp(command, "loss_value_5g")) { + userconfig_info.txpwr_loss.loss_value_5g = rwnx_atoi(value); + } else if 
(!strcmp(command, "ofst_enable")) { + userconfig_info.txpwr_ofst2x_v2.enable = rwnx_atoi(value); + } else if (!strcmp(command, "ofst_2g4_ant0_11b_chan_1_4")) { + userconfig_info.txpwr_ofst2x_v2.pwrofst2x_tbl_2g4_ant0[0][0] = rwnx_atoi(value); + } else if (!strcmp(command, "ofst_2g4_ant0_11b_chan_5_9")) { + userconfig_info.txpwr_ofst2x_v2.pwrofst2x_tbl_2g4_ant0[1][0] = rwnx_atoi(value); + } else if (!strcmp(command, "ofst_2g4_ant0_11b_chan_10_13")) { + userconfig_info.txpwr_ofst2x_v2.pwrofst2x_tbl_2g4_ant0[2][0] = rwnx_atoi(value); + } else if (!strcmp(command, "ofst_2g4_ant0_ofdm_highrate_chan_1_4")) { + userconfig_info.txpwr_ofst2x_v2.pwrofst2x_tbl_2g4_ant0[0][1] = rwnx_atoi(value); + } else if (!strcmp(command, "ofst_2g4_ant0_ofdm_highrate_chan_5_9")) { + userconfig_info.txpwr_ofst2x_v2.pwrofst2x_tbl_2g4_ant0[1][1] = rwnx_atoi(value); + } else if (!strcmp(command, "ofst_2g4_ant0_ofdm_highrate_chan_10_13")) { + userconfig_info.txpwr_ofst2x_v2.pwrofst2x_tbl_2g4_ant0[2][1] = rwnx_atoi(value); + } else if (!strcmp(command, "ofst_2g4_ant1_11b_chan_1_4")) { + userconfig_info.txpwr_ofst2x_v2.pwrofst2x_tbl_2g4_ant1[0][0] = rwnx_atoi(value); + } else if (!strcmp(command, "ofst_2g4_ant1_11b_chan_5_9")) { + userconfig_info.txpwr_ofst2x_v2.pwrofst2x_tbl_2g4_ant1[1][0] = rwnx_atoi(value); + } else if (!strcmp(command, "ofst_2g4_ant1_11b_chan_10_13")) { + userconfig_info.txpwr_ofst2x_v2.pwrofst2x_tbl_2g4_ant1[2][0] = rwnx_atoi(value); + } else if (!strcmp(command, "ofst_2g4_ant1_ofdm_highrate_chan_1_4")) { + userconfig_info.txpwr_ofst2x_v2.pwrofst2x_tbl_2g4_ant1[0][1] = rwnx_atoi(value); + } else if (!strcmp(command, "ofst_2g4_ant1_ofdm_highrate_chan_5_9")) { + userconfig_info.txpwr_ofst2x_v2.pwrofst2x_tbl_2g4_ant1[1][1] = rwnx_atoi(value); + } else if (!strcmp(command, "ofst_2g4_ant1_ofdm_highrate_chan_10_13")) { + userconfig_info.txpwr_ofst2x_v2.pwrofst2x_tbl_2g4_ant1[2][1] = rwnx_atoi(value); + } else if (!strcmp(command, "ofst_5g_ant0_ofdm_highrate_chan_42")) { + userconfig_info.txpwr_ofst2x_v2.pwrofst2x_tbl_5g_ant0[0][0] = rwnx_atoi(value); + } else if (!strcmp(command, "ofst_5g_ant0_ofdm_highrate_chan_58")) { + userconfig_info.txpwr_ofst2x_v2.pwrofst2x_tbl_5g_ant0[1][0] = rwnx_atoi(value); + } else if (!strcmp(command, "ofst_5g_ant0_ofdm_highrate_chan_106")) { + userconfig_info.txpwr_ofst2x_v2.pwrofst2x_tbl_5g_ant0[2][0] = rwnx_atoi(value); + } else if (!strcmp(command, "ofst_5g_ant0_ofdm_highrate_chan_122")) { + userconfig_info.txpwr_ofst2x_v2.pwrofst2x_tbl_5g_ant0[3][0] = rwnx_atoi(value); + } else if (!strcmp(command, "ofst_5g_ant0_ofdm_highrate_chan_138")) { + userconfig_info.txpwr_ofst2x_v2.pwrofst2x_tbl_5g_ant0[4][0] = rwnx_atoi(value); + } else if (!strcmp(command, "ofst_5g_ant0_ofdm_highrate_chan_155")) { + userconfig_info.txpwr_ofst2x_v2.pwrofst2x_tbl_5g_ant0[5][0] = rwnx_atoi(value); + } else if (!strcmp(command, "ofst_5g_ant1_ofdm_highrate_chan_42")) { + userconfig_info.txpwr_ofst2x_v2.pwrofst2x_tbl_5g_ant1[0][0] = rwnx_atoi(value); + } else if (!strcmp(command, "ofst_5g_ant1_ofdm_highrate_chan_58")) { + userconfig_info.txpwr_ofst2x_v2.pwrofst2x_tbl_5g_ant1[1][0] = rwnx_atoi(value); + } else if (!strcmp(command, "ofst_5g_ant1_ofdm_highrate_chan_106")) { + userconfig_info.txpwr_ofst2x_v2.pwrofst2x_tbl_5g_ant1[2][0] = rwnx_atoi(value); + } else if (!strcmp(command, "ofst_5g_ant1_ofdm_highrate_chan_122")) { + userconfig_info.txpwr_ofst2x_v2.pwrofst2x_tbl_5g_ant1[3][0] = rwnx_atoi(value); + } else if (!strcmp(command, "ofst_5g_ant1_ofdm_highrate_chan_138")) { + 
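+ /* note: pwrofst2x_tbl_* is indexed [channel group][rate group]; the 2.4G tables use rate group 0 for 11b and 1 for OFDM high rates, while the per-antenna 5G tables only fill rate group 0 */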
userconfig_info.txpwr_ofst2x_v2.pwrofst2x_tbl_5g_ant1[4][0] = rwnx_atoi(value); + } else if (!strcmp(command, "ofst_5g_ant1_ofdm_highrate_chan_155")) { + userconfig_info.txpwr_ofst2x_v2.pwrofst2x_tbl_5g_ant1[5][0] = rwnx_atoi(value); + } else if (!strcmp(command, "xtal_enable")) { + userconfig_info.xtal_cap.enable = rwnx_atoi(value); + } else if (!strcmp(command, "xtal_cap")) { + userconfig_info.xtal_cap.xtal_cap = rwnx_atoi(value); + } else if (!strcmp(command, "xtal_cap_fine")) { + userconfig_info.xtal_cap.xtal_cap_fine = rwnx_atoi(value); + } else { + AICWFDBG(LOGERROR, "invalid cmd: %s\n", command); + } +} + +void rwnx_plat_userconfig_parsing_8800d80x2(char *buffer, int size) +{ + int i = 0; + int parse_state = 0; + char command[100]; + char value[100]; + int char_counter = 0; + + memset(command, 0, 64); + memset(value, 0, 100); + + for (i = 0; i < size; i++) { + //Send command or print nvram log when char is \r or \n + if (buffer[i] == 0x0a || buffer[i] == 0x0d) { + if (command[0] != 0 && value[0] != 0) { + if (parse_state == PRINT) { + AICWFDBG(LOGINFO, "%s:%s\r\n", __func__, value); + } else if (parse_state == GET_VALUE) { + rwnx_plat_nvram_set_value_8800d80x2(command, value); + } + } + //Reset command value and char_counter + memset(command, 0, 64); + memset(value, 0, 100); + char_counter = 0; + parse_state = INIT; + continue; + } + + //Switch parser state + if (parse_state == INIT) { + if (buffer[i] == '#') { + parse_state = PRINT; + continue; + } else if (buffer[i] == 0x0a || buffer[i] == 0x0d) { + parse_state = INIT; + continue; + } else { + parse_state = CMD; + } + } + + //Fill data to command and value + if (parse_state == PRINT) { + command[0] = 0x01; + value[char_counter] = buffer[i]; + char_counter++; + } else if (parse_state == CMD) { + if (command[0] != 0 && buffer[i] == '=') { + parse_state = GET_VALUE; + char_counter = 0; + continue; + } + command[char_counter] = buffer[i]; + char_counter++; + } else if (parse_state == GET_VALUE) { + if(buffer[i] != 0x2D && (buffer[i] < 0x30 || buffer[i] > 0x39)) { + continue; + } + value[char_counter] = buffer[i]; + char_counter++; + } + } +} + +void rwnx_plat_userconfig_parsing(char *buffer, int size) +{ + int i = 0; + int parse_state = 0; + char command[64]; + char value[100]; + int char_counter = 0; + + memset(command, 0, 64); + memset(value, 0, 100); + + for (i = 0; i < size; i++) { + //Send command or print nvram log when char is \r or \n + if (buffer[i] == 0x0a || buffer[i] == 0x0d) { + if (command[0] != 0 && value[0] != 0) { + if (parse_state == PRINT) { + AICWFDBG(LOGINFO, "%s:%s\r\n", __func__, value); + } else if (parse_state == GET_VALUE) { + rwnx_plat_nvram_set_value(command, value); + } + } + //Reset command value and char_counter + memset(command, 0, 64); + memset(value, 0, 100); + char_counter = 0; + parse_state = INIT; + continue; + } + + //Switch parser state + if (parse_state == INIT) { + if (buffer[i] == '#') { + parse_state = PRINT; + continue; + } else if (buffer[i] == 0x0a || buffer[i] == 0x0d) { + parse_state = INIT; + continue; + } else { + parse_state = CMD; + } + } + + //Fill data to command and value + if (parse_state == PRINT) { + command[0] = 0x01; + value[char_counter] = buffer[i]; + char_counter++; + } else if (parse_state == CMD) { + if (command[0] != 0 && buffer[i] == '=') { + parse_state = GET_VALUE; + char_counter = 0; + continue; + } + command[char_counter] = buffer[i]; + char_counter++; + } else if (parse_state == GET_VALUE) { + if(buffer[i] != 0x2D && (buffer[i] < 0x30 || buffer[i] > 0x39)) { + 
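+ /* GET_VALUE keeps only '-' (0x2D) and ASCII digits '0'-'9' (0x30-0x39); any other character in the value is skipped */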
continue; + } + value[char_counter] = buffer[i]; + char_counter++; + } + } +} + +#ifdef CONFIG_POWER_LIMIT +#define GetLineFromBuffer(buffer) strsep(&buffer, "\n") + +int isAllSpaceOrTab(uint8_t *data, uint8_t size) +{ + uint8_t cnt = 0, NumOfSpaceAndTab = 0; + while (size > cnt) { + if (data[cnt] == ' ' || data[cnt] == '\t' || data[cnt] == '\0') + ++NumOfSpaceAndTab; + ++cnt; + } + return size == NumOfSpaceAndTab; +} + +int IsCommentString(char *szStr) +{ + if (*szStr == '#' && *(szStr + 1) == ' ') + return 1; + else + return 0; +} + +int ParseQualifiedString(char *In, u32 *Start, char *Out, char LeftQualifier, char RightQualifier) +{ + u32 i = 0, j = 0; + char c = In[(*Start)++]; + if (c != LeftQualifier) + return 0; + i = (*Start); + c = In[(*Start)++]; + while (c != RightQualifier && c != '\0') + c = In[(*Start)++]; + if (c == '\0') + return 0; + j = (*Start) - 2; + strncpy((char *)Out, (const char *)(In + i), j - i + 1); + return 1; +} + +int GetU1ByteIntegerFromStringInDecimal(char *Str, u8 *pInt) +{ + u16 i = 0; + *pInt = 0; + while (Str[i] != '\0') { + if (Str[i] >= '0' && Str[i] <= '9') { + *pInt *= 10; + *pInt += (Str[i] - '0'); + } else + return 0; + ++i; + } + return 1; +} +int GetS1ByteIntegerFromStringInDecimal(char *str, s8 *val) +{ + u8 negative = 0; + u16 i = 0; + *val = 0; + while (str[i] != '\0') { + if (i == 0 && (str[i] == '+' || str[i] == '-')) { + if (str[i] == '-') + negative = 1; + } else if (str[i] >= '0' && str[i] <= '9') { + *val *= 10; + *val += (str[i] - '0'); + } else + return 0; + ++i; + } + if (negative) + *val = -*val; + return 1; +} + +int8_t rwnx_plat_powerlimit_save(u8_l band, char *channel, u8_l bw, char *limit, char *name) +{ + u8 channel_num, powerLimit_val, reg_idx; + + if (GetU1ByteIntegerFromStringInDecimal((char *)channel, &channel_num) == 0 + || GetS1ByteIntegerFromStringInDecimal((char *)limit, &powerLimit_val) == 0) { + AICWFDBG(LOGERROR, "Illegal index of power limit table [ch %s][val %s]\n", channel, limit); + return -1; + } + + reg_idx = get_region_index(name); + + if (band == PHY_BAND_2G4) { + uint8_t cur_idx = powerlimit_info.txpwr_lmt[reg_idx].ch_cnt_2g4[bw]; + AICWFDBG(LOGINFO, "reg_idx=%d, band=%d, bw=%d, cur_idx=%d, ch=%s, pwr=%s\n", + reg_idx, band, bw, cur_idx, channel, limit); + if (cur_idx < MAC_DOMAINCHANNEL_24G_MAX) { + powerlimit_info.txpwr_lmt[reg_idx].ch_num_2g4[bw][cur_idx] = channel_num; + powerlimit_info.txpwr_lmt[reg_idx].max_pwr_2g4[bw][cur_idx] = powerLimit_val; + powerlimit_info.txpwr_lmt[reg_idx].ch_cnt_2g4[bw]++; + } else { + AICWFDBG(LOGERROR, "band %d chan_cnt reached %d\n", band, MAC_DOMAINCHANNEL_24G_MAX); + AICWFDBG(LOGERROR, "channel=%s(%d) powerLimit=%s(%d)\n", channel, channel_num, limit, powerLimit_val); + } + } else if (band == PHY_BAND_5G) { + uint8_t cur_idx = powerlimit_info.txpwr_lmt[reg_idx].ch_cnt_5g[bw]; + AICWFDBG(LOGINFO, "reg_idx=%d, band=%d, bw=%d, cur_idx=%d, ch=%s, pwr=%s\n", + reg_idx, band, bw, cur_idx, channel, limit); + if (cur_idx < MAC_DOMAINCHANNEL_5G_MAX) { + powerlimit_info.txpwr_lmt[reg_idx].ch_num_5g[bw][cur_idx] = channel_num; + powerlimit_info.txpwr_lmt[reg_idx].max_pwr_5g[bw][cur_idx] = powerLimit_val; + powerlimit_info.txpwr_lmt[reg_idx].ch_cnt_5g[bw]++; + } else { + AICWFDBG(LOGERROR, "band %d chan_cnt reached %d\n", band, MAC_DOMAINCHANNEL_5G_MAX); + AICWFDBG(LOGERROR, "channel=%s(%d) powerLimit=%s(%d)\n", channel, channel_num, limit, powerLimit_val); + } + } + + return 0; +} + + +void rwnx_plat_powerlimit_parsing(char *buffer, int size, char *cc) +{ +#define 
LD_STAGE_EXC_MAPPING 0 +#define LD_STAGE_TAB_DEFINE 1 +#define LD_STAGE_TAB_START 2 +#define LD_STAGE_COLUMN_DEFINE 3 +#define LD_STAGE_CH_ROW 4 + + uint8_t loadingStage = LD_STAGE_EXC_MAPPING; + uint32_t i = 0, forCnt = 0; + uint32_t i_cc; + char *szLine, *ptmp; + char band[10], colNumBuf[10], bandwidth[10]; + uint8_t colNum = 0, band_cc = 0, bw_cc = 0; + char **reg_name = NULL; + // clear powerlimit info at first + memset((void *)&powerlimit_info, 0, sizeof(powerlimit_info_t)); + ptmp = buffer; + for (szLine = GetLineFromBuffer(ptmp); szLine != NULL; szLine = GetLineFromBuffer(ptmp)) { + if (isAllSpaceOrTab(szLine, sizeof(*szLine))) + continue; + if (IsCommentString(szLine)) + continue; + + if (loadingStage == LD_STAGE_EXC_MAPPING) { + if (szLine[0] == '#' || szLine[1] == '#') { + loadingStage = LD_STAGE_TAB_DEFINE; + } else { + continue; + } + } + + if (loadingStage == LD_STAGE_TAB_DEFINE) { + /* read "## 2.4G" */ + if (szLine[0] != '#' || szLine[1] != '#') + continue; + + /* skip the space */ + i = 2; + while (szLine[i] == ' ' || szLine[i] == '\t') + ++i; + + szLine[--i] = ' '; /* return the space in front of the regulation info */ + + /* Parse the label of the table */ + memset((void *)band, 0, 10); + memset((void *)bandwidth, 0, 10); + memset((void *)colNumBuf, 0, 10); + + if (!ParseQualifiedString(szLine, &i, band, ' ', ',')) { + AICWFDBG(LOGERROR, "Fail to parse band!\n"); + goto exit; + } + if (strncmp(band, "2.4G", 4) == 0) { + band_cc = PHY_BAND_2G4; + } else if (strncmp(band, "5G", 2) == 0) { + band_cc = PHY_BAND_5G; + } + + if (!ParseQualifiedString(szLine, &i, bandwidth, ' ', ',')) { + AICWFDBG(LOGERROR, "Fail to parse bandwidth!\n"); + goto exit; + } + if (strncmp(bandwidth, "20M", 3) == 0) + bw_cc = PHY_CHNL_BW_20; + else if (strncmp(bandwidth, "40M", 3) == 0) + bw_cc = PHY_CHNL_BW_40; + else if (strncmp(bandwidth, "80M", 3) == 0) + bw_cc = PHY_CHNL_BW_80; + + if (!ParseQualifiedString(szLine, &i, colNumBuf, '#', '#')) { + AICWFDBG(LOGERROR, "Fail to parse column number!\n"); + goto exit; + } + if (!GetU1ByteIntegerFromStringInDecimal(colNumBuf, &colNum)) { + AICWFDBG(LOGERROR, "Column number \"%s\" is not unsigned decimal\n", colNumBuf); + goto exit; + } + if (colNum == 0) { + AICWFDBG(LOGERROR, "Column number is 0\n"); + goto exit; + } + + AICWFDBG(LOGINFO, "band=%s, bandwidth=%s, colnum=%d\n", band, bandwidth, colNum); + loadingStage = LD_STAGE_TAB_START; + } else if (loadingStage == LD_STAGE_TAB_START) { + /* read "## START" */ + if (szLine[0] != '#' || szLine[1] != '#') + continue; + + /* skip the space */ + i = 2; + while (szLine[i] == ' ' || szLine[i] == '\t') + ++i; + + if (strncmp((u8 *)(szLine + i), "START", 5)) { + AICWFDBG(LOGERROR, "Missing \"## START\" label\n"); + goto exit; + } + + loadingStage = LD_STAGE_COLUMN_DEFINE; + } else if (loadingStage == LD_STAGE_COLUMN_DEFINE) { + /* read "## FCC ETSI" */ + if (szLine[0] != '#' || szLine[1] != '#') + continue; + + /* skip the space */ + i = 2; + while (szLine[i] == ' ' || szLine[i] == '\t') + ++i; + + reg_name = (char **)kmalloc(sizeof(char *) * colNum, GFP_KERNEL); + if (!reg_name) { + AICWFDBG(LOGERROR, "reg_name alloc fail\n"); + goto exit; + } + + for (forCnt = 0; forCnt < colNum; ++forCnt) { + /* skip the space */ + while (szLine[i] == ' ' || szLine[i] == '\t') + i++; + i_cc = i; + + while (szLine[i] != ' ' && szLine[i] != '\t' && szLine[i] != '\0') + i++; + + reg_name[forCnt] = (char *)kmalloc(i - i_cc + 1, GFP_KERNEL); + if (!reg_name[forCnt]) { + AICWFDBG(LOGERROR, "reg_name element alloc fail\n"); + goto 
exit; + } + + strncpy(reg_name[forCnt], szLine + i_cc, i - i_cc); + reg_name[forCnt][i - i_cc] = '\0'; + AICWFDBG(LOGINFO, "reg_name: %s\n", reg_name[forCnt]); + + } + + loadingStage = LD_STAGE_CH_ROW; + } else if (loadingStage == LD_STAGE_CH_ROW) { + char channel[10] = {0}, powerLimit[10] = {0}; + u8 cnt = 0; + + /* the table ends */ + if (szLine[0] == '#' && szLine[1] == '#') { + i = 2; + while (szLine[i] == ' ' || szLine[i] == '\t') + ++i; + + if (strncmp((u8 *)(szLine + i), "END", 3) == 0) { + loadingStage = LD_STAGE_TAB_DEFINE; + if (reg_name) { + for (forCnt = 0; forCnt < colNum; ++forCnt) { + if (reg_name[forCnt]) { + kfree(reg_name[forCnt]); + reg_name[forCnt] = NULL; + } + } + kfree(reg_name); + reg_name = NULL; + } + colNum = 0; + continue; + } else { + AICWFDBG(LOGERROR, "Missing \"## END\" label\n"); + goto exit; + } + } + + if ((szLine[0] != 'c' && szLine[0] != 'C') || + (szLine[1] != 'h' && szLine[1] != 'H')) { + AICWFDBG(LOGERROR, "Wrong channel prefix: '%c','%c'(%d,%d)\n", szLine[0], szLine[1], szLine[0], szLine[1]); + continue; + } + i = 2;/* move to the location behind 'h' */ + + /* load the channel number */ + cnt = 0; + while (szLine[i] >= '0' && szLine[i] <= '9') { + channel[cnt] = szLine[i]; + ++cnt; + ++i; + } + + for (forCnt = 0; forCnt < colNum; ++forCnt) { + /* skip the space between channel number and the power limit value */ + while (szLine[i] == ' ' || szLine[i] == '\t') + ++i; + + /* load the power limit value */ + memset((void *)powerLimit, 0, 10); + + if (szLine[i] == 'N' && szLine[i + 1] == 'A') { + /* + * means channel not available + */ + sprintf(powerLimit, "%d", POWER_LIMIT_INVALID_VAL); + i += 2; + } else if ((szLine[i] >= '0' && szLine[i] <= '9') + || szLine[i] == '+' || szLine[i] == '-') { + /* case of dBm value */ + cnt = 0; + while ((szLine[i] >= '0' && szLine[i] <= '9') + || szLine[i] == '+' || szLine[i] == '-' + ) { + powerLimit[cnt] = szLine[i]; + ++cnt; + ++i; + } + } else { + AICWFDBG(LOGERROR, "Wrong limit expression \"%c%c\"(%d, %d)\n", + szLine[i], szLine[i + 1], szLine[i], szLine[i + 1]); + goto exit; + } + + if (rwnx_plat_powerlimit_save(band_cc, channel, bw_cc, powerLimit, reg_name[forCnt]) < 0) + goto exit; + } + } + } + + powerlimit_info.flags |= POWER_LIMIT_CC_MATCHED_BIT; +exit: + if (reg_name) { + for (forCnt = 0; forCnt < colNum; ++forCnt) { + if (reg_name[forCnt]) { + kfree(reg_name[forCnt]); + reg_name[forCnt] = NULL; + } + } + kfree(reg_name); + reg_name = NULL; + } + + return; +} + + +/// 5G lower bound freq +#define PHY_FREQ_5G 5000 + +uint16_t phy_channel_to_freq(uint8_t band, int channel) +{ + if ((band == PHY_BAND_2G4) && (channel >= 1) && (channel <= 14)) { + if (channel == 14) + return 2484; + else + return 2407 + channel * 5; + } else if ((band == PHY_BAND_5G) && (channel >= 1) && (channel <= 165)) { + return PHY_FREQ_5G + channel * 5; + } + return 0; +} + +int8_t get_powerlimit_by_freq(uint8_t band, uint16_t freq, uint8_t r_idx) +{ + int8_t ret = POWER_LIMIT_INVALID_VAL; + uint8_t idx; + if (!(powerlimit_info.flags & POWER_LIMIT_CC_MATCHED_BIT)) { + AICWFDBG(LOGERROR, "powerlimit flag not set\n"); + return ret; + } + + if (band == PHY_BAND_2G4) { + uint8_t idx_cnt = powerlimit_info.txpwr_lmt[r_idx].ch_cnt_2g4[0]; + for (idx = 0; idx < idx_cnt; idx++) { + int ch_num = powerlimit_info.txpwr_lmt[r_idx].ch_num_2g4[0][idx]; + uint16_t freq_tmp = phy_channel_to_freq(PHY_BAND_2G4, ch_num); + if (freq == freq_tmp) { + ret = powerlimit_info.txpwr_lmt[r_idx].max_pwr_2g4[0][idx]; + AICWFDBG(LOGINFO, "[%d]: ch=%d(freq=%d), 
pwr=%d\n", idx, ch_num, freq, ret); + break; + } + } + if (idx == idx_cnt) + AICWFDBG(LOGERROR, "powerlimit search failed: band=%d freq=%d\n", band, freq); + } else if (band == PHY_BAND_5G) { + uint8_t idx_cnt = powerlimit_info.txpwr_lmt[r_idx].ch_cnt_5g[0]; + for (idx = 0; idx < idx_cnt; idx++) { + int ch_num = powerlimit_info.txpwr_lmt[r_idx].ch_num_5g[0][idx]; + uint16_t freq_tmp = phy_channel_to_freq(PHY_BAND_5G, ch_num); + if (freq == freq_tmp) { + ret = powerlimit_info.txpwr_lmt[r_idx].max_pwr_5g[0][idx]; + AICWFDBG(LOGINFO, "[%d]: ch=%d(freq=%d), pwr=%d\n", idx, ch_num, freq, ret); + break; + } + } + if (idx == idx_cnt) + AICWFDBG(LOGERROR, "powerlimit search failed: band=%d freq=%d\n", band, freq); + } + return ret; +} + +int8_t get_powerlimit_by_chnum(uint8_t chnum, uint8_t r_idx, uint8_t bw) +{ + int8_t ret = POWER_LIMIT_INVALID_VAL; + uint8_t idx; + if (!(powerlimit_info.flags & POWER_LIMIT_CC_MATCHED_BIT)) { + AICWFDBG(LOGERROR, "powerlimit flag not set\n"); + return ret; + } + + if (chnum <= 14) { + uint8_t idx_cnt = powerlimit_info.txpwr_lmt[r_idx].ch_cnt_2g4[bw]; + for (idx = 0; idx < idx_cnt; idx++) { + uint8_t ch_num = powerlimit_info.txpwr_lmt[r_idx].ch_num_2g4[bw][idx]; + if (chnum == ch_num) { + ret = powerlimit_info.txpwr_lmt[r_idx].max_pwr_2g4[bw][idx]; + AICWFDBG(LOGINFO, "[%d]: ch=%d, pwr=%d\n", idx, ch_num, ret); + break; + } + } + if (idx == idx_cnt) + AICWFDBG(LOGERROR, "%s powerlimit search failed: chnum=%d, please confirm the center frequency\n", + __func__, chnum); + } else if (chnum <= 165) { + uint8_t idx_cnt = powerlimit_info.txpwr_lmt[r_idx].ch_cnt_5g[bw]; + for (idx = 0; idx < idx_cnt; idx++) { + uint8_t ch_num = powerlimit_info.txpwr_lmt[r_idx].ch_num_5g[bw][idx]; + if (chnum == ch_num) { + ret = powerlimit_info.txpwr_lmt[r_idx].max_pwr_5g[bw][idx]; + AICWFDBG(LOGINFO, "[%d]: ch=%d, pwr=%d\n", idx, ch_num, ret); + break; + } + } + if (idx == idx_cnt) + AICWFDBG(LOGERROR, "%s powerlimit search failed: chnum=%d, please confirm the center frequency\n", + __func__, chnum); + } + + return ret; +} +#endif + +/** + * rwnx_plat_userconfig_load ---Load aic_userconfig.txt + *@filename name of config +*/ +static int rwnx_plat_userconfig_load(struct rwnx_hw *rwnx_hw) { + + if(rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800DC){ + rwnx_plat_userconfig_load_8800dc(rwnx_hw); + #ifdef CONFIG_POWER_LIMIT + rwnx_plat_powerlimit_load_8800dcdw(rwnx_hw, PRODUCT_ID_AIC8800DC); + #endif + }else if(rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800DW){ + rwnx_plat_userconfig_load_8800dw(rwnx_hw); + #ifdef CONFIG_POWER_LIMIT + rwnx_plat_powerlimit_load_8800dcdw(rwnx_hw, PRODUCT_ID_AIC8800DW); + #endif + }else if(rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800D81){ + rwnx_plat_userconfig_load_8800d80(rwnx_hw); + }else if(rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800D81X2 || + rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800D89X2){ + rwnx_plat_userconfig_load_8800d80x2(rwnx_hw); + } + + return 0; +} + + +/** + * rwnx_platform_on() - Start the platform + * + * @rwnx_hw: Main driver data + * @config: Config to restore (NULL if nothing to restore) + * + * It starts the platform : + * - load fw and ucodes + * - initialize IPC + * - boot the fw + * - enable link communication/IRQ + * + * Called by 802.11 part + */ +int rwnx_platform_on(struct rwnx_hw *rwnx_hw, void *config) +{ + #if 0 + u8 *shared_ram; + #endif +#ifdef CONFIG_ROM_PATCH_EN + int ret = 0; +#endif + + struct rwnx_plat *rwnx_plat = rwnx_hw->plat; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + if (rwnx_plat->enabled) + return 0; + + #if 0 + if 
(rwnx_platform_reset(rwnx_plat)) + return -1; + + rwnx_plat_mpif_sel(rwnx_plat); + + if ((ret = rwnx_plat_fcu_load(rwnx_hw))) + return ret; + if ((ret = rwnx_plat_agc_load(rwnx_plat))) + return ret; + if ((ret = rwnx_ldpc_load(rwnx_hw))) + return ret; + if ((ret = rwnx_plat_lmac_load(rwnx_plat))) + return ret; + + shared_ram = RWNX_ADDR(rwnx_plat, RWNX_ADDR_SYSTEM, SHARED_RAM_START_ADDR); + if ((ret = rwnx_ipc_init(rwnx_hw, shared_ram))) + return ret; + + if ((ret = rwnx_plat->enable(rwnx_hw))) + return ret; + RWNX_REG_WRITE(BOOTROM_ENABLE, rwnx_plat, + RWNX_ADDR_SYSTEM, SYSCTRL_MISC_CNTL_ADDR); + + #if 0 + if ((ret = rwnx_fw_trace_config_filters(rwnx_get_shared_trace_buf(rwnx_hw), + rwnx_ipc_fw_trace_desc_get(rwnx_hw), + rwnx_hw->mod_params->ftl))) + #endif + + #ifndef CONFIG_RWNX_FHOST + if ((ret = rwnx_check_fw_compatibility(rwnx_hw))) + { + rwnx_hw->plat->disable(rwnx_hw); + tasklet_kill(&rwnx_hw->task); + rwnx_ipc_deinit(rwnx_hw); + return ret; + } + #endif /* !CONFIG_RWNX_FHOST */ + + if (config) + rwnx_term_restore_config(rwnx_plat, config); + + rwnx_ipc_start(rwnx_hw); + #else + #ifndef CONFIG_ROM_PATCH_EN + #ifdef CONFIG_DOWNLOAD_FW + if ((ret = rwnx_plat_fmac_load(rwnx_hw))) + return ret; + #endif /* !CONFIG_ROM_PATCH_EN */ + #endif + #endif + +#ifdef CONFIG_ROM_PATCH_EN + ret = rwnx_plat_patch_load(rwnx_hw); + if (ret) { + return ret; + } +#endif + + rwnx_plat_userconfig_load(rwnx_hw); + + + //rwnx_plat->enabled = true; + + return 0; +} + +/** + * rwnx_platform_off() - Stop the platform + * + * @rwnx_hw: Main driver data + * @config: Updated with pointer to config, to be able to restore it with + * rwnx_platform_on(). It's up to the caller to free the config. Set to NULL + * if configuration is not needed. + * + * Called by 802.11 part + */ +void rwnx_platform_off(struct rwnx_hw *rwnx_hw, void **config) +{ +#if defined(AICWF_USB_SUPPORT) || defined(AICWF_SDIO_SUPPORT) + tasklet_kill(&rwnx_hw->task); + rwnx_hw->plat->enabled = false; + return ; +#endif + + if (!rwnx_hw->plat->enabled) { + if (config) + *config = NULL; + return; + } + +#ifdef AICWF_PCIE_SUPPORT + rwnx_ipc_stop(rwnx_hw); +#endif + + if (config) + *config = rwnx_term_save_config(rwnx_hw->plat); + + rwnx_hw->plat->disable(rwnx_hw); + + tasklet_kill(&rwnx_hw->task); + +#ifdef AICWF_PCIE_SUPPORT + rwnx_ipc_deinit(rwnx_hw); +#endif + + + rwnx_platform_reset(rwnx_hw->plat); + + rwnx_hw->plat->enabled = false; +} + +/** + * rwnx_platform_init() - Initialize the platform + * + * @rwnx_plat: platform data (already updated by platform driver) + * @platform_data: Pointer to store the main driver data pointer (aka rwnx_hw) + * That will be set as driver data for the platform driver + * Return: 0 on success, < 0 otherwise + * + * Called by the platform driver after it has been probed + */ +int rwnx_platform_init(struct rwnx_plat *rwnx_plat, void **platform_data) +{ + int ret = 0; + RWNX_DBG(RWNX_FN_ENTRY_STR); + + rwnx_plat->enabled = false; + rwnx_plat->wait_disconnect_cb = false; + g_rwnx_plat = rwnx_plat; + +#if defined CONFIG_RWNX_FULLMAC + AICWFDBG(LOGINFO, "%s rwnx_cfg80211_init enter \r\n", __func__); + ret = rwnx_cfg80211_init(rwnx_plat, platform_data); + AICWFDBG(LOGINFO, "%s rwnx_cfg80211_init exit \r\n", __func__); +#if defined(AICWF_USB_SUPPORT) && defined(CONFIG_VENDOR_GPIO) + // initialize gpiob2, gpiob3, gpiob5 to output mode and set output to 0 + rwnx_send_dbg_gpio_init_req(rwnx_plat->usbdev->rwnx_hw, 2, 1, 0);//gpiob 2 = 0 + rwnx_send_dbg_gpio_init_req(rwnx_plat->usbdev->rwnx_hw, 3, 1, 0);//gpiob 3 = 0 + 
rwnx_send_dbg_gpio_init_req(rwnx_plat->usbdev->rwnx_hw, 5, 1, 0);//gpiob 5 = 0 + + // read gpiob2 + //struct dbg_gpio_read_cfm gpio_rd_cfm; + //rwnx_send_dbg_gpio_read_req(rwnx_plat->usbdev->rwnx_hw, 2, &gpio_rd_cfm); + //AICWFDBG(LOGINFO, "gpio_rd_cfm idx:%d val:%d\n", gpio_rd_cfm.gpio_idx, gpio_rd_cfm.gpio_val); + + // set gpiob2 output to 1 + //rwnx_send_dbg_gpio_write_req(rwnx_plat->usbdev->rwnx_hw, 2, 1); +#endif + + return ret; +#elif defined CONFIG_RWNX_FHOST + return rwnx_fhost_init(rwnx_plat, platform_data); +#endif +} + +/** + * rwnx_platform_deinit() - Deinitialize the platform + * + * @rwnx_hw: main driver data + * + * Called by the platform driver after it is removed + */ +void rwnx_platform_deinit(struct rwnx_hw *rwnx_hw) +{ + RWNX_DBG(RWNX_FN_ENTRY_STR); + +#if defined CONFIG_RWNX_FULLMAC + rwnx_cfg80211_deinit(rwnx_hw); +#elif defined CONFIG_RWNX_FHOST + rwnx_fhost_deinit(rwnx_hw); +#endif +} + +/** + * rwnx_platform_register_drv() - Register all possible platform drivers + */ +int rwnx_platform_register_drv(void) +{ + return rwnx_pci_register_drv(); +} + + +/** + * rwnx_platform_unregister_drv() - Unegister all platform drivers + */ +void rwnx_platform_unregister_drv(void) +{ + return rwnx_pci_unregister_drv(); +} + +struct device *rwnx_platform_get_dev(struct rwnx_plat *rwnx_plat) +{ +#ifdef AICWF_SDIO_SUPPORT + return rwnx_plat->sdiodev->dev; +#endif +#ifdef AICWF_USB_SUPPORT + return rwnx_plat->usbdev->dev; +#endif + return &(rwnx_plat->pci_dev->dev); +} + + +#ifndef CONFIG_RWNX_SDM +MODULE_FIRMWARE(RWNX_AGC_FW_NAME); +MODULE_FIRMWARE(RWNX_FCU_FW_NAME); +MODULE_FIRMWARE(RWNX_LDPC_RAM_NAME); +#endif +MODULE_FIRMWARE(RWNX_MAC_FW_NAME); +#ifndef CONFIG_RWNX_TL4 +MODULE_FIRMWARE(RWNX_MAC_FW_NAME2); +#endif + + diff --git a/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_platform.h b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_platform.h new file mode 100644 index 0000000..7d54740 --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_platform.h @@ -0,0 +1,186 @@ +/** + ****************************************************************************** + * + * @file rwnx_platorm.h + * + * Copyright (C) RivieraWaves 2012-2019 + * + ****************************************************************************** + */ + +#ifndef _RWNX_PLATFORM_H_ +#define _RWNX_PLATFORM_H_ + +#include +#include "lmac_msg.h" + + +#define RWNX_CONFIG_FW_NAME "rwnx_settings.ini" +#define RWNX_PHY_CONFIG_TRD_NAME "rwnx_trident.ini" +#define RWNX_PHY_CONFIG_KARST_NAME "rwnx_karst.ini" +#define RWNX_AGC_FW_NAME "agcram.bin" +#define RWNX_LDPC_RAM_NAME "ldpcram.bin" +#ifdef CONFIG_RWNX_FULLMAC +#define RWNX_MAC_FW_BASE_NAME "fmacfw" +#elif defined CONFIG_RWNX_FHOST +#define RWNX_MAC_FW_BASE_NAME "fhostfw" +#endif /* CONFIG_RWNX_FULLMAC */ + +#ifdef CONFIG_RWNX_TL4 +#define RWNX_MAC_FW_NAME RWNX_MAC_FW_BASE_NAME".hex" +#else +#define RWNX_MAC_FW_NAME RWNX_MAC_FW_BASE_NAME".ihex" +#define RWNX_MAC_FW_NAME2 RWNX_MAC_FW_BASE_NAME".bin" +#endif + +#define RWNX_FCU_FW_NAME "fcuram.bin" +#if (defined(CONFIG_DPD) && !defined(CONFIG_FORCE_DPD_CALIB)) +#define FW_DPDRESULT_NAME_8800DC "aic_dpdresult_lite_8800dc.bin" +#endif + +#define POWER_LEVEL_INVALID_VAL (127) + +enum { + FW_NORMAL_MODE = 0, + FW_RFTEST_MODE = 1, + FW_BLE_SCAN_WAKEUP_MODE = 2, + FW_M2D_OTA_MODE = 3, + FW_DPDCALIB_MODE = 4, + FW_BLE_SCAN_AD_FILTER_MODE = 5, +}; + + +/** + * Type of memory to access (cf rwnx_plat.get_address) + * + * @RWNX_ADDR_CPU To access memory of the embedded CPU + * @RWNX_ADDR_SYSTEM To access 
memory/registers of one subsystem of the + * embedded system + * + */ +enum rwnx_platform_addr { + RWNX_ADDR_CPU, + RWNX_ADDR_SYSTEM, + RWNX_ADDR_MAX, +}; + +typedef struct +{ + txpwr_lvl_conf_t txpwr_lvl; + txpwr_lvl_conf_v2_t txpwr_lvl_v2; + txpwr_lvl_conf_v3_t txpwr_lvl_v3; + txpwr_lvl_conf_v4_t txpwr_lvl_v4; + txpwr_lvl_adj_conf_t txpwr_lvl_adj; + txpwr_loss_conf_t txpwr_loss; + txpwr_ofst_conf_t txpwr_ofst; + txpwr_ofst2x_conf_t txpwr_ofst2x; + txpwr_ofst2x_conf_v2_t txpwr_ofst2x_v2; + xtal_cap_conf_t xtal_cap; +} userconfig_info_t; + +extern userconfig_info_t userconfig_info; + +typedef enum { + REGIONS_SRRC, + REGIONS_FCC, + REGIONS_ETSI, + REGIONS_JP, + REGIONS_DEFAULT, +} Regions_code; + + +struct rwnx_hw; + +/** + * struct rwnx_plat - Operation pointers for RWNX PCI platform + * + * @pci_dev: pointer to pci dev + * @enabled: Set if embedded platform has been enabled (i.e. fw loaded and + * ipc started) + * @enable: Configure communication with the fw (i.e. configure the transfers + * enable and register interrupt) + * @disable: Stop communication with the fw + * @deinit: Free all ressources allocated for the embedded platform + * @get_address: Return the virtual address to access the requested address on + * the platform. + * @ack_irq: Acknowledge the irq at link level. + * @get_config_reg: Return the list (size + pointer) of registers to restore in + * order to reload the platform while keeping the current configuration. + * + * @priv Private data for the link driver + */ +struct rwnx_plat { + struct pci_dev *pci_dev; + +#ifdef AICWF_SDIO_SUPPORT + struct aic_sdio_dev *sdiodev; +#endif + +#ifdef AICWF_USB_SUPPORT + struct aic_usb_dev *usbdev; +#endif + bool enabled; + bool wait_disconnect_cb; + + int (*enable)(struct rwnx_hw *rwnx_hw); + int (*disable)(struct rwnx_hw *rwnx_hw); + void (*deinit)(struct rwnx_plat *rwnx_plat); + u8* (*get_address)(struct rwnx_plat *rwnx_plat, int addr_name, + unsigned int offset); + void (*ack_irq)(struct rwnx_plat *rwnx_plat); + int (*get_config_reg)(struct rwnx_plat *rwnx_plat, const u32 **list); + + u8 priv[0] __aligned(sizeof(void *)); +}; + +#define RWNX_ADDR(plat, base, offset) \ + plat->get_address(plat, base, offset) + +#define RWNX_REG_READ(plat, base, offset) \ + readl(plat->get_address(plat, base, offset)) + +#define RWNX_REG_WRITE(val, plat, base, offset) \ + writel(val, plat->get_address(plat, base, offset)) + +extern struct rwnx_plat *g_rwnx_plat; + +int rwnx_platform_init(struct rwnx_plat *rwnx_plat, void **platform_data); +void rwnx_platform_deinit(struct rwnx_hw *rwnx_hw); + +int rwnx_platform_on(struct rwnx_hw *rwnx_hw, void *config); +void rwnx_platform_off(struct rwnx_hw *rwnx_hw, void **config); + +int is_file_exist(char* name); +void get_userconfig_txpwr_lvl_in_fdrv(txpwr_lvl_conf_t *txpwr_lvl); +void get_userconfig_txpwr_lvl_v2_in_fdrv(txpwr_lvl_conf_v2_t *txpwr_lvl_v2); +void get_userconfig_txpwr_lvl_v3_in_fdrv(txpwr_lvl_conf_v3_t *txpwr_lvl_v3); +void get_userconfig_txpwr_lvl_v4_in_fdrv(txpwr_lvl_conf_v4_t *txpwr_lvl_v4); +void get_userconfig_txpwr_lvl_adj_in_fdrv(txpwr_lvl_adj_conf_t *txpwr_lvl_adj); +void get_userconfig_txpwr_ofst_in_fdrv(txpwr_ofst_conf_t *txpwr_ofst); +void get_userconfig_txpwr_ofst2x_in_fdrv(txpwr_ofst2x_conf_t *txpwr_ofst2x); +void get_userconfig_txpwr_ofst2x_v2_in_fdrv(txpwr_ofst2x_conf_v2_t *txpwr_ofst2x_v2); +void get_userconfig_txpwr_loss(txpwr_loss_conf_t *txpwr_loss); +void set_txpwr_loss_ofst(s8_l value); +void rwnx_plat_userconfig_parsing(char *buffer, int size); + +uint8_t 
get_ccode_region(char * ccode); +u8 get_region_index(char * name); + + +#ifdef CONFIG_POWER_LIMIT +int8_t rwnx_plat_powerlimit_save(u8_l band, char *channel, u8_l bw, char *limit, char *name); +void rwnx_plat_powerlimit_parsing(char *buffer, int size, char *cc); +int8_t get_powerlimit_by_freq(uint8_t band, uint16_t freq, uint8_t r_idx); +int8_t get_powerlimit_by_chnum(uint8_t chnum, uint8_t r_idx, uint8_t bw); +#endif +int rwnx_platform_register_drv(void); +void rwnx_platform_unregister_drv(void); + +extern struct device *rwnx_platform_get_dev(struct rwnx_plat *rwnx_plat); + +static inline unsigned int rwnx_platform_get_irq(struct rwnx_plat *rwnx_plat) +{ + return rwnx_plat->pci_dev->irq; +} + +#endif /* _RWNX_PLATFORM_H_ */ diff --git a/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_prof.h b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_prof.h new file mode 100644 index 0000000..fa59221 --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_prof.h @@ -0,0 +1,133 @@ +/** + **************************************************************************************** + * + * @file rwnx_prof.h + * + * Copyright (C) RivieraWaves 2012-2019 + * + **************************************************************************************** + */ + +#ifndef _RWNX_PROF_H_ +#define _RWNX_PROF_H_ + +#include "reg_access.h" +#include "rwnx_platform.h" + +static inline void rwnx_prof_set(struct rwnx_hw *rwnx_hw, int val) +{ + struct rwnx_plat *rwnx_plat = rwnx_hw->plat; + RWNX_REG_WRITE(val, rwnx_plat, RWNX_ADDR_SYSTEM, NXMAC_SW_SET_PROFILING_ADDR); +} + +static inline void rwnx_prof_clear(struct rwnx_hw *rwnx_hw, int val) +{ + struct rwnx_plat *rwnx_plat = rwnx_hw->plat; + RWNX_REG_WRITE(val, rwnx_plat, RWNX_ADDR_SYSTEM, NXMAC_SW_CLEAR_PROFILING_ADDR); +} + +#if 0 +/* Defines for SW Profiling registers values */ +enum { + TX_IPC_IRQ, + TX_IPC_EVT, + TX_PREP_EVT, + TX_DMA_IRQ, + TX_MAC_IRQ, + TX_PAYL_HDL, + TX_CFM_EVT, + TX_IPC_CFM, + RX_MAC_IRQ, // 8 + RX_TRIGGER_EVT, + RX_DMA_IRQ, + RX_DMA_EVT, + RX_IPC_IND, + RX_MPDU_XFER, + DBG_PROF_MAX +}; +#endif + +enum { + SW_PROF_HOSTBUF_IDX = 12, + /****** IPC IRQs related signals ******/ + /* E2A direction */ + SW_PROF_IRQ_E2A_RXDESC = 16, // to make sure we let 16 bits available for LMAC FW + SW_PROF_IRQ_E2A_TXCFM, + SW_PROF_IRQ_E2A_DBG, + SW_PROF_IRQ_E2A_MSG, + SW_PROF_IPC_MSGPUSH, + SW_PROF_MSGALLOC, + SW_PROF_MSGIND, + SW_PROF_DBGIND, + + /* A2E direction */ + SW_PROF_IRQ_A2E_TXCFM_BACK, + + /****** Driver functions related signals ******/ + SW_PROF_WAIT_QUEUE_STOP, + SW_PROF_WAIT_QUEUE_WAKEUP, + SW_PROF_RWNXDATAIND, + SW_PROF_RWNX_IPC_IRQ_HDLR, + SW_PROF_RWNX_IPC_THR_IRQ_HDLR, + SW_PROF_IEEE80211RX, + SW_PROF_RWNX_PATTERN, + SW_PROF_MAX +}; + +// [LT]For debug purpose only +#if (0) +#define SW_PROF_CHAN_CTXT_CFM_HDL_BIT (21) +#define SW_PROF_CHAN_CTXT_CFM_BIT (22) +#define SW_PROF_CHAN_CTXT_CFM_SWDONE_BIT (23) +#define SW_PROF_CHAN_CTXT_PUSH_BIT (24) +#define SW_PROF_CHAN_CTXT_QUEUE_BIT (25) +#define SW_PROF_CHAN_CTXT_TX_BIT (26) +#define SW_PROF_CHAN_CTXT_TX_PAUSE_BIT (27) +#define SW_PROF_CHAN_CTXT_PSWTCH_BIT (28) +#define SW_PROF_CHAN_CTXT_SWTCH_BIT (29) + +// TO DO: update this + +#define REG_SW_SET_PROFILING_CHAN(env, bit) \ + rwnx_prof_set((struct rwnx_hw*)env, BIT(bit)) + +#define REG_SW_CLEAR_PROFILING_CHAN(env, bit) \ + rwnx_prof_clear((struct rwnx_hw*)env, BIT(bit)) + +#else +#define SW_PROF_CHAN_CTXT_CFM_HDL_BIT (0) +#define SW_PROF_CHAN_CTXT_CFM_BIT (0) +#define SW_PROF_CHAN_CTXT_CFM_SWDONE_BIT (0) +#define 
SW_PROF_CHAN_CTXT_PUSH_BIT (0) +#define SW_PROF_CHAN_CTXT_QUEUE_BIT (0) +#define SW_PROF_CHAN_CTXT_TX_BIT (0) +#define SW_PROF_CHAN_CTXT_TX_PAUSE_BIT (0) +#define SW_PROF_CHAN_CTXT_PSWTCH_BIT (0) +#define SW_PROF_CHAN_CTXT_SWTCH_BIT (0) + +#define REG_SW_SET_PROFILING_CHAN(env, bit) do {} while (0) +#define REG_SW_CLEAR_PROFILING_CHAN(env, bit) do {} while (0) +#endif + +#ifdef CONFIG_RWNX_SW_PROFILING +/* Macros for SW PRofiling registers access */ +#define REG_SW_SET_PROFILING(env, bit) \ + rwnx_prof_set((struct rwnx_hw*)env, BIT(bit)) + +#define REG_SW_SET_HOSTBUF_IDX_PROFILING(env, val) \ + rwnx_prof_set((struct rwnx_hw*)env, val<<(SW_PROF_HOSTBUF_IDX)) + +#define REG_SW_CLEAR_PROFILING(env, bit) \ + rwnx_prof_clear((struct rwnx_hw*)env, BIT(bit)) + +#define REG_SW_CLEAR_HOSTBUF_IDX_PROFILING(env) \ + rwnx_prof_clear((struct rwnx_hw*)env,0x0F<<(SW_PROF_HOSTBUF_IDX)) + +#else +#define REG_SW_SET_PROFILING(env, value) do {} while (0) +#define REG_SW_CLEAR_PROFILING(env, value) do {} while (0) +#define REG_SW_SET_HOSTBUF_IDX_PROFILING(env, val) do {} while (0) +#define REG_SW_CLEAR_HOSTBUF_IDX_PROFILING(env) do {} while (0) +#endif + +#endif /* _RWNX_PROF_H_ */ diff --git a/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_radar.c b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_radar.c new file mode 100644 index 0000000..084dcf4 --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_radar.c @@ -0,0 +1,1793 @@ +/** +****************************************************************************** + * + * @file rwnx_radar.c + * + * @brief Functions to handle radar detection + * Radar detection is copied (and adapted) from ath driver source code. + * + * Copyright (c) 2012 Neratec Solutions AG + * Copyright (C) RivieraWaves 2015-2019 + * + ****************************************************************************** + */ +#include +#include +#include +#include + +#include "rwnx_radar.h" +#include "rwnx_defs.h" +#include "rwnx_msg_tx.h" +#include "rwnx_events.h" +#include "rwnx_compat.h" + +/* + * tolerated deviation of radar time stamp in usecs on both sides + * TODO: this might need to be HW-dependent + */ +#define PRI_TOLERANCE 16 + +/** + * struct radar_types - contains array of patterns defined for one DFS domain + * @domain: DFS regulatory domain + * @num_radar_types: number of radar types to follow + * @radar_types: radar types array + */ +struct radar_types { + enum nl80211_dfs_regions region; + u32 num_radar_types; + const struct radar_detector_specs *spec_riu; + const struct radar_detector_specs *spec_fcu; +}; + +/** + * Type of radar waveform: + * RADAR_WAVEFORM_SHORT : waveform defined by + * - pulse width + * - pulse interval in a burst (pri) + * - number of pulses in a burst (ppb) + * + * RADAR_WAVEFORM_WEATHER : + * same than SHORT except that ppb is dependent of pri + * + * RADAR_WAVEFORM_INTERLEAVED : + * same than SHORT except there are several value of pri (interleaved) + * + * RADAR_WAVEFORM_LONG : + * + */ +enum radar_waveform_type { + RADAR_WAVEFORM_SHORT, + RADAR_WAVEFORM_WEATHER, + RADAR_WAVEFORM_INTERLEAVED, + RADAR_WAVEFORM_LONG +}; + +/** + * struct radar_detector_specs - detector specs for a radar pattern type + * @type_id: pattern type, as defined by regulatory + * @width_min: minimum radar pulse width in [us] + * @width_max: maximum radar pulse width in [us] + * @pri_min: minimum pulse repetition interval in [us] (including tolerance) + * @pri_max: minimum pri in [us] (including tolerance) + * @num_pri: maximum number of different pri for this 
type + * @ppb: pulses per bursts for this type + * @ppb_thresh: number of pulses required to trigger detection + * @max_pri_tolerance: pulse time stamp tolerance on both sides [us] + * @type: Type of radar waveform + */ +struct radar_detector_specs { + u8 type_id; + u8 width_min; + u8 width_max; + u16 pri_min; + u16 pri_max; + u8 num_pri; + u8 ppb; + u8 ppb_thresh; + u8 max_pri_tolerance; + enum radar_waveform_type type; +}; + + +/* percentage on ppb threshold to trigger detection */ +#define MIN_PPB_THRESH 50 +#define PPB_THRESH(PPB) ((PPB * MIN_PPB_THRESH + 50) / 100) +#define PRF2PRI(PRF) ((1000000 + PRF / 2) / PRF) + +/* width tolerance */ +#define WIDTH_TOLERANCE 2 +#define WIDTH_LOWER(X) (X) +#define WIDTH_UPPER(X) (X) + +#define ETSI_PATTERN_SHORT(ID, WMIN, WMAX, PMIN, PMAX, PPB) \ + { \ + ID, WIDTH_LOWER(WMIN), WIDTH_UPPER(WMAX), \ + (PRF2PRI(PMAX) - PRI_TOLERANCE), \ + (PRF2PRI(PMIN) + PRI_TOLERANCE), 1, PPB, \ + PPB_THRESH(PPB), PRI_TOLERANCE, RADAR_WAVEFORM_SHORT \ + } + +#define ETSI_PATTERN_INTERLEAVED(ID, WMIN, WMAX, PMIN, PMAX, PRFMIN, PRFMAX, PPB) \ + { \ + ID, WIDTH_LOWER(WMIN), WIDTH_UPPER(WMAX), \ + (PRF2PRI(PMAX) * PRFMIN- PRI_TOLERANCE), \ + (PRF2PRI(PMIN) * PRFMAX + PRI_TOLERANCE), \ + PRFMAX, PPB * PRFMAX, \ + PPB_THRESH(PPB), PRI_TOLERANCE, RADAR_WAVEFORM_INTERLEAVED \ + } + +/* radar types as defined by ETSI EN-301-893 v1.7.1 */ +static const struct radar_detector_specs etsi_radar_ref_types_v17_riu[] = { + ETSI_PATTERN_SHORT(0, 0, 8, 700, 700, 18), + ETSI_PATTERN_SHORT(1, 0, 10, 200, 1000, 10), + ETSI_PATTERN_SHORT(2, 0, 22, 200, 1600, 15), + ETSI_PATTERN_SHORT(3, 0, 22, 2300, 4000, 25), + ETSI_PATTERN_SHORT(4, 20, 38, 2000, 4000, 20), + ETSI_PATTERN_INTERLEAVED(5, 0, 8, 300, 400, 2, 3, 10), + ETSI_PATTERN_INTERLEAVED(6, 0, 8, 400, 1200, 2, 3, 15), +}; + +static const struct radar_detector_specs etsi_radar_ref_types_v17_fcu[] = { + ETSI_PATTERN_SHORT(0, 0, 8, 700, 700, 18), + ETSI_PATTERN_SHORT(1, 0, 8, 200, 1000, 10), + ETSI_PATTERN_SHORT(2, 0, 16, 200, 1600, 15), + ETSI_PATTERN_SHORT(3, 0, 16, 2300, 4000, 25), + ETSI_PATTERN_SHORT(4, 20, 34, 2000, 4000, 20), + ETSI_PATTERN_INTERLEAVED(5, 0, 8, 300, 400, 2, 3, 10), + ETSI_PATTERN_INTERLEAVED(6, 0, 8, 400, 1200, 2, 3, 15), +}; + +static const struct radar_types etsi_radar_types_v17 = { + .region = NL80211_DFS_ETSI, + .num_radar_types = ARRAY_SIZE(etsi_radar_ref_types_v17_riu), + .spec_riu = etsi_radar_ref_types_v17_riu, + .spec_fcu = etsi_radar_ref_types_v17_fcu, +}; + +#define FCC_PATTERN(ID, WMIN, WMAX, PMIN, PMAX, PRF, PPB, TYPE) \ + { \ + ID, WIDTH_LOWER(WMIN), WIDTH_UPPER(WMAX), \ + PMIN - PRI_TOLERANCE, \ + PMAX * PRF + PRI_TOLERANCE, PRF, PPB * PRF, \ + PPB_THRESH(PPB), PRI_TOLERANCE, TYPE \ + } + +static const struct radar_detector_specs fcc_radar_ref_types_riu[] = { + FCC_PATTERN(0, 0, 8, 1428, 1428, 1, 18, RADAR_WAVEFORM_SHORT), + FCC_PATTERN(1, 0, 8, 518, 3066, 1, 102, RADAR_WAVEFORM_WEATHER), + FCC_PATTERN(2, 0, 8, 150, 230, 1, 23, RADAR_WAVEFORM_SHORT), + FCC_PATTERN(3, 6, 20, 200, 500, 1, 16, RADAR_WAVEFORM_SHORT), + FCC_PATTERN(4, 10, 28, 200, 500, 1, 12, RADAR_WAVEFORM_SHORT), + FCC_PATTERN(5, 50, 110, 1000, 2000, 1, 8, RADAR_WAVEFORM_LONG), + FCC_PATTERN(6, 0, 8, 333, 333, 1, 9, RADAR_WAVEFORM_SHORT), +}; + +static const struct radar_detector_specs fcc_radar_ref_types_fcu[] = { + FCC_PATTERN(0, 0, 8, 1428, 1428, 1, 18, RADAR_WAVEFORM_SHORT), + FCC_PATTERN(1, 0, 8, 518, 3066, 1, 102, RADAR_WAVEFORM_WEATHER), + FCC_PATTERN(2, 0, 8, 150, 230, 1, 23, RADAR_WAVEFORM_SHORT), + FCC_PATTERN(3, 6, 12, 200, 
500, 1, 16, RADAR_WAVEFORM_SHORT), + FCC_PATTERN(4, 10, 22, 200, 500, 1, 12, RADAR_WAVEFORM_SHORT), + FCC_PATTERN(5, 50, 104, 1000, 2000, 1, 8, RADAR_WAVEFORM_LONG), + FCC_PATTERN(6, 0, 8, 333, 333, 1, 9, RADAR_WAVEFORM_SHORT), +}; + +static const struct radar_types fcc_radar_types = { + .region = NL80211_DFS_FCC, + .num_radar_types = ARRAY_SIZE(fcc_radar_ref_types_riu), + .spec_riu = fcc_radar_ref_types_riu, + .spec_fcu = fcc_radar_ref_types_fcu, +}; + +#define JP_PATTERN FCC_PATTERN +//JP_PATTERN(1, 2, 8, 3846, 3846, 1, 18, RADAR_WAVEFORM_SHORT), +static const struct radar_detector_specs jp_radar_ref_types_riu[] = { + JP_PATTERN(0, 0, 4, 1388, 1388, 1, 18, RADAR_WAVEFORM_SHORT), + JP_PATTERN(1, 0, 4, 1428, 1428, 1, 18, RADAR_WAVEFORM_SHORT), + JP_PATTERN(2, 0, 4, 4000, 4000, 1, 18, RADAR_WAVEFORM_SHORT), + JP_PATTERN(3, 0, 6, 150, 230, 1, 23, RADAR_WAVEFORM_SHORT), + JP_PATTERN(4, 6, 12, 200, 500, 1, 16, RADAR_WAVEFORM_SHORT), + JP_PATTERN(5, 10, 22, 200, 500, 1, 12, RADAR_WAVEFORM_SHORT), + JP_PATTERN(6, 48, 110, 1000, 2000, 1, 8, RADAR_WAVEFORM_LONG), + JP_PATTERN(7, 0, 4, 333, 333, 1, 9, RADAR_WAVEFORM_SHORT), +}; + +static const struct radar_detector_specs jp_radar_ref_types_fcu[] = { + JP_PATTERN(0, 0, 4, 1388, 1388, 1, 18, RADAR_WAVEFORM_SHORT), + JP_PATTERN(1, 0, 4, 1428, 1428, 1, 18, RADAR_WAVEFORM_SHORT), + JP_PATTERN(2, 0, 4, 4000, 4000, 1, 18, RADAR_WAVEFORM_SHORT), + JP_PATTERN(3, 0, 6, 150, 230, 1, 23, RADAR_WAVEFORM_SHORT), + JP_PATTERN(4, 6, 12, 200, 500, 1, 16, RADAR_WAVEFORM_SHORT), + JP_PATTERN(5, 10, 22, 200, 500, 1, 12, RADAR_WAVEFORM_SHORT), + JP_PATTERN(6, 48, 110, 1000, 2000, 1, 8, RADAR_WAVEFORM_LONG), + JP_PATTERN(7, 0, 4, 333, 333, 1, 9, RADAR_WAVEFORM_SHORT), +}; + +static const struct radar_types jp_radar_types = { + .region = NL80211_DFS_JP, + .num_radar_types = ARRAY_SIZE(jp_radar_ref_types_riu), + .spec_riu = jp_radar_ref_types_riu, + .spec_fcu = jp_radar_ref_types_fcu, +}; + +static const struct radar_types *dfs_domains[] = { + &etsi_radar_types_v17, + &fcc_radar_types, + &jp_radar_types, +}; + + +/** + * struct pri_sequence - sequence of pulses matching one PRI + * @head: list_head + * @pri: pulse repetition interval (PRI) in usecs + * @dur: duration of sequence in usecs + * @count: number of pulses in this sequence + * @count_falses: number of not matching pulses in this sequence + * @first_ts: time stamp of first pulse in usecs + * @last_ts: time stamp of last pulse in usecs + * @deadline_ts: deadline when this sequence becomes invalid (first_ts + dur) + * @ppb_thresh: Number of pulses to validate detection + * (need for weather radar whose value depends of pri) + */ +struct pri_sequence { + struct list_head head; + u32 pri; + u32 dur; + u32 count; + u32 count_falses; + u64 first_ts; + u64 last_ts; + u64 deadline_ts; + u8 ppb_thresh; +}; + + +/** + * struct pulse_elem - elements in pulse queue + * @ts: time stamp in usecs + */ +struct pulse_elem { + struct list_head head; + u64 ts; +}; + +/** + * struct pri_detector - PRI detector element for a dedicated radar type + * @head: + * @rs: detector specs for this detector element + * @last_ts: last pulse time stamp considered for this element in usecs + * @sequences: list_head holding potential pulse sequences + * @pulses: list connecting pulse_elem objects + * @count: number of pulses in queue + * @max_count: maximum number of pulses to be queued + * @window_size: window size back from newest pulse time stamp in usecs + * @freq: + */ +struct pri_detector { + struct list_head head; + const struct 
radar_detector_specs *rs; + u64 last_ts; + struct list_head sequences; + struct list_head pulses; + u32 count; + u32 max_count; + u32 window_size; + struct pri_detector_ops *ops; + u16 freq; +}; + +/** + * struct pri_detector_ops - PRI detector ops (dependent of waveform type) + * @init : Initialize pri_detector structure + * @add_pulse : Add a pulse to the pri-detector + * @reset_on_pri_overflow : Should the pri_detector be resetted when pri overflow + */ +struct pri_detector_ops { + void (*init)(struct pri_detector *pde); + struct pri_sequence * (*add_pulse)(struct rwnx_radar *radar, struct pri_detector *pde, u16 len, u64 ts, u16 pri); + int reset_on_pri_overflow; +}; + + +/****************************************************************************** + * PRI (pulse repetition interval) sequence detection + *****************************************************************************/ +/** + * Singleton Pulse and Sequence Pools + * + * Instances of pri_sequence and pulse_elem are kept in singleton pools to + * reduce the number of dynamic allocations. They are shared between all + * instances and grow up to the peak number of simultaneously used objects. + * + * Memory is freed after all references to the pools are released. + */ +static u32 singleton_pool_references; +static LIST_HEAD(pulse_pool); +static LIST_HEAD(pseq_pool); +static DEFINE_SPINLOCK(pool_lock); + +static void pool_register_ref(void) +{ + spin_lock_bh(&pool_lock); + singleton_pool_references++; + spin_unlock_bh(&pool_lock); +} + +static void pool_deregister_ref(void) +{ + spin_lock_bh(&pool_lock); + singleton_pool_references--; + if (singleton_pool_references == 0) { + /* free singleton pools with no references left */ + struct pri_sequence *ps, *ps0; + struct pulse_elem *p, *p0; + + list_for_each_entry_safe(p, p0, &pulse_pool, head) { + list_del(&p->head); + kfree(p); + } + list_for_each_entry_safe(ps, ps0, &pseq_pool, head) { + list_del(&ps->head); + kfree(ps); + } + } + spin_unlock_bh(&pool_lock); +} + +static void pool_put_pulse_elem(struct pulse_elem *pe) +{ + spin_lock_bh(&pool_lock); + list_add(&pe->head, &pulse_pool); + spin_unlock_bh(&pool_lock); +} + +static void pool_put_pseq_elem(struct pri_sequence *pse) +{ + spin_lock_bh(&pool_lock); + list_add(&pse->head, &pseq_pool); + spin_unlock_bh(&pool_lock); +} + +static struct pri_sequence *pool_get_pseq_elem(void) +{ + struct pri_sequence *pse = NULL; + spin_lock_bh(&pool_lock); + if (!list_empty(&pseq_pool)) { + pse = list_first_entry(&pseq_pool, struct pri_sequence, head); + list_del(&pse->head); + } + spin_unlock_bh(&pool_lock); + + if (pse == NULL) { + pse = kmalloc(sizeof(*pse), GFP_ATOMIC); + } + + return pse; +} + +static struct pulse_elem *pool_get_pulse_elem(void) +{ + struct pulse_elem *pe = NULL; + spin_lock_bh(&pool_lock); + if (!list_empty(&pulse_pool)) { + pe = list_first_entry(&pulse_pool, struct pulse_elem, head); + list_del(&pe->head); + } + spin_unlock_bh(&pool_lock); + return pe; +} + +static struct pulse_elem *pulse_queue_get_tail(struct pri_detector *pde) +{ + struct list_head *l = &pde->pulses; + if (list_empty(l)) + return NULL; + return list_entry(l->prev, struct pulse_elem, head); +} + +static bool pulse_queue_dequeue(struct pri_detector *pde) +{ + struct pulse_elem *p = pulse_queue_get_tail(pde); + if (p != NULL) { + list_del_init(&p->head); + pde->count--; + /* give it back to pool */ + pool_put_pulse_elem(p); + } + return (pde->count > 0); +} + +/** + * pulse_queue_check_window - remove pulses older than window + * @pde: pointer on 
pri_detector + * + * dequeue pulse that are too old. + */ +static +void pulse_queue_check_window(struct pri_detector *pde) +{ + u64 min_valid_ts; + struct pulse_elem *p; + + /* there is no delta time with less than 2 pulses */ + if (pde->count < 2) + return; + + if (pde->last_ts <= pde->window_size) + return; + + min_valid_ts = pde->last_ts - pde->window_size; + while ((p = pulse_queue_get_tail(pde)) != NULL) { + if (p->ts >= min_valid_ts) + return; + pulse_queue_dequeue(pde); + } +} + +/** + * pulse_queue_enqueue - Queue one pulse + * @pde: pointer on pri_detector + * + * Add one pulse to the list. If the maximum number of pulses + * if reached, remove oldest one. + */ +static +bool pulse_queue_enqueue(struct pri_detector *pde, u64 ts) +{ + struct pulse_elem *p = pool_get_pulse_elem(); + if (p == NULL) { + p = kmalloc(sizeof(*p), GFP_ATOMIC); + if (p == NULL) { + return false; + } + } + INIT_LIST_HEAD(&p->head); + p->ts = ts; + list_add(&p->head, &pde->pulses); + pde->count++; + pde->last_ts = ts; + pulse_queue_check_window(pde); + if (pde->count >= pde->max_count) + pulse_queue_dequeue(pde); + + return true; +} + + +/*************************************************************************** + * Short waveform + **************************************************************************/ +/** + * pde_get_multiple() - get number of multiples considering a given tolerance + * @return factor if abs(val - factor*fraction) <= tolerance, 0 otherwise + */ +static +u32 pde_get_multiple(u32 val, u32 fraction, u32 tolerance) +{ + u32 remainder; + u32 factor; + u32 delta; + + if (fraction == 0) + return 0; + + delta = (val < fraction) ? (fraction - val) : (val - fraction); + + if (delta <= tolerance) + /* val and fraction are within tolerance */ + return 1; + + factor = val / fraction; + remainder = val % fraction; + if (remainder > tolerance) { + /* no exact match */ + if ((fraction - remainder) <= tolerance) + /* remainder is within tolerance */ + factor++; + else + factor = 0; + } + return factor; +} + +/** + * pde_short_create_sequences - create_sequences function for + * SHORT/WEATHER/INTERLEAVED radar waveform + * @pde: pointer on pri_detector + * @ts: timestamp of the pulse + * @min_count: Minimum number of pulse to be present in the sequence. + * (With this pulse there is already a sequence with @min_count + * pulse, so if we can't create a sequence with more pulse don't + * create it) + * @return: false if an error occured (memory allocation) true otherwise + * + * For each pulses queued check if we can create a sequence with + * pri = (ts - pulse_queued.ts) which contains more than @min_count pulses. 
+ * + */ +static +bool pde_short_create_sequences(struct pri_detector *pde, + u64 ts, u32 min_count) +{ + struct pulse_elem *p; + u16 pulse_idx = 0; + + list_for_each_entry(p, &pde->pulses, head) { + struct pri_sequence ps, *new_ps; + struct pulse_elem *p2; + u32 tmp_false_count; + u64 min_valid_ts; + u32 delta_ts = ts - p->ts; + pulse_idx++; + + if (delta_ts < pde->rs->pri_min) + /* ignore too small pri */ + continue; + + if (delta_ts > pde->rs->pri_max) + /* stop on too large pri (sorted list) */ + break; + + /* build a new sequence with new potential pri */ + ps.count = 2; + ps.count_falses = pulse_idx - 1; + ps.first_ts = p->ts; + ps.last_ts = ts; + ps.pri = ts - p->ts; + ps.dur = ps.pri * (pde->rs->ppb - 1) + + 2 * pde->rs->max_pri_tolerance; + + p2 = p; + tmp_false_count = 0; + if (ps.dur > ts) + min_valid_ts = 0; + else + min_valid_ts = ts - ps.dur; + /* check which past pulses are candidates for new sequence */ + list_for_each_entry_continue(p2, &pde->pulses, head) { + u32 factor; + if (p2->ts < min_valid_ts) + /* stop on crossing window border */ + break; + /* check if pulse match (multi)PRI */ + factor = pde_get_multiple(ps.last_ts - p2->ts, ps.pri, + pde->rs->max_pri_tolerance); + if (factor > 0) { + ps.count++; + ps.first_ts = p2->ts; + /* + * on match, add the intermediate falses + * and reset counter + */ + ps.count_falses += tmp_false_count; + tmp_false_count = 0; + } else { + /* this is a potential false one */ + tmp_false_count++; + } + } + if (ps.count <= min_count) { + /* did not reach minimum count, drop sequence */ + continue; + } + /* this is a valid one, add it */ + ps.deadline_ts = ps.first_ts + ps.dur; + if (pde->rs->type == RADAR_WAVEFORM_WEATHER) { + ps.ppb_thresh = 19000000 / (360 * ps.pri); + ps.ppb_thresh = PPB_THRESH(ps.ppb_thresh); + } else { + ps.ppb_thresh = pde->rs->ppb_thresh; + } + + new_ps = pool_get_pseq_elem(); + if (new_ps == NULL) { + return false; + } + memcpy(new_ps, &ps, sizeof(ps)); + INIT_LIST_HEAD(&new_ps->head); + list_add(&new_ps->head, &pde->sequences); + } + return true; +} + +/** + * pde_short_add_to_existing_seqs - add_to_existing_seqs function for + * SHORT/WEATHER/INTERLEAVED radar waveform + * @pde: pointer on pri_detector + * @ts: timestamp of the pulse + * + * Check all sequemces created for this pde. + * - If the sequence is too old delete it. + * - Else if the delta with the previous pulse match the pri of the sequence + * add the pulse to this sequence. If the pulse cannot be added it is added + * to the false pulses for this sequence + * + * @return the length of the longest sequence in which the pulse has been added + */ +static +u32 pde_short_add_to_existing_seqs(struct pri_detector *pde, u64 ts) +{ + u32 max_count = 0; + struct pri_sequence *ps, *ps2; + list_for_each_entry_safe(ps, ps2, &pde->sequences, head) { + u32 delta_ts; + u32 factor; + + /* first ensure that sequence is within window */ + if (ts > ps->deadline_ts) { + list_del_init(&ps->head); + pool_put_pseq_elem(ps); + continue; + } + + delta_ts = ts - ps->last_ts; + factor = pde_get_multiple(delta_ts, ps->pri, + pde->rs->max_pri_tolerance); + + if (factor > 0) { + ps->last_ts = ts; + ps->count++; + + if (max_count < ps->count) + max_count = ps->count; + } else { + ps->count_falses++; + } + } + return max_count; +} + + +/** + * pde_short_check_detection - check_detection function for + * SHORT/WEATHER/INTERLEAVED radar waveform + * @pde: pointer on pri_detector + * + * Check all sequemces created for this pde. 
+ * - If a sequence contains more pulses than the threshold and more matching + * that false pulses. + * + * @return The first complete sequence, and NULL if no sequence is complete. + */ +static +struct pri_sequence * pde_short_check_detection(struct pri_detector *pde) +{ + struct pri_sequence *ps; + + if (list_empty(&pde->sequences)) + return NULL; + + list_for_each_entry(ps, &pde->sequences, head) { + /* + * we assume to have enough matching confidence if we + * 1) have enough pulses + * 2) have more matching than false pulses + */ + if ((ps->count >= ps->ppb_thresh) && + (ps->count * pde->rs->num_pri > ps->count_falses)) { + return ps; + } + } + return NULL; +} + +/** + * pde_short_init - init function for + * SHORT/WEATHER/INTERLEAVED radar waveform + * @pde: pointer on pri_detector + * + * Initialize pri_detector window size to the maximun size of one burst + * for the radar specification associated. + */ +static +void pde_short_init(struct pri_detector *pde) +{ + pde->window_size = pde->rs->pri_max * pde->rs->ppb * pde->rs->num_pri; + pde->max_count = pde->rs->ppb * 2; +} + +static void pri_detector_reset(struct pri_detector *pde, u64 ts); +/** + * pde_short_add_pulse - Add pulse to a pri_detector for + * SHORT/WEATHER/INTERLEAVED radar waveform + * + * @pde : pointer on pri_detector + * @len : width of the pulse + * @ts : timestamp of the pulse received + * @pri : Delta in us with the previous pulse. + * (0 means that delta in bigger than 65535 us) + * + * Process on pulse within this pri_detector + * - First try to add it to existing sequence + * - Then try to create a new and longest sequence + * - Check if this pulse complete a sequence + * - If not save this pulse in the list + */ +static +struct pri_sequence *pde_short_add_pulse(struct rwnx_radar *radar, struct pri_detector *pde, + u16 len, u64 ts, u16 pri) +{ + u32 max_updated_seq; + struct pri_sequence *ps; + const struct radar_detector_specs *rs = pde->rs; + + if (pde->count == 0) { + /* This is the first pulse after reset, no need to check sequences */ + pulse_queue_enqueue(pde, ts); + return NULL; + } + + if ((ts - pde->last_ts) < rs->max_pri_tolerance) { + /* if delta to last pulse is too short, don't use this pulse */ + return NULL; + } + + max_updated_seq = pde_short_add_to_existing_seqs(pde, ts); + + if (!pde_short_create_sequences(pde, ts, max_updated_seq)) { + pri_detector_reset(pde, ts); + return NULL; + } + + ps = pde_short_check_detection(pde); + + if (ps == NULL) + pulse_queue_enqueue(pde, ts); + + return ps; +} + + + +/** + * pri detector ops to detect short radar waveform + * A Short waveform is defined by : + * The width of pulses. + * The interval between two pulses inside a burst (called pri) + * (some waveform may have or 2/3 interleaved pri) + * The number of pulses per burst (ppb) + */ +static struct pri_detector_ops pri_detector_short = { + .init = pde_short_init, + .add_pulse = pde_short_add_pulse, + .reset_on_pri_overflow = 1, +}; + + +/*************************************************************************** + * Long waveform + **************************************************************************/ +#define LONG_RADAR_DURATION 12000000 +#define LONG_RADAR_BURST_MIN_DURATION (12000000 / 20) +#define LONG_RADAR_MAX_BURST 20 + +/** + * pde_long_init - init function for LONG radar waveform + * @pde: pointer on pri_detector + * + * Initialize pri_detector window size to the long waveform radar + * waveform (ie. 
12s) and max_count + */ +static +void pde_long_init(struct pri_detector *pde) +{ + pde->window_size = LONG_RADAR_DURATION; + pde->max_count = LONG_RADAR_MAX_BURST; /* only count burst not pulses */ +} + + +/** + * pde_long_add_pulse - Add pulse to a pri_detector for + * LONG radar waveform + * + * @pde : pointer on pri_detector + * @len : width of the pulse + * @ts : timestamp of the pulse received + * @pri : Delta in us with the previous pulse. + * + * + * For long pulse we only handle one sequence. Since each burst + * have a different set of parameters (number of pulse, pri) than + * the previous one we only use pulse width to add the pulse in the + * sequence. + * We only queue one pulse per burst and valid the radar when enough burst + * has been detected. + */ +static +struct pri_sequence *pde_long_add_pulse(struct rwnx_radar *radar, struct pri_detector *pde, + u16 len, u64 ts, u16 pri) +{ + struct pri_sequence *ps; + const struct radar_detector_specs *rs = pde->rs; + + //if(radar->status != RWNX_RADAR_CAC_BUSY) { + // return NULL; + //} + + if (list_empty(&pde->sequences)) { + /* First pulse, create a new sequence */ + ps = pool_get_pseq_elem(); + if (ps == NULL) { + return NULL; + } + + /*For long waveform, "count" represents the number of burst detected */ + ps->count = 1; + /*"count_false" represents the number of pulse in the current burst */ + ps->count_falses = 1; + ps->first_ts = ts; + ps->last_ts = ts; + ps->deadline_ts = ts + pde->window_size; + ps->pri = 0; + INIT_LIST_HEAD(&ps->head); + list_add(&ps->head, &pde->sequences); + pulse_queue_enqueue(pde, ts); + } else { + u32 delta_ts; + + ps = (struct pri_sequence *)pde->sequences.next; + + delta_ts = ts - ps->last_ts; + ps->last_ts = ts; + + if (delta_ts < rs->pri_max) { + /* ignore pulse too close from previous one */ + } else if ((delta_ts >= rs->pri_min) && + (delta_ts <= rs->pri_max)) { + /* this is a new pulse in the current burst, ignore it + (i.e don't queue it) */ + ps->count_falses++; + } else if ((ps->count > 2) && + (ps->dur + delta_ts) < LONG_RADAR_BURST_MIN_DURATION) { + /* not enough time between burst, ignore pulse */ + } else { + /* a new burst */ + ps->count++; + ps->count_falses = 1; + + /* reset the start of the sequence if deadline reached */ + if (ts > ps->deadline_ts) { + struct pulse_elem *p; + u64 min_valid_ts; + + min_valid_ts = ts - pde->window_size; + while ((p = pulse_queue_get_tail(pde)) != NULL) { + if (p->ts >= min_valid_ts) { + ps->first_ts = p->ts; + ps->deadline_ts = p->ts + pde->window_size; + break; + } + pulse_queue_dequeue(pde); + ps->count--; + } + } + + /* valid radar if enough burst detected and delta with first burst + is at least duration/2 */ + if (ps->count > pde->rs->ppb_thresh && + (ts - ps->first_ts) > (pde->window_size / 2)) { + return ps; + } else { + pulse_queue_enqueue(pde, ts); + ps->dur = delta_ts; + } + } + } + + return NULL; +} + +/** + * pri detector ops to detect long radar waveform + */ +static struct pri_detector_ops pri_detector_long = { + .init = pde_long_init, + .add_pulse = pde_long_add_pulse, + .reset_on_pri_overflow = 0, +}; + + +/*************************************************************************** + * PRI detector init/reset/exit/get + **************************************************************************/ +/** + * pri_detector_init- Create a new pri_detector + * + * @dpd: dfs_pattern_detector instance pointer + * @radar_type: index of radar pattern + * @freq: Frequency of the pri detector + */ +struct pri_detector *pri_detector_init(struct 
dfs_pattern_detector *dpd, + u16 radar_type, u16 freq) +{ + struct pri_detector *pde; + + pde = kzalloc(sizeof(*pde), GFP_ATOMIC); + if (pde == NULL) + return NULL; + + INIT_LIST_HEAD(&pde->sequences); + INIT_LIST_HEAD(&pde->pulses); + INIT_LIST_HEAD(&pde->head); + list_add(&pde->head, &dpd->detectors[radar_type]); + + pde->rs = &dpd->radar_spec[radar_type]; + pde->freq = freq; + + if (pde->rs->type == RADAR_WAVEFORM_LONG) { + /* for LONG WAVEFORM */ + pde->ops = &pri_detector_long; + } else { + /* for SHORT, WEATHER and INTERLEAVED */ + pde->ops = &pri_detector_short; + } + + /* Init dependent of specs */ + pde->ops->init(pde); + + pool_register_ref(); + return pde; +} + +/** + * pri_detector_reset - Reset pri_detector + * + * @pde: pointer on pri_detector + * @ts: New ts reference for the pri_detector + * + * free pulse queue and sequences list and give objects back to pools + */ +static +void pri_detector_reset(struct pri_detector *pde, u64 ts) +{ + struct pri_sequence *ps, *ps0; + struct pulse_elem *p, *p0; + list_for_each_entry_safe(ps, ps0, &pde->sequences, head) { + list_del_init(&ps->head); + pool_put_pseq_elem(ps); + } + list_for_each_entry_safe(p, p0, &pde->pulses, head) { + list_del_init(&p->head); + pool_put_pulse_elem(p); + } + pde->count = 0; + pde->last_ts = ts; +} + +/** + * pri_detector_exit - Delete pri_detector + * + * @pde: pointer on pri_detector + */ +static +void pri_detector_exit(struct pri_detector *pde) +{ + pri_detector_reset(pde, 0); + pool_deregister_ref(); + list_del(&pde->head); + kfree(pde); +} + +/** + * pri_detector_get() - get pri detector for a given frequency and type + * @dpd: dfs_pattern_detector instance pointer + * @freq: frequency in MHz + * @radar_type: index of radar pattern + * @return pointer to pri detector on success, NULL otherwise + * + * Return existing pri detector for the given frequency or return a + * newly create one. + * Pri detector are "merged" by frequency so that if a pri detector for a freq + * of +/- 2Mhz already exists don't create a new one. + * + * Maybe will need to adapt frequency merge for pattern with chirp. 
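+ * Example: if a detector for this radar type already exists at 5260MHz and
+ * holds at least one pulse, it is reused for pulses reported at 5258MHz or
+ * 5262MHz instead of creating a new detector.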
+ */ +static struct pri_detector * +pri_detector_get(struct dfs_pattern_detector *dpd, u16 freq, u16 radar_type) +{ + struct pri_detector *pde, *cur = NULL; + list_for_each_entry(pde, &dpd->detectors[radar_type], head) { + if (pde->freq == freq) { + if (pde->count) + return pde; + else + cur = pde; + } else if (pde->freq - 2 == freq && pde->count) { + return pde; + } else if (pde->freq + 2 == freq && pde->count) { + return pde; + } + } + + if (cur) + return cur; + else + return pri_detector_init(dpd, radar_type, freq); +} + + +/****************************************************************************** + * DFS Pattern Detector + *****************************************************************************/ +/** + * dfs_pattern_detector_reset() - reset all channel detectors + * + * @dpd: dfs_pattern_detector + */ +static void dfs_pattern_detector_reset(struct dfs_pattern_detector *dpd) +{ + struct pri_detector *pde; + int i; + + for (i = 0; i < dpd->num_radar_types; i++) { + if (!list_empty(&dpd->detectors[i])) + list_for_each_entry(pde, &dpd->detectors[i], head) + pri_detector_reset(pde, dpd->last_pulse_ts); + } + + dpd->last_pulse_ts = 0; + dpd->prev_jiffies = jiffies; +} + +/** + * dfs_pattern_detector_reset() - delete all channel detectors + * + * @dpd: dfs_pattern_detector + */ +static void dfs_pattern_detector_exit(struct dfs_pattern_detector *dpd) +{ + struct pri_detector *pde, *pde0; + int i; + + for (i = 0; i < dpd->num_radar_types; i++) { + if (!list_empty(&dpd->detectors[i])) + list_for_each_entry_safe(pde, pde0, &dpd->detectors[i], head) + pri_detector_exit(pde); + } + + kfree(dpd); +} + +/** + * dfs_pattern_detector_pri_overflow - reset all channel detectors on pri + * overflow + * @dpd: dfs_pattern_detector + */ +static void dfs_pattern_detector_pri_overflow(struct dfs_pattern_detector *dpd) +{ + struct pri_detector *pde; + int i; + + for (i = 0; i < dpd->num_radar_types; i++) { + if (!list_empty(&dpd->detectors[i])) + list_for_each_entry(pde, &dpd->detectors[i], head) + if (pde->ops->reset_on_pri_overflow) + pri_detector_reset(pde, dpd->last_pulse_ts); + } +} + +void print_radar_detect_info(struct pri_detector *pde, struct pri_sequence *ps) +{ + struct radar_detector_specs *rs = (struct radar_detector_specs *)pde->rs; + struct pulse_elem *p; + u16 idx = 0; + + AICWFDBG(LOGINFO, "aic dfs detected: %d, %d, %d\n", pde->window_size, pde->count, pde->max_count); + AICWFDBG(LOGINFO, "\t rs: %u %u\n", rs->type, rs->type_id); + AICWFDBG(LOGINFO, "\t rs: width[%u, %u]\n", rs->width_min, rs->width_max); + AICWFDBG(LOGINFO, "\t rs: pri[%u, %u] %u\n", rs->pri_min, rs->pri_max, rs->num_pri); + AICWFDBG(LOGINFO, "\t rs: %u/%u %u\n", rs->ppb, rs->ppb_thresh, rs->max_pri_tolerance); + + AICWFDBG(LOGINFO, "\t ps.pri = %u\n" , ps->pri ); + AICWFDBG(LOGINFO, "\t ps.dur = %u\n" , ps->dur ); + AICWFDBG(LOGINFO, "\t ps.count = %u\n" , ps->count ); + AICWFDBG(LOGINFO, "\t ps.count_falses = %u\n" , ps->count_falses ); + AICWFDBG(LOGINFO, "\t ps.first_ts = %lu\n", (long unsigned int)ps->first_ts ); + AICWFDBG(LOGINFO, "\t ps.last_ts = %lu\n", (long unsigned int)ps->last_ts ); + AICWFDBG(LOGINFO, "\t ps.deadline_ts = %lu\n", (long unsigned int)ps->deadline_ts ); + AICWFDBG(LOGINFO, "\t ps.ppb_thresh = %u\n" , ps->ppb_thresh ); + + AICWFDBG(LOGINFO, "\t pulses -->\n"); + list_for_each_entry_reverse(p, &pde->pulses, head) { + AICWFDBG(LOGINFO, "\t %u: %lu\n", idx, (long unsigned int)p->ts); + idx++; + } +} + + +/** + * dfs_pattern_detector_add_pulse - Process one pulse + * + * @dpd: 
dfs_pattern_detector + * @chain: Chain that correspond to this pattern_detector (only for debug) + * @freq: frequency of the pulse + * @pri: Delta with previous pulse. (0 if delta is too big for u16) + * @len: width of the pulse + * @now: jiffies value when pulse was received + * + * Get (or create) the channel_detector for this frequency. Then add the pulse + * in each pri_detector created in this channel_detector. + * + * + * @return True is the pulse complete a radar pattern, false otherwise + */ +static bool dfs_pattern_detector_add_pulse(struct rwnx_radar *radar, struct dfs_pattern_detector *dpd, + enum rwnx_radar_chain chain, + u16 freq, u16 pri, u16 len, u32 now) +{ + u32 i; + + /* + * pulses received for a non-supported or un-initialized + * domain are treated as detected radars for fail-safety + */ + if (dpd->region == NL80211_DFS_UNSET) + return true; + + /* Compute pulse time stamp */ + if (pri == 0) { + u32 delta_jiffie; + if (unlikely(now < dpd->prev_jiffies)) { + delta_jiffie = 0xffffffff - dpd->prev_jiffies + now; + } else { + delta_jiffie = now - dpd->prev_jiffies; + } + dpd->last_pulse_ts += jiffies_to_usecs(delta_jiffie); + dpd->prev_jiffies = now; + dfs_pattern_detector_pri_overflow(dpd); + } else { + dpd->last_pulse_ts += pri; + } + + for (i = 0; i < dpd->num_radar_types; i++) { + struct pri_sequence *ps; + struct pri_detector *pde; + const struct radar_detector_specs *rs = &dpd->radar_spec[i]; + + /* no need to look up for pde if len is not within range */ + if ((rs->width_min > len) || + (rs->width_max < len)) { + continue; + } + + pde = pri_detector_get(dpd, freq, i); + ps = pde->ops->add_pulse(radar, pde, len, dpd->last_pulse_ts, pri); + + if (ps != NULL) { + print_radar_detect_info(pde, ps); +#ifdef CREATE_TRACE_POINTS + trace_radar_detected(chain, dpd->region, pde->freq, i, ps->pri); +#endif + // reset everything instead of just the channel detector + dfs_pattern_detector_reset(dpd); + return true; + } + } + + return false; +} + +/** + * get_dfs_domain_radar_types() - get radar types for a given DFS domain + * @param domain DFS domain + * @return radar_types ptr on success, NULL if DFS domain is not supported + */ +static const struct radar_types * +get_dfs_domain_radar_types(enum nl80211_dfs_regions region) +{ + u32 i; + for (i = 0; i < ARRAY_SIZE(dfs_domains); i++) { + if (dfs_domains[i]->region == region) + return dfs_domains[i]; + } + return NULL; +} + +/** + * get_dfs_max_radar_types() - get maximum radar types for all supported domain + * @return the maximum number of radar pattern supported by on region + */ +static u16 get_dfs_max_radar_types(void) +{ + u32 i; + u16 max = 0; + for (i = 0; i < ARRAY_SIZE(dfs_domains); i++) { + if (dfs_domains[i]->num_radar_types > max) + max = dfs_domains[i]->num_radar_types; + } + return max; +} + +/** + * dfs_pattern_detector_set_domain - set DFS domain + * + * @dpd: dfs_pattern_detector + * @region: DFS region + * + * set DFS domain, resets detector lines upon domain changes + */ +static +bool dfs_pattern_detector_set_domain(struct dfs_pattern_detector *dpd, + enum nl80211_dfs_regions region, u8 chain) +{ + const struct radar_types *rt; + struct pri_detector *pde, *pde0; + int i; + + if (dpd->region == region) + return true; + + dpd->region = NL80211_DFS_UNSET; + + rt = get_dfs_domain_radar_types(region); + if (rt == NULL) + return false; + + /* delete all pri detectors for previous DFS domain */ + for (i = 0; i < dpd->num_radar_types; i++) { + if (!list_empty(&dpd->detectors[i])) + list_for_each_entry_safe(pde, pde0, 
&dpd->detectors[i], head) + pri_detector_exit(pde); + } + + if (chain == RWNX_RADAR_RIU) + dpd->radar_spec = rt->spec_riu; + else + dpd->radar_spec = rt->spec_fcu; + dpd->num_radar_types = rt->num_radar_types; + + dpd->region = region; + + AICWFDBG(LOGINFO,"set_region %d \n",region); + return true; +} + +/** + * dfs_pattern_detector_init - Initialize dfs_pattern_detector + * + * @region: DFS region + * @return: pointer on dfs_pattern_detector + * + */ +static struct dfs_pattern_detector * +dfs_pattern_detector_init(enum nl80211_dfs_regions region, u8 chain) +{ + struct dfs_pattern_detector *dpd; + u16 i, max_radar_type = get_dfs_max_radar_types(); + + dpd = kmalloc(sizeof(*dpd) + max_radar_type * sizeof(dpd->detectors[0]), + GFP_KERNEL); + if (dpd == NULL) + return NULL; + + dpd->region = NL80211_DFS_UNSET; + dpd->enabled = RWNX_RADAR_DETECT_DISABLE; + dpd->last_pulse_ts = 0; + dpd->prev_jiffies = jiffies; + dpd->num_radar_types = 0; + for (i = 0; i < max_radar_type; i++) + INIT_LIST_HEAD(&dpd->detectors[i]); + + if (dfs_pattern_detector_set_domain(dpd, region, chain)) + return dpd; + + kfree(dpd); + return NULL; +} + + +/****************************************************************************** + * driver interface + *****************************************************************************/ +static u16 rwnx_radar_get_center_freq(struct rwnx_hw *rwnx_hw, u8 chain) +{ + if (chain == RWNX_RADAR_FCU) + return rwnx_hw->phy.sec_chan.center_freq1; + + if (chain == RWNX_RADAR_RIU) { +#ifdef CONFIG_RWNX_FULLMAC + if (!rwnx_chanctx_valid(rwnx_hw, rwnx_hw->cur_chanctx)) { + WARN(1, "Radar pulse without channel information"); + } else + return rwnx_hw->chanctx_table[rwnx_hw->cur_chanctx].chan_def.center_freq1; +#endif /* CONFIG_RWNX_FULLMAC */ + } + + return 0; +} + +static void rwnx_radar_detected(struct rwnx_hw *rwnx_hw) +{ +#ifdef CONFIG_RWNX_FULLMAC + struct cfg80211_chan_def chan_def; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + if (!rwnx_chanctx_valid(rwnx_hw, rwnx_hw->cur_chanctx)) { + WARN(1, "Radar detected without channel information"); + return; + } + + /* + recopy chan_def in local variable because rwnx_radar_cancel_cac may + clean the variable (if in CAC and it's the only vif using this context) + and CAC should be aborted before reporting the radar. 
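+     cfg80211_radar_event() then marks the channel as unavailable for the
+     non-occupancy period and notifies userspace of the detection.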
+ */ + chan_def = rwnx_hw->chanctx_table[rwnx_hw->cur_chanctx].chan_def; + + rwnx_radar_cancel_cac(&rwnx_hw->radar); + cfg80211_radar_event(rwnx_hw->wiphy, &chan_def, GFP_KERNEL); + +#endif /* CONFIG_RWNX_FULLMAC */ +} + +static void rwnx_radar_process_pulse(struct work_struct *ws) +{ + struct rwnx_radar *radar = container_of(ws, struct rwnx_radar, + detection_work); + struct rwnx_hw *rwnx_hw = container_of(radar, struct rwnx_hw, radar); + int chain; + u32 pulses[RWNX_RADAR_LAST][RWNX_RADAR_PULSE_MAX]; + u16 pulses_count[RWNX_RADAR_LAST]; + u32 now = jiffies; /* would be better to store jiffies value in IT handler */ + #ifdef RWNX_RADAR_DUMP_EN + struct rwnx_radar_dump *rm = radar->rmem; + #endif + + /* recopy pulses locally to avoid too long spin_lock */ + spin_lock_bh(&radar->lock); + for (chain = RWNX_RADAR_RIU; chain < RWNX_RADAR_LAST; chain++) { + int start, count; + + count = radar->pulses[chain].count; + start = radar->pulses[chain].index - count; + if (start < 0) + start += RWNX_RADAR_PULSE_MAX; + + pulses_count[chain] = count; + if (count == 0) + continue; + + if ((start + count) > RWNX_RADAR_PULSE_MAX) { + u16 count1 = (RWNX_RADAR_PULSE_MAX - start); + memcpy(&(pulses[chain][0]), + &(radar->pulses[chain].buffer[start]), + count1 * sizeof(struct radar_pulse)); + memcpy(&(pulses[chain][count1]), + &(radar->pulses[chain].buffer[0]), + (count - count1) * sizeof(struct radar_pulse)); + } else { + memcpy(&(pulses[chain][0]), + &(radar->pulses[chain].buffer[start]), + count * sizeof(struct radar_pulse)); + } + radar->pulses[chain].count = 0; + } + spin_unlock_bh(&radar->lock); + + + /* now process pulses */ + for (chain = RWNX_RADAR_RIU; chain < RWNX_RADAR_LAST; chain++) { + int i; + u16 freq; + + if (pulses_count[chain] == 0) + continue; + + freq = rwnx_radar_get_center_freq(rwnx_hw, chain); + + for (i = 0; i < pulses_count[chain] ; i++) { + struct radar_pulse *p = (struct radar_pulse *)&pulses[chain][i]; +#ifdef CREATE_TRACE_POINTS + trace_radar_pulse(chain, p); +#endif + #ifdef RWNX_RADAR_DUMP_EN + if(chain == RWNX_RADAR_RIU) { + u16 pri; + struct dfs_pattern_detector *dpd = NULL; + rm->ps[rm->idx] = pulses[chain][i]; + rm->tm[rm->idx] = now; + rm->cnt ++; + + pri = p->rep; + dpd = radar->dpd[chain]; + if (pri == 0) { + u32 delta_jiffie; + if (unlikely(now < dpd->prev_jiffies)) { + delta_jiffie = 0xffffffff - dpd->prev_jiffies + now; + } else { + delta_jiffie = now - dpd->prev_jiffies; + } + rm->ts[rm->idx] = dpd->last_pulse_ts + jiffies_to_usecs(delta_jiffie); + } else { + rm->ts[rm->idx] = dpd->last_pulse_ts + pri; + } + rm->idx = (rm->idx + 1) % RWNX_RADARR_DUMP_NB; + + if(rm->cnt == 1) + { + struct radar_detector_specs *spc; + int k; + AICWFDBG(LOGINFO, "dpd: en = %d, region = %d, ntype = %d", dpd->enabled, dpd->region, dpd->num_radar_types); + AICWFDBG(LOGINFO, "id wid.{min, max} pri.{min, max, num}, ppb, thd, tlrn, type"); + for (k = 0; k < dpd->num_radar_types; k++) + { + spc = &dpd->radar_spec[k]; + AICWFDBG(LOGINFO, "%d %3d, %3d %4d %4d %d %2d %2d %d %d", + spc->type_id, + spc->width_min, + spc->width_max, + spc->pri_min, + spc->pri_max, + spc->num_pri, + spc->ppb, + spc->ppb_thresh, + spc->max_pri_tolerance, + spc->type); + } + } + } + #endif + if((radar->status != RWNX_RADAR_CAC_BUSY) && (radar->status != RWNX_RADAR_INSERVICE_BUSY)) { + break; + } + AICWFDBG(LOGTRACE, "+r:%x\n", *(uint32_t *)p); + if (dfs_pattern_detector_add_pulse(radar, radar->dpd[chain], chain, + (s16)freq + (2 * p->freq), + p->rep, (p->len * 2), now)) { + + u16 idx = radar->detected[chain].index; + + 
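+                /* A complete radar pattern was detected on this chain: dump the
+                   debug info below, record the detection and, for the operating
+                   (RIU) chain, report it to the upper layer. */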
#ifdef RWNX_RADAR_DUMP_EN + if (chain == RWNX_RADAR_RIU) { + int k; + AICWFDBG(LOGINFO, "\t received pulses %d %d\n", rm->cnt, rm->idx-1); + for(k=0; kidx, rm->ps[rm->idx], rm->ts[rm->idx], rm->tm[rm->idx]); + rm->idx = (rm->idx + 1) % RWNX_RADARR_DUMP_NB; + } + } + #endif + + if (chain == RWNX_RADAR_RIU) { + /* operating chain, inform upper layer to change channel */ + if (radar->dpd[chain]->enabled == RWNX_RADAR_DETECT_REPORT) { + rwnx_radar_detected(rwnx_hw); + /* no need to report new radar until upper layer set a + new channel. This prevent warning if a new radar is + detected while mac80211 is changing channel */ + rwnx_radar_detection_enable(radar, + RWNX_RADAR_DETECT_DISABLE, + chain); + /* purge any event received since the beginning of the + function (we are sure not to interfer with tasklet + as we disable detection just before) */ + radar->pulses[chain].count = 0; + } + } else { + /* secondary radar detection chain, simply report info in + debugfs for now */ + } + + radar->detected[chain].freq[idx] = (s16)freq + (2 * p->freq); + radar->detected[chain].time[idx] = ktime_get_real_seconds(); + radar->detected[chain].index = ((idx + 1 ) % + NX_NB_RADAR_DETECTED); + radar->detected[chain].count++; + /* no need to process next pulses for this chain */ + break; + } + } + } +} + +#ifdef CONFIG_RWNX_FULLMAC +static void rwnx_radar_cac_work(struct work_struct *ws) +{ + struct delayed_work *dw = container_of(ws, struct delayed_work, work); + struct rwnx_radar *radar = container_of(dw, struct rwnx_radar, cac_work); + struct rwnx_hw *rwnx_hw = container_of(radar, struct rwnx_hw, radar); + struct rwnx_chanctx *ctxt; + + if (radar->cac_vif == NULL) { + WARN(1, "CAC finished but no vif set"); + return; + } + + ctxt = &rwnx_hw->chanctx_table[radar->cac_vif->ch_index]; + cfg80211_cac_event(radar->cac_vif->ndev, + #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0) + &ctxt->chan_def, + #endif +#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 12, 0) + NL80211_RADAR_CAC_FINISHED, GFP_KERNEL, 0); +#else + NL80211_RADAR_CAC_FINISHED, GFP_KERNEL); +#endif + rwnx_send_apm_stop_cac_req(rwnx_hw, radar->cac_vif); + rwnx_chanctx_unlink(radar->cac_vif); + + radar->cac_vif = NULL; +} +#endif /* CONFIG_RWNX_FULLMAC */ + + +void rwnx_radar_reset_rmem(struct rwnx_radar *radar) +{ +#ifdef RWNX_RADAR_DUMP_EN + if (radar->rmem != NULL) { + memset((void *)radar->rmem, 0, sizeof(*radar->rmem)); + } +#endif +} + +void rwnx_radar_init_rmem(struct rwnx_radar *radar) +{ +#ifdef RWNX_RADAR_DUMP_EN + if (radar->rmem == NULL) { + radar->rmem = kmalloc(sizeof(*radar->rmem), GFP_ATOMIC); + } + rwnx_radar_reset_rmem(radar); +#endif +} + +void rwnx_radar_deinit_rmem(struct rwnx_radar *radar) +{ +#ifdef RWNX_RADAR_DUMP_EN + if (radar->rmem != NULL) { + kfree(radar->rmem); + } +#endif +} + + +bool rwnx_radar_detection_init(struct rwnx_radar *radar) +{ + spin_lock_init(&radar->lock); + + radar->dpd[RWNX_RADAR_RIU] = dfs_pattern_detector_init(NL80211_DFS_UNSET, + RWNX_RADAR_RIU); + if (radar->dpd[RWNX_RADAR_RIU] == NULL) + return false; + + radar->dpd[RWNX_RADAR_FCU] = dfs_pattern_detector_init(NL80211_DFS_UNSET, + RWNX_RADAR_FCU); + rwnx_radar_init_rmem(radar); + radar->status = RWNX_RADAR_IDLE; + AICWFDBG(LOGINFO, "DFS: radar st = %d\n", radar->status); + + if (radar->dpd[RWNX_RADAR_FCU] == NULL) { + rwnx_radar_detection_deinit(radar); + return false; + } + + INIT_WORK(&radar->detection_work, rwnx_radar_process_pulse); +#ifdef CONFIG_RWNX_FULLMAC + INIT_DELAYED_WORK(&radar->cac_work, rwnx_radar_cac_work); + radar->cac_vif = NULL; 
+#endif /* CONFIG_RWNX_FULLMAC */ + return true; +} + +void rwnx_radar_detection_deinit(struct rwnx_radar *radar) +{ + if (radar->dpd[RWNX_RADAR_RIU]) { + dfs_pattern_detector_exit(radar->dpd[RWNX_RADAR_RIU]); + radar->dpd[RWNX_RADAR_RIU] = NULL; + } + if (radar->dpd[RWNX_RADAR_FCU]) { + dfs_pattern_detector_exit(radar->dpd[RWNX_RADAR_FCU]); + radar->dpd[RWNX_RADAR_FCU] = NULL; + } + rwnx_radar_deinit_rmem(radar); +} + +bool rwnx_radar_set_domain(struct rwnx_radar *radar, + enum nl80211_dfs_regions region) +{ + AICWFDBG(LOGINFO, "%s \n",__func__); + if (radar->dpd[0] == NULL) + return false; +#ifdef CREATE_TRACE_POINTS + trace_radar_set_region(region); +#endif + return (dfs_pattern_detector_set_domain(radar->dpd[RWNX_RADAR_RIU], + region, RWNX_RADAR_RIU) && + dfs_pattern_detector_set_domain(radar->dpd[RWNX_RADAR_FCU], + region, RWNX_RADAR_FCU)); +} + +void rwnx_radar_detection_enable(struct rwnx_radar *radar, u8 enable, u8 chain) +{ + //printk("%s enable:%u chain:%u \n",__func__,enable,chain); + if (chain < RWNX_RADAR_LAST ) { +#ifdef CREATE_TRACE_POINTS + trace_radar_enable_detection(radar->dpd[chain]->region, enable, chain); +#endif + spin_lock_bh(&radar->lock); + radar->dpd[chain]->enabled = enable; + spin_unlock_bh(&radar->lock); + } +} + +bool rwnx_radar_detection_is_enable(struct rwnx_radar *radar, u8 chain) +{ + return radar->dpd[chain]->enabled != RWNX_RADAR_DETECT_DISABLE; +} + +#ifdef CONFIG_RWNX_FULLMAC +void rwnx_radar_start_cac(struct rwnx_radar *radar, u32 cac_time_ms, + struct rwnx_vif *vif) +{ + WARN(radar->cac_vif != NULL, "CAC already in progress"); + radar->cac_vif = vif; + AICWFDBG(LOGINFO, "%s DFS: cac time = %u ms", __func__, cac_time_ms); + schedule_delayed_work(&radar->cac_work, msecs_to_jiffies(cac_time_ms)); +} + +void rwnx_radar_cancel_cac(struct rwnx_radar *radar) +{ + struct rwnx_hw *rwnx_hw = container_of(radar, struct rwnx_hw, radar); + + if (radar->cac_vif == NULL) { + return; + } + + if (cancel_delayed_work(&radar->cac_work)) { + struct rwnx_chanctx *ctxt; + ctxt = &rwnx_hw->chanctx_table[radar->cac_vif->ch_index]; + rwnx_send_apm_stop_cac_req(rwnx_hw, radar->cac_vif); + cfg80211_cac_event(radar->cac_vif->ndev, + #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0) + &ctxt->chan_def, + #endif +#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 12, 0) + NL80211_RADAR_CAC_FINISHED, GFP_KERNEL, 0); +#else + NL80211_RADAR_CAC_ABORTED, GFP_KERNEL); +#endif + rwnx_chanctx_unlink(radar->cac_vif); + } + + radar->cac_vif = NULL; +} + +void rwnx_radar_detection_enable_on_cur_channel(struct rwnx_hw *rwnx_hw) +{ + struct rwnx_chanctx *ctxt; + + /* If no information on current channel do nothing */ + if (!rwnx_chanctx_valid(rwnx_hw, rwnx_hw->cur_chanctx)) + return; + + ctxt = &rwnx_hw->chanctx_table[rwnx_hw->cur_chanctx]; + if (ctxt->chan_def.chan->flags & IEEE80211_CHAN_RADAR) { + rwnx_radar_detection_enable(&rwnx_hw->radar, + RWNX_RADAR_DETECT_REPORT, + RWNX_RADAR_RIU); + } else { + rwnx_radar_detection_enable(&rwnx_hw->radar, + RWNX_RADAR_DETECT_DISABLE, + RWNX_RADAR_RIU); + } +} +#endif /* CONFIG_RWNX_FULLMAC */ + +/***************************************************************************** + * Debug functions + *****************************************************************************/ +static +int rwnx_radar_dump_pri_detector(char *buf, size_t len, + struct pri_detector *pde) +{ + char freq_info[] = "Freq = %3.dMhz\n"; + char seq_info[] = " pri | count | false \n"; + struct pri_sequence *seq; + int res, write = 0; + + if (list_empty(&pde->sequences)) { + return 0; + } 
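+    /* When called with buf == NULL, only return the buffer size needed to
+       dump this detector */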
+ + if (buf == NULL) { + int nb_seq = 1; + list_for_each_entry(seq, &pde->sequences, head) { + nb_seq++; + } + + return (sizeof(freq_info) + nb_seq * sizeof(seq_info)); + } + + res = scnprintf(buf, len, freq_info, pde->freq); + write += res; + len -= res; + + res = scnprintf(&buf[write], len, "%s", seq_info); + write += res; + len -= res; + + list_for_each_entry(seq, &pde->sequences, head) { + res = scnprintf(&buf[write], len, " %6.d | %2.d | %.2d \n", + seq->pri, seq->count, seq->count_falses); + write += res; + len -= res; + } + + return write; +} + +int rwnx_radar_dump_pattern_detector(char *buf, size_t len, + struct rwnx_radar *radar, u8 chain) +{ + struct dfs_pattern_detector *dpd = radar->dpd[chain]; + char info[] = "Type = %3.d\n"; + struct pri_detector *pde; + int i, res, write = 0; + + /* if buf is NULL return size needed for dump */ + if (buf == NULL) { + int size_needed = 0; + + for (i = 0; i < dpd->num_radar_types; i++) { + list_for_each_entry(pde, &dpd->detectors[i], head) { + size_needed += rwnx_radar_dump_pri_detector(NULL, 0, pde); + } + size_needed += sizeof(info); + + return size_needed; + } + } + + /* */ + for (i = 0; i < dpd->num_radar_types; i++) { + res = scnprintf(&buf[write], len, info, i); + + write += res; + len -= res; + list_for_each_entry(pde, &dpd->detectors[i], head) { + res = rwnx_radar_dump_pri_detector(&buf[write], len, pde); + write += res; + len -= res; + } + } + + return write; +} + + +int rwnx_radar_dump_radar_detected(char *buf, size_t len, + struct rwnx_radar *radar, u8 chain) +{ + struct rwnx_radar_detected *detect = &(radar->detected[chain]); + char info[] = "2001/02/02 - 02:20 5126MHz\n"; + int idx, i, res, write = 0; + int count = detect->count; + + if (count > NX_NB_RADAR_DETECTED) + count = NX_NB_RADAR_DETECTED; + + if (buf == NULL) { + return (count * sizeof(info)) + 1; + } + + idx = (detect->index - detect->count) % NX_NB_RADAR_DETECTED; + + for (i = 0; i < count; i++) { + struct tm tm; + time64_to_tm(detect->time[idx], 0, &tm); + + res = scnprintf(&buf[write], len, + "%.4d/%.2d/%.2d - %.2d:%.2d %4.4dMHz\n", + (int)tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, + tm.tm_hour, tm.tm_min, detect->freq[idx]); + write += res; + len -= res; + + idx = (idx + 1 ) % NX_NB_RADAR_DETECTED; + } + + return write; +} diff --git a/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_radar.h b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_radar.h new file mode 100644 index 0000000..5e2b8e4 --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_radar.h @@ -0,0 +1,186 @@ +/** + ****************************************************************************** + * + * @file rwnx_radar.h + * + * @brief Functions to handle radar detection + * + * + * Copyright (C) RivieraWaves 2012-2019 + * + ****************************************************************************** + */ +#ifndef _RWNX_RADAR_H_ +#define _RWNX_RADAR_H_ + +#include + +struct rwnx_vif; +struct rwnx_hw; + +enum rwnx_radar_chain { + RWNX_RADAR_RIU = 0, + RWNX_RADAR_FCU, + RWNX_RADAR_LAST +}; + +enum rwnx_radar_detector { + RWNX_RADAR_DETECT_DISABLE = 0, /* Ignore radar pulses */ + RWNX_RADAR_DETECT_ENABLE = 1, /* Process pattern detection but do not + report radar to upper layer (for test) */ + RWNX_RADAR_DETECT_REPORT = 2 /* Process pattern detection and report + radar to upper layer. 
*/ +}; + +#ifdef CONFIG_RWNX_RADAR +#include +#include + +#define RWNX_RADAR_PULSE_MAX 32 + +/** + * struct rwnx_radar_pulses - List of pulses reported by HW + * @index: write index + * @count: number of valid pulses + * @buffer: buffer of pulses + */ +struct rwnx_radar_pulses { + /* Last radar pulses received */ + int index; + int count; + u32 buffer[RWNX_RADAR_PULSE_MAX]; +}; + +/** + * struct dfs_pattern_detector - DFS pattern detector + * @region: active DFS region, NL80211_DFS_UNSET until set + * @num_radar_types: number of different radar types + * @last_pulse_ts: time stamp of last valid pulse in usecs + * @prev_jiffies: + * @radar_detector_specs: array of radar detection specs + * @channel_detectors: list connecting channel_detector elements + */ +struct dfs_pattern_detector { + u8 enabled; + enum nl80211_dfs_regions region; + u8 num_radar_types; + u64 last_pulse_ts; + u32 prev_jiffies; + const struct radar_detector_specs *radar_spec; + struct list_head detectors[]; +}; + +#define NX_NB_RADAR_DETECTED 4 + +/** + * struct rwnx_radar_detected - List of radar detected + */ +struct rwnx_radar_detected { + u16 index; + u16 count; + s64 time[NX_NB_RADAR_DETECTED]; + s16 freq[NX_NB_RADAR_DETECTED]; +}; + +#define RWNX_RADAR_DUMP_EN 1 +#ifdef RWNX_RADAR_DUMP_EN +#define RWNX_RADARR_DUMP_NB 32 +struct rwnx_radar_dump { + u32 cnt; + u32 idx; + u32 ps[RWNX_RADARR_DUMP_NB]; + u64 tm[RWNX_RADARR_DUMP_NB]; + u64 ts[RWNX_RADARR_DUMP_NB]; +}; +#endif +enum rwnx_radar_status { + RWNX_RADAR_IDLE = 0, + RWNX_RADAR_CAC_BUSY = 1, + RWNX_RADAR_CAC_DONE = 2, + RWNX_RADAR_INSERVICE_BUSY = 3, + RWNX_RADAR_INSERVICE_DONE = 4 +}; +struct rwnx_radar { + struct rwnx_radar_pulses pulses[RWNX_RADAR_LAST]; + struct dfs_pattern_detector *dpd[RWNX_RADAR_LAST]; + struct rwnx_radar_detected detected[RWNX_RADAR_LAST]; + #ifdef RWNX_RADAR_DUMP_EN + struct rwnx_radar_dump *rmem; + #endif + u16 status; + u16 sta_num; + struct work_struct detection_work; /* Work used to process radar pulses */ + spinlock_t lock; /* lock for pulses processing */ + + /* In softmac cac is handled by mac80211 */ +#ifdef CONFIG_RWNX_FULLMAC + struct delayed_work cac_work; /* Work used to handle CAC */ + struct rwnx_vif *cac_vif; /* vif on which we started CAC */ +#endif +}; + +void rwnx_radar_reset_rmem(struct rwnx_radar *radar); +void rwnx_radar_init_rmem(struct rwnx_radar *radar); +void rwnx_radar_deinit_rmem(struct rwnx_radar *radar); + +bool rwnx_radar_detection_init(struct rwnx_radar *radar); +void rwnx_radar_detection_deinit(struct rwnx_radar *radar); +bool rwnx_radar_set_domain(struct rwnx_radar *radar, + enum nl80211_dfs_regions region); +void rwnx_radar_detection_enable(struct rwnx_radar *radar, u8 enable, u8 chain); +bool rwnx_radar_detection_is_enable(struct rwnx_radar *radar, u8 chain); +void rwnx_radar_start_cac(struct rwnx_radar *radar, u32 cac_time_ms, + struct rwnx_vif *vif); +void rwnx_radar_cancel_cac(struct rwnx_radar *radar); +void rwnx_radar_detection_enable_on_cur_channel(struct rwnx_hw *rwnx_hw); +int rwnx_radar_dump_pattern_detector(char *buf, size_t len, + struct rwnx_radar *radar, u8 chain); +int rwnx_radar_dump_radar_detected(char *buf, size_t len, + struct rwnx_radar *radar, u8 chain); + +#else + +struct rwnx_radar { +}; + +static inline bool rwnx_radar_detection_init(struct rwnx_radar *radar) +{return true;} + +static inline void rwnx_radar_detection_deinit(struct rwnx_radar *radar) +{} + +static inline bool rwnx_radar_set_domain(struct rwnx_radar *radar, + enum nl80211_dfs_regions region) +{return true;} + +static 
inline void rwnx_radar_detection_enable(struct rwnx_radar *radar, + u8 enable, u8 chain) +{} + +static inline bool rwnx_radar_detection_is_enable(struct rwnx_radar *radar, + u8 chain) +{return false;} + +static inline void rwnx_radar_start_cac(struct rwnx_radar *radar, + u32 cac_time_ms, struct rwnx_vif *vif) +{} + +static inline void rwnx_radar_cancel_cac(struct rwnx_radar *radar) +{} + +static inline void rwnx_radar_detection_enable_on_cur_channel(struct rwnx_hw *rwnx_hw) +{} + +static inline int rwnx_radar_dump_pattern_detector(char *buf, size_t len, + struct rwnx_radar *radar, + u8 chain) +{return 0;} + +static inline int rwnx_radar_dump_radar_detected(char *buf, size_t len, + struct rwnx_radar *radar, + u8 chain) +{return 0;} + +#endif /* CONFIG_RWNX_RADAR */ + +#endif // _RWNX_RADAR_H_ diff --git a/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_rx.c b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_rx.c new file mode 100755 index 0000000..eb6d60a --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_rx.c @@ -0,0 +1,2794 @@ +/** + ****************************************************************************** + * + * @file rwnx_rx.c + * + * Copyright (C) RivieraWaves 2012-2019 + * + ****************************************************************************** + */ +#include +#include +#include +#include + +#include "rwnx_defs.h" +#include "rwnx_rx.h" +#include "rwnx_tx.h" +#include "rwnx_prof.h" +#include "ipc_host.h" +#include "rwnx_events.h" +#include "rwnx_compat.h" +#include "aicwf_txrxif.h" +#ifdef AICWF_ARP_OFFLOAD +#include +#include +#include "rwnx_msg_tx.h" +#endif +#include "rwnx_main.h" +#ifdef CONFIG_BAND_STEERING +#include "aicwf_manager.h" +#endif + +#ifndef IEEE80211_MAX_CHAINS +#define IEEE80211_MAX_CHAINS 4 +#endif + +u8 dhcped = 0; + +u16 tx_legrates_lut_rate[] = { + 10 , + 20 , + 55 , + 110 , + 60 , + 90 , + 120, + 180, + 240, + 360, + 480, + 540 +}; + +struct rwnx_legrate legrates_lut[] = { + [0] = { .idx = 0, .rate = 10}, + [1] = { .idx = 1, .rate = 20}, + [2] = { .idx = 2, .rate = 55}, + [3] = { .idx = 3, .rate = 110}, + [4] = { .idx = -1, .rate = 0}, + [5] = { .idx = -1, .rate = 0}, + [6] = { .idx = -1, .rate = 0}, + [7] = { .idx = -1, .rate = 0}, + [8] = { .idx = 10, .rate = 480}, + [9] = { .idx = 8, .rate = 240}, + [10] = { .idx = 6, .rate = 120}, + [11] = { .idx = 4, .rate = 60}, + [12] = { .idx = 11, .rate = 540}, + [13] = { .idx = 9, .rate = 360}, + [14] = { .idx = 7, .rate = 180}, + [15] = { .idx = 5, .rate = 90}, +}; + +#ifdef CONFIG_BAND_STEERING +typedef struct _op_class_ { + u8_l op_class; + u8_l band; /* 0: 2.4g, 1: 5g*/ +} _op_class_; + +static const _op_class_ g_op_class[] = { + { 81, BAND_ON_24G }, + { 82, BAND_ON_24G }, + { 83, BAND_ON_24G }, + { 84, BAND_ON_24G }, + { 115, BAND_ON_5G }, + { 116, BAND_ON_5G }, + { 117, BAND_ON_5G }, + { 118, BAND_ON_5G }, + { 119, BAND_ON_5G }, + { 120, BAND_ON_5G }, + { 121, BAND_ON_5G }, + { 122, BAND_ON_5G }, + { 123, BAND_ON_5G }, + { 124, BAND_ON_5G }, + { 125, BAND_ON_5G }, + { 126, BAND_ON_5G }, + { 127, BAND_ON_5G }, + { 128, BAND_ON_5G } +}; + +#define BAND_CAP_2G BIT(BAND_ON_24G) +#define BAND_CAP_5G BIT(BAND_ON_5G) +#endif + +struct vendor_radiotap_hdr { + u8 oui[3]; + u8 subns; + u16 len; + u8 data[]; +}; + +void rwnx_skb_align_8bytes(struct sk_buff *skb); + + +/** + * rwnx_rx_get_vif - Return pointer to the destination vif + * + * @rwnx_hw: main driver data + * @vif_idx: vif index present in rx descriptor + * + * Select the vif that should receive this frame. 
Returns NULL if the destination + * vif is not active or vif is not specified in the descriptor. + */ +static inline +struct rwnx_vif *rwnx_rx_get_vif(struct rwnx_hw *rwnx_hw, int vif_idx) +{ + struct rwnx_vif *rwnx_vif = NULL; + + if (vif_idx < NX_VIRT_DEV_MAX) { + rwnx_vif = rwnx_hw->vif_table[vif_idx]; + if(!rwnx_vif){ + AICWFDBG(LOGERROR, "%s rwnx_hw->vif_table[%d] NULL\r\n", __func__, vif_idx); + return NULL; + }else if(!rwnx_vif->up){ + AICWFDBG(LOGERROR, "%s rwnx_hw->vif_table[%d] is down\r\n", __func__, vif_idx); + return NULL; + } + } + + return rwnx_vif; +} + +/** + * rwnx_rx_vector_convert - Convert a legacy RX vector into a new RX vector format + * + * @rwnx_hw: main driver data. + * @rx_vect1: Rx vector 1 descriptor of the received frame. + * @rx_vect2: Rx vector 2 descriptor of the received frame. + */ +static void rwnx_rx_vector_convert(struct rwnx_hw *rwnx_hw, + struct rx_vector_1 *rx_vect1, + struct rx_vector_2 *rx_vect2) +{ + struct rx_vector_1_old rx_vect1_leg; + struct rx_vector_2_old rx_vect2_leg; + u32_l phy_vers = rwnx_hw->version_cfm.version_phy_2; + + // Check if we need to do the conversion. Only if old modem is used + if (__MDM_MAJOR_VERSION(phy_vers) > 0) { + rx_vect1->rssi1 = rx_vect1->rssi_leg; + return; + } + + // Copy the received vector locally + memcpy(&rx_vect1_leg, rx_vect1, sizeof(struct rx_vector_1_old)); + + // Reset it + memset(rx_vect1, 0, sizeof(struct rx_vector_1)); + + // Perform the conversion + rx_vect1->format_mod = rx_vect1_leg.format_mod; + rx_vect1->ch_bw = rx_vect1_leg.ch_bw; + rx_vect1->antenna_set = rx_vect1_leg.antenna_set; + rx_vect1->leg_length = rx_vect1_leg.leg_length; + rx_vect1->leg_rate = rx_vect1_leg.leg_rate; + rx_vect1->rssi1 = rx_vect1_leg.rssi1; + + switch (rx_vect1->format_mod) { + case FORMATMOD_NON_HT: + case FORMATMOD_NON_HT_DUP_OFDM: + rx_vect1->leg.lsig_valid = rx_vect1_leg.lsig_valid; + rx_vect1->leg.chn_bw_in_non_ht = rx_vect1_leg.num_extn_ss; + rx_vect1->leg.dyn_bw_in_non_ht = rx_vect1_leg.dyn_bw; + break; + case FORMATMOD_HT_MF: + case FORMATMOD_HT_GF: + rx_vect1->ht.aggregation = rx_vect1_leg.aggregation; + rx_vect1->ht.fec = rx_vect1_leg.fec_coding; + rx_vect1->ht.lsig_valid = rx_vect1_leg.lsig_valid; + rx_vect1->ht.length = rx_vect1_leg.ht_length; + rx_vect1->ht.mcs = rx_vect1_leg.mcs; + rx_vect1->ht.num_extn_ss = rx_vect1_leg.num_extn_ss; + rx_vect1->ht.short_gi = rx_vect1_leg.short_gi; + rx_vect1->ht.smoothing = rx_vect1_leg.smoothing; + rx_vect1->ht.sounding = rx_vect1_leg.sounding; + rx_vect1->ht.stbc = rx_vect1_leg.stbc; + break; + case FORMATMOD_VHT: + rx_vect1->vht.beamformed = !rx_vect1_leg.smoothing; + rx_vect1->vht.fec = rx_vect1_leg.fec_coding; + rx_vect1->vht.length = rx_vect1_leg.ht_length | rx_vect1_leg._ht_length << 8; + rx_vect1->vht.mcs = rx_vect1_leg.mcs & 0x0F; + rx_vect1->vht.nss = rx_vect1_leg.stbc ? 
rx_vect1_leg.n_sts/2 : rx_vect1_leg.n_sts; + rx_vect1->vht.doze_not_allowed = rx_vect1_leg.doze_not_allowed; + rx_vect1->vht.short_gi = rx_vect1_leg.short_gi; + rx_vect1->vht.sounding = rx_vect1_leg.sounding; + rx_vect1->vht.stbc = rx_vect1_leg.stbc; + rx_vect1->vht.group_id = rx_vect1_leg.group_id; + rx_vect1->vht.partial_aid = rx_vect1_leg.partial_aid; + rx_vect1->vht.first_user = rx_vect1_leg.first_user; + break; + } + + if (!rx_vect2) + return; + + // Copy the received vector 2 locally + memcpy(&rx_vect2_leg, rx_vect2, sizeof(struct rx_vector_2_old)); + + // Reset it + memset(rx_vect2, 0, sizeof(struct rx_vector_2)); + + rx_vect2->rcpi1 = rx_vect2_leg.rcpi; + rx_vect2->rcpi2 = rx_vect2_leg.rcpi; + rx_vect2->rcpi3 = rx_vect2_leg.rcpi; + rx_vect2->rcpi4 = rx_vect2_leg.rcpi; + + rx_vect2->evm1 = rx_vect2_leg.evm1; + rx_vect2->evm2 = rx_vect2_leg.evm2; + rx_vect2->evm3 = rx_vect2_leg.evm3; + rx_vect2->evm4 = rx_vect2_leg.evm4; +} + +/** + * rwnx_rx_statistic - save some statistics about received frames + * + * @rwnx_hw: main driver data. + * @hw_rxhdr: Rx Hardware descriptor of the received frame. + * @sta: STA that sent the frame. + */ +static void rwnx_rx_statistic(struct rwnx_hw *rwnx_hw, struct hw_rxhdr *hw_rxhdr, + struct rwnx_sta *sta) +{ +#if 1//def CONFIG_RWNX_DEBUGFS + struct rwnx_stats *stats = &rwnx_hw->stats; + struct rwnx_rx_rate_stats *rate_stats = &sta->stats.rx_rate; + struct rx_vector_1 *rxvect = &hw_rxhdr->hwvect.rx_vect1; + int mpdu, ampdu, mpdu_prev, rate_idx; + + /* save complete hwvect */ + sta->stats.last_rx = hw_rxhdr->hwvect; + + /* update ampdu rx stats */ + mpdu = hw_rxhdr->hwvect.mpdu_cnt; + ampdu = hw_rxhdr->hwvect.ampdu_cnt; + mpdu_prev = stats->ampdus_rx_map[ampdu]; + + if (mpdu_prev < mpdu ) { + stats->ampdus_rx_miss += mpdu - mpdu_prev - 1; + } else { + stats->ampdus_rx[mpdu_prev]++; + } + stats->ampdus_rx_map[ampdu] = mpdu; + + /* update rx rate statistic */ + if (!rate_stats->size) + return; + + if (rxvect->format_mod > FORMATMOD_NON_HT_DUP_OFDM) { + int mcs; + int bw = rxvect->ch_bw; + int sgi; + int nss; + switch (rxvect->format_mod) { + case FORMATMOD_HT_MF: + case FORMATMOD_HT_GF: + mcs = rxvect->ht.mcs % 8; + nss = rxvect->ht.mcs / 8; + sgi = rxvect->ht.short_gi; + rate_idx = N_CCK + N_OFDM + nss * 32 + mcs * 4 + bw * 2 + sgi; + break; + case FORMATMOD_VHT: + mcs = rxvect->vht.mcs; + nss = rxvect->vht.nss; + sgi = rxvect->vht.short_gi; + rate_idx = N_CCK + N_OFDM + N_HT + nss * 80 + mcs * 8 + bw * 2 + sgi; + break; + case FORMATMOD_HE_SU: + mcs = rxvect->he.mcs; + nss = rxvect->he.nss; + sgi = rxvect->he.gi_type; + rate_idx = N_CCK + N_OFDM + N_HT + N_VHT + nss * 144 + mcs * 12 + bw * 3 + sgi; + break; + default: + mcs = rxvect->he.mcs; + nss = rxvect->he.nss; + sgi = rxvect->he.gi_type; + rate_idx = N_CCK + N_OFDM + N_HT + N_VHT + N_HE_SU + + nss * 216 + mcs * 18 + rxvect->he.ru_size * 3 + sgi; + break; + } + } else { + int idx = legrates_lut[rxvect->leg_rate].idx; + if (idx < 4) { + rate_idx = idx * 2 + rxvect->pre_type; + } else { + rate_idx = N_CCK + idx - 4; + } + } + if (rate_idx < rate_stats->size) { + if (!rate_stats->table[rate_idx]) + rate_stats->rate_cnt++; + rate_stats->table[rate_idx]++; + rate_stats->cpt++; + } else { + wiphy_err(rwnx_hw->wiphy, "RX: Invalid index conversion => %d/%d\n", + rate_idx, rate_stats->size); + } +#endif +} + +/** + * rwnx_rx_data_skb - Process one data frame + * + * @rwnx_hw: main driver data + * @rwnx_vif: vif that received the buffer + * @skb: skb received + * @rxhdr: HW rx descriptor + * @return: true if 
buffer has been forwarded to upper layer + * + * If buffer is amsdu , it is first split into a list of skb. + * Then each skb may be: + * - forwarded to upper layer + * - resent on wireless interface + * + * When vif is a STA interface, every skb is only forwarded to upper layer. + * When vif is an AP interface, multicast skb are forwarded and resent, whereas + * skb for other BSS's STA are only resent. + */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) +#define RAISE_RX_SOFTIRQ() \ + cpu_raise_softirq(smp_processor_id(), NET_RX_SOFTIRQ) +#endif /* LINUX_VERSION_CODE */ + +void rwnx_rx_data_skb_resend(struct rwnx_hw *rwnx_hw, struct rwnx_vif *rwnx_vif, struct sk_buff *skb) +{ + struct sk_buff *rx_skb = skb; + //bool amsdu = rxhdr->flags_is_amsdu; + const struct ethhdr *eth; + struct sk_buff *skb_copy; + + rx_skb->dev = rwnx_vif->ndev; + skb_reset_mac_header(rx_skb); + eth = eth_hdr(rx_skb); + + //printk("resend\n"); + /* resend pkt on wireless interface */ + /* always need to copy buffer when forward=0 to get enough headrom for tsdesc */ + skb_copy = skb_copy_expand(rx_skb, sizeof(struct rwnx_txhdr) + + RWNX_SWTXHDR_ALIGN_SZ + 3 + 24 + 8, 0, GFP_ATOMIC); + + if (skb_copy) { + int res; + skb_copy->protocol = htons(ETH_P_802_3); + skb_reset_network_header(skb_copy); + skb_reset_mac_header(skb_copy); + + rwnx_vif->is_resending = true; + res = dev_queue_xmit(skb_copy); + rwnx_vif->is_resending = false; + /* note: buffer is always consummed by dev_queue_xmit */ + if (res == NET_XMIT_DROP) { + rwnx_vif->net_stats.rx_dropped++; + rwnx_vif->net_stats.tx_dropped++; + } else if (res != NET_XMIT_SUCCESS) { + netdev_err(rwnx_vif->ndev, + "Failed to re-send buffer to driver (res=%d)", + res); + rwnx_vif->net_stats.tx_errors++; + } + } else { + netdev_err(rwnx_vif->ndev, "Failed to copy skb"); + } +} +#ifdef AICWF_RX_REORDER +static void rwnx_rx_data_skb_forward(struct rwnx_hw *rwnx_hw, struct rwnx_vif *rwnx_vif, struct sk_buff *skb) +{ + struct sk_buff *rx_skb; + + rx_skb = skb; + rx_skb->dev = rwnx_vif->ndev; + skb_reset_mac_header(rx_skb); + + /* Update statistics */ + rwnx_vif->net_stats.rx_packets++; + rwnx_vif->net_stats.rx_bytes += rx_skb->len; + + //printk("forward\n"); +#ifdef CONFIG_BR_SUPPORT + void *br_port = NULL; + + if (1) {//(check_fwstate(pmlmepriv, WIFI_STATION_STATE | WIFI_ADHOC_STATE) == _TRUE) { + /* Insert NAT2.5 RX here! */ + #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 35)) + br_port = rwnx_vif->ndev->br_port; + #else + rcu_read_lock(); + br_port = rcu_dereference(rwnx_vif->ndev->rx_handler_data); + rcu_read_unlock(); + #endif + + if (br_port) { + int nat25_handle_frame(struct rwnx_vif *vif, struct sk_buff *skb); + + if (nat25_handle_frame(rwnx_vif, rx_skb) == -1) { + /* priv->ext_stats.rx_data_drops++; */ + /* DEBUG_ERR("RX DROP: nat25_handle_frame fail!\n"); */ + /* return FAIL; */ + + } + } + } +#endif /* CONFIG_BR_SUPPORT */ + + rwnx_skb_align_8bytes(rx_skb); + + rx_skb->protocol = eth_type_trans(rx_skb, rwnx_vif->ndev); + memset(rx_skb->cb, 0, sizeof(rx_skb->cb)); + REG_SW_SET_PROFILING(rwnx_hw, SW_PROF_IEEE80211RX); + + +#ifdef CONFIG_FILTER_TCP_ACK + filter_rx_tcp_ack(rwnx_hw, rx_skb->data, cpu_to_le16(rx_skb->len)); +#endif + + #ifdef CONFIG_RX_NETIF_RECV_SKB //modify by aic + local_bh_disable(); + netif_receive_skb(rx_skb); + local_bh_enable(); + #else + if (in_interrupt()) { + netif_rx(rx_skb); + } else { + /* + * If the receive is not processed inside an ISR, the softirqd must be woken explicitly to service the NET_RX_SOFTIRQ. 
+ * * In 2.6 kernels, this is handledby netif_rx_ni(), but in earlier kernels, we need to do it manually. + */ + #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) + netif_rx_ni(rx_skb); + #else + ulong flags; + netif_rx(rx_skb); + local_irq_save(flags); + RAISE_RX_SOFTIRQ(); + local_irq_restore(flags); + #endif + } + #endif + REG_SW_CLEAR_PROFILING(rwnx_hw, SW_PROF_IEEE80211RX); + + rwnx_hw->stats.last_rx = jiffies; +} +#endif + + +static bool rwnx_rx_data_skb(struct rwnx_hw *rwnx_hw, struct rwnx_vif *rwnx_vif, + struct sk_buff *skb, struct hw_rxhdr *rxhdr) +{ + struct sk_buff_head list; + struct sk_buff *rx_skb; + bool amsdu = rxhdr->flags_is_amsdu; + bool resend = false, forward = true; + u8 flags_vif_idx = rxhdr->flags_vif_idx; + u8 flags_dst_idx = rxhdr->flags_dst_idx; + + skb->dev = rwnx_vif->ndev; + + __skb_queue_head_init(&list); + + if (amsdu) { + #if 1 + rwnx_rxdata_process_amsdu(rwnx_hw, skb, flags_vif_idx, &list); + #else + int count; +#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 3, 0) + ieee80211_amsdu_to_8023s(skb, &list, rwnx_vif->ndev->dev_addr, + RWNX_VIF_TYPE(rwnx_vif), 0, NULL, NULL, false); +#else + ieee80211_amsdu_to_8023s(skb, &list, rwnx_vif->ndev->dev_addr, + RWNX_VIF_TYPE(rwnx_vif), 0, NULL, NULL); +#endif + + count = skb_queue_len(&list); + if (count > ARRAY_SIZE(rwnx_hw->stats.amsdus_rx)) + count = ARRAY_SIZE(rwnx_hw->stats.amsdus_rx); + rwnx_hw->stats.amsdus_rx[count - 1]++; + #endif + } else { + rwnx_hw->stats.amsdus_rx[0]++; + __skb_queue_head(&list, skb); + } + + if (((RWNX_VIF_TYPE(rwnx_vif) == NL80211_IFTYPE_AP) || + (RWNX_VIF_TYPE(rwnx_vif) == NL80211_IFTYPE_AP_VLAN) || + (RWNX_VIF_TYPE(rwnx_vif) == NL80211_IFTYPE_P2P_GO)) && + !(rwnx_vif->ap.flags & RWNX_AP_ISOLATE)) { + const struct ethhdr *eth; + rx_skb = skb_peek(&list); + skb_reset_mac_header(rx_skb); + eth = eth_hdr(rx_skb); + + if (unlikely(is_multicast_ether_addr(eth->h_dest))) { + /* broadcast pkt need to be forwared to upper layer and resent + on wireless interface */ + resend = true; + } else { + /* unicast pkt for STA inside the BSS, no need to forward to upper + layer simply resend on wireless interface */ + if (flags_dst_idx != RWNX_INVALID_STA) + { + struct rwnx_sta *sta = &rwnx_hw->sta_table[flags_dst_idx]; + if (sta->valid && (sta->vlan_idx == rwnx_vif->vif_index)) + { + forward = false; + resend = true; + } + } + } + } else if (RWNX_VIF_TYPE(rwnx_vif) == NL80211_IFTYPE_MESH_POINT) { + const struct ethhdr *eth; + rx_skb = skb_peek(&list); + skb_reset_mac_header(rx_skb); + eth = eth_hdr(rx_skb); + + if (!is_multicast_ether_addr(eth->h_dest)) { + /* unicast pkt for STA inside the BSS, no need to forward to upper + layer simply resend on wireless interface */ + if (flags_dst_idx != RWNX_INVALID_STA) + { + forward = false; + resend = true; + } + } + } + + while (!skb_queue_empty(&list)) { + rx_skb = __skb_dequeue(&list); + + /* resend pkt on wireless interface */ + if (resend) { + struct sk_buff *skb_copy; + /* always need to copy buffer when forward=0 to get enough headrom for tsdesc */ + skb_copy = skb_copy_expand(rx_skb, sizeof(struct rwnx_txhdr) + + RWNX_SWTXHDR_ALIGN_SZ + 3 + 24 + 8, 0, GFP_ATOMIC); + + if (skb_copy) { + int res; + skb_copy->protocol = htons(ETH_P_802_3); + skb_reset_network_header(skb_copy); + skb_reset_mac_header(skb_copy); + + rwnx_vif->is_resending = true; + res = dev_queue_xmit(skb_copy); + rwnx_vif->is_resending = false; + /* note: buffer is always consummed by dev_queue_xmit */ + if (res == NET_XMIT_DROP) { + rwnx_vif->net_stats.rx_dropped++; + 
rwnx_vif->net_stats.tx_dropped++; + } else if (res != NET_XMIT_SUCCESS) { + netdev_err(rwnx_vif->ndev, + "Failed to re-send buffer to driver (res=%d)", + res); + rwnx_vif->net_stats.tx_errors++; + } + } else { + netdev_err(rwnx_vif->ndev, "Failed to copy skb"); + } + } + + /* forward pkt to upper layer */ + if (forward) { +#ifdef CONFIG_BR_SUPPORT + void *br_port = NULL; + + + /* Update statistics */ + rwnx_vif->net_stats.rx_packets++; + rwnx_vif->net_stats.rx_bytes += rx_skb->len; + + if (1) {//(check_fwstate(pmlmepriv, WIFI_STATION_STATE | WIFI_ADHOC_STATE) == _TRUE) { + /* Insert NAT2.5 RX here! */ +#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 35)) + br_port = rwnx_vif->ndev->br_port; +#else + rcu_read_lock(); + br_port = rcu_dereference(rwnx_vif->ndev->rx_handler_data); + rcu_read_unlock(); +#endif + + if (br_port) { + int nat25_handle_frame(struct rwnx_vif *vif, struct sk_buff *skb); + + if (nat25_handle_frame(rwnx_vif, rx_skb) == -1) { + /* priv->ext_stats.rx_data_drops++; */ + /* DEBUG_ERR("RX DROP: nat25_handle_frame fail!\n"); */ + /* return FAIL; */ + } + } + } +#endif /* CONFIG_BR_SUPPORT */ + + rwnx_skb_align_8bytes(rx_skb); + + rx_skb->protocol = eth_type_trans(rx_skb, rwnx_vif->ndev); + +#ifdef AICWF_ARP_OFFLOAD + if(RWNX_VIF_TYPE(rwnx_vif) == NL80211_IFTYPE_STATION || RWNX_VIF_TYPE(rwnx_vif) == NL80211_IFTYPE_P2P_CLIENT) + arpoffload_proc(rx_skb, rwnx_vif); +#endif + memset(rx_skb->cb, 0, sizeof(rx_skb->cb)); + REG_SW_SET_PROFILING(rwnx_hw, SW_PROF_IEEE80211RX); + + +#ifdef CONFIG_FILTER_TCP_ACK + filter_rx_tcp_ack(rwnx_hw, rx_skb->data, cpu_to_le16(rx_skb->len)); +#endif + + #ifdef CONFIG_RX_NETIF_RECV_SKB //modify by aic + local_bh_disable(); + netif_receive_skb(rx_skb); + local_bh_enable(); + #else + if (in_interrupt()) { + netif_rx(rx_skb); + } else { + /* + * If the receive is not processed inside an ISR, the softirqd must be woken explicitly to service the NET_RX_SOFTIRQ. + * * In 2.6 kernels, this is handledby netif_rx_ni(), but in earlier kernels, we need to do it manually. + */ + #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) + netif_rx_ni(rx_skb); + #else + ulong flags; + netif_rx(rx_skb); + local_irq_save(flags); + RAISE_RX_SOFTIRQ(); + local_irq_restore(flags); + #endif + } + #endif + REG_SW_CLEAR_PROFILING(rwnx_hw, SW_PROF_IEEE80211RX); + + rwnx_hw->stats.last_rx = jiffies; + } + } + + return forward; +} + +#ifdef CONFIG_HE_FOR_OLD_KERNEL +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0)) +const u8 *cfg80211_find_ie_match(u8 eid, const u8 *ies, int len, + const u8 *match, int match_len, + int match_offset) +{ + const struct element *elem; + + /* match_offset can't be smaller than 2, unless match_len is + * zero, in which case match_offset must be zero as well. 
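+ * (Local backport of the cfg80211 helper for kernels older than 4.9 that
+ * do not provide cfg80211_find_ie_match.)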
+ */ + if (WARN_ON((match_len && match_offset < 2) || + (!match_len && match_offset))) + return NULL; + + for_each_element_id(elem, eid, ies, len) { + if (elem->datalen >= match_offset - 2 + match_len && + !memcmp(elem->data + match_offset - 2, match, match_len)) + return (void *)elem; + } + + return NULL; +} +#endif +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)) + +static inline const u8 *cfg80211_find_ext_ie(u8 ext_eid, const u8* ies, int len) +{ + return cfg80211_find_ie_match(WLAN_EID_EXTENSION, ies, len, + &ext_eid, 1, 2); +} +#endif + +#endif + +#ifdef CONFIG_BAND_STEERING +static void handle_op_class_band(struct tmp_feature_sta *f_sta, const u8 *ie, int ie_len) +{ + int op_num, i, j; + u8 op_class; + + if (!ie) { + return; + } + + op_num = ie[1]; + for (i = 0; i < op_num; i++) { + op_class = ie[i + 2]; + for (j = 0; j < ARRAY_SIZE(g_op_class); j++) { + if (op_class == g_op_class[j].op_class) { + switch (g_op_class[j].band) { + case BAND_ON_24G: + f_sta->supported_band |= BAND_CAP_2G; + break; + case BAND_ON_5G: + f_sta->supported_band |= BAND_CAP_5G; + break; + default: + break; + } + break; + } + } + } +} + +static void set_band_by_freq(struct tmp_feature_sta *f_sta, u16 freq) +{ + if (freq >= 5180) { + f_sta->supported_band |= BAND_CAP_5G; + } else { + f_sta->supported_band |= BAND_CAP_2G; + } +} + +static void handle_assoc_request(struct rwnx_vif *rwnx_vif, + struct tmp_feature_sta *f_sta, + struct ieee80211_mgmt *mgmt, + struct sk_buff *skb, + struct rx_vector_1 *rxvect, + struct hw_rxhdr *hw_rxhdr, + bool is_reassoc) +{ + const u8 *ie; + u8 *variable_field; + size_t variable_len; + + aicwf_nl_send_frame_rpt_msg(rwnx_vif, WIFI_ASSOCREQ, mgmt->sa, rxvect->rssi1); + f_sta->sta_idx = rwnx_vif->ap.tmp_sta_idx; + + if (is_reassoc) { + variable_field = mgmt->u.reassoc_req.variable; + } else { + variable_field = mgmt->u.assoc_req.variable; + } + variable_len = skb->len - (variable_field - skb->data); + + ie = cfg80211_find_ie(WLAN_EID_SUPPORTED_REGULATORY_CLASSES, + variable_field, variable_len); + + if (ie) { + handle_op_class_band(f_sta, ie, variable_len); + AICWFDBG(LOGINFO, "%s usb %sassoc supported_band: %u\n", __func__, + is_reassoc ? "re" : "", f_sta->supported_band); + } else { + AICWFDBG(LOGINFO, "%s usb %sassoc no find op_class\n", __func__, is_reassoc ? "re" : ""); + set_band_by_freq(f_sta, hw_rxhdr->phy_info.phy_prim20_freq); + } +} +#endif + +/** + * rwnx_rx_mgmt - Process one 802.11 management frame + * + * @rwnx_hw: main driver data + * @rwnx_vif: vif to upload the buffer to + * @skb: skb received + * @rxhdr: HW rx descriptor + * + * Forward the management frame to a given interface. 
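+ * Beacons, unprotected deauth/disassoc and FT action frames get dedicated
+ * handling; any other management frame is forwarded to cfg80211_rx_mgmt().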
+ */ +static void rwnx_rx_mgmt(struct rwnx_hw *rwnx_hw, struct rwnx_vif *rwnx_vif, + struct sk_buff *skb, struct hw_rxhdr *hw_rxhdr) +{ + struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data; + struct rx_vector_1 *rxvect = &hw_rxhdr->hwvect.rx_vect1; +#ifdef CONFIG_HE_FOR_OLD_KERNEL + struct ieee80211_he_cap_elem *he; + struct ieee80211_he_mcs_nss_supp *he_mcs; +#endif + + //printk("rwnx_rx_mgmt\n"); + if(skb->data[0]!=0x80 && skb->data[0] != 0x40) + AICWFDBG(LOGDEBUG,"rxmgmt:%x,%x\n", skb->data[0], skb->data[1]); + +#ifdef CONFIG_BAND_STEERING + bool queued = false; + struct tmp_feature_sta *f_sta = &rwnx_hw->feature_table[rwnx_vif->ap.tmp_sta_idx]; + if (RWNX_VIF_TYPE(rwnx_vif) == NL80211_IFTYPE_AP) { +#if 0 + if(skb->data[0] != 0x80) + printk("rx mgmt:%02x\n", mgmt->frame_control); +#endif + + if (ieee80211_is_assoc_req(mgmt->frame_control)) { + handle_assoc_request(rwnx_vif, f_sta, mgmt, skb, rxvect, hw_rxhdr, false); + } else if (ieee80211_is_reassoc_req(mgmt->frame_control)) { + handle_assoc_request(rwnx_vif, f_sta, mgmt, skb, rxvect, hw_rxhdr, true); + } + + if (ieee80211_is_auth(mgmt->frame_control)) { + aicwf_nl_send_frame_rpt_msg(rwnx_vif, WIFI_AUTH, mgmt->sa, rxvect->rssi1); + } + + if (ieee80211_is_probe_req(mgmt->frame_control)) { + if (!rwnx_vif->ap.start) + return; + aicwf_nl_send_frame_rpt_msg(rwnx_vif, WIFI_PROBEREQ, mgmt->sa, rxvect->rssi1); + AICWFDBG(LOGSTEER, "usb rx p_req: %pM\n", mgmt->sa); + for (int i = 0; i < MAX_PENDING_PROBES; i++) { + if (!rwnx_vif->pb_pool[i].in_use) { + rwnx_vif->pb_pool[i].in_use = true; + memcpy(rwnx_vif->pb_pool[i].da, mgmt->sa, 6); + queue_work(rwnx_vif->rsp_wq, &rwnx_vif->pb_pool[i].rsp_work); + queued = true; + break; + } + } + if (!queued) + AICWFDBG(LOGERROR, "usb probe_rsp pool full, drop %pM\n", mgmt->sa); + return; + } + + } +#endif + +#if (defined CONFIG_HE_FOR_OLD_KERNEL) || (defined CONFIG_VHT_FOR_OLD_KERNEL) + struct aic_sta *sta = &rwnx_hw->aic_table[rwnx_vif->ap.aic_index]; + const u8* ie; + u32 len; + + if (ieee80211_is_assoc_req(mgmt->frame_control) && rwnx_vif->wdev.iftype == NL80211_IFTYPE_AP) { + printk("ASSOC_REQ: sta_idx %d MAC %pM\n", rwnx_vif->ap.aic_index, mgmt->sa); + sta->sta_idx = rwnx_vif->ap.aic_index; + len = skb->len - (mgmt->u.assoc_req.variable - skb->data); + + #ifdef CONFIG_HE_FOR_OLD_KERNEL + ie = cfg80211_find_ext_ie(WLAN_EID_EXT_HE_CAPABILITY, mgmt->u.assoc_req.variable, len); + if (ie && ie[1] >= sizeof(*he) + 1) { + printk("assoc_req: find he\n"); + he = (struct ieee80211_he_cap_elem *)(ie+3); + he_mcs = (struct ieee80211_he_mcs_nss_supp *)((u8 *)he + sizeof(*he)); + memcpy(&sta->he_cap_elem,ie+3,sizeof(struct ieee80211_he_cap_elem)); + + sta->he_mcs_nss_supp.rx_mcs_80 = he_mcs->rx_mcs_80; + sta->he_mcs_nss_supp.tx_mcs_80 = he_mcs->tx_mcs_80; + sta->he = true; + } + else { + printk("assoc_req: no find he\n"); + sta->he = false; + } + #endif + + #ifdef CONFIG_VHT_FOR_OLD_KERNEL + struct ieee80211_vht_cap *vht; + ie = cfg80211_find_ie(WLAN_EID_VHT_CAPABILITY, mgmt->u.assoc_req.variable, len); + if (ie && ie[1] >= sizeof(*vht)) { + printk("assoc_req: find vht\n"); + memcpy(&sta->vht_cap_info,ie+2,4); + memcpy(&sta->supp_mcs,ie+2+4,sizeof(struct ieee80211_vht_mcs_info)); + sta->vht = true; + } else { + printk("assoc_req: no find vht\n"); + sta->vht = false; + } + #endif + } + if (ieee80211_is_deauth(mgmt->frame_control) && rwnx_vif->wdev.iftype == NL80211_IFTYPE_AP) { + printk("DEAUTH: sta_idx %d MAC %pM reason:%x\n", rwnx_vif->ap.aic_index, mgmt->sa, mgmt->u.deauth.reason_code); + } + if 
(ieee80211_is_disassoc(mgmt->frame_control) && rwnx_vif->wdev.iftype == NL80211_IFTYPE_AP) { + printk("DISASSOC: sta_idx %d MAC %pM reason:%x\n", rwnx_vif->ap.aic_index, mgmt->sa, mgmt->u.disassoc.reason_code); + } +#endif + + /*if (ieee80211_is_mgmt(mgmt->frame_control) && + (skb->len <= 24 || skb->len > 768)) { + printk("mgmt err\n"); + return; + }*/ + + if (ieee80211_is_beacon(mgmt->frame_control)) { + if ((RWNX_VIF_TYPE(rwnx_vif) == NL80211_IFTYPE_MESH_POINT) && + hw_rxhdr->flags_new_peer) { +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 0, 0)) + cfg80211_notify_new_peer_candidate(rwnx_vif->ndev, mgmt->sa, + mgmt->u.beacon.variable, + skb->len - offsetof(struct ieee80211_mgmt, + u.beacon.variable), + GFP_ATOMIC); +#else + /* TODO: the value of parameter sig_dbm need to be confirmed */ + cfg80211_notify_new_peer_candidate(rwnx_vif->ndev, mgmt->sa, + mgmt->u.beacon.variable, + skb->len - offsetof(struct ieee80211_mgmt, + u.beacon.variable), + rxvect->rssi1, GFP_ATOMIC); +#endif + } else { + + #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0) + cfg80211_report_obss_beacon(rwnx_hw->wiphy, skb->data, skb->len, + hw_rxhdr->phy_info.phy_prim20_freq, + rxvect->rssi1, GFP_KERNEL); + #else + cfg80211_report_obss_beacon(rwnx_hw->wiphy, skb->data, skb->len, + hw_rxhdr->phy_info.phy_prim20_freq, + rxvect->rssi1); + #endif + } + } else if ((ieee80211_is_deauth(mgmt->frame_control) || + ieee80211_is_disassoc(mgmt->frame_control)) && + (mgmt->u.deauth.reason_code == WLAN_REASON_CLASS2_FRAME_FROM_NONAUTH_STA || + mgmt->u.deauth.reason_code == WLAN_REASON_CLASS3_FRAME_FROM_NONASSOC_STA)) { + #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0) // TODO: process unprot mgmt + cfg80211_rx_unprot_mlme_mgmt(rwnx_vif->ndev, skb->data, skb->len); + #endif + } else if ((RWNX_VIF_TYPE(rwnx_vif) == NL80211_IFTYPE_STATION) && + (ieee80211_is_action(mgmt->frame_control) && + (mgmt->u.action.category == 6))) { + struct cfg80211_ft_event_params ft_event; + ft_event.target_ap = (uint8_t *)&mgmt->u.action + ETH_ALEN + 2; + ft_event.ies = (uint8_t *)&mgmt->u.action + ETH_ALEN * 2 + 2; + ft_event.ies_len = skb->len - (ft_event.ies - (uint8_t *)mgmt); + ft_event.ric_ies = NULL; + ft_event.ric_ies_len = 0; + cfg80211_ft_event(rwnx_vif->ndev, &ft_event); + } else { +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0) + cfg80211_rx_mgmt(&rwnx_vif->wdev, hw_rxhdr->phy_info.phy_prim20_freq, + rxvect->rssi1, skb->data, skb->len, 0); +#else + cfg80211_rx_mgmt(rwnx_vif->ndev, hw_rxhdr->phy_info.phy_prim20_freq, + rxvect->rssi1, skb->data, skb->len, 0); +#endif + } +} + +/** + * rwnx_rx_mgmt_any - Process one 802.11 management frame + * + * @rwnx_hw: main driver data + * @skb: skb received + * @rxhdr: HW rx descriptor + * + * Process the management frame and free the corresponding skb. + * If vif is not specified in the rx descriptor, the the frame is uploaded + * on all active vifs. + */ +static void rwnx_rx_mgmt_any(struct rwnx_hw *rwnx_hw, struct sk_buff *skb, + struct hw_rxhdr *hw_rxhdr) +{ + struct rwnx_vif *rwnx_vif; + int vif_idx = hw_rxhdr->flags_vif_idx; +#ifdef CREATE_TRACE_POINTS + trace_mgmt_rx(hw_rxhdr->phy_info.phy_prim20_freq, vif_idx, + hw_rxhdr->flags_sta_idx, (struct ieee80211_mgmt *)skb->data); +#endif + + if (vif_idx == RWNX_INVALID_VIF) { + list_for_each_entry(rwnx_vif, &rwnx_hw->vifs, list) { + if (! 
rwnx_vif->up) + continue; + rwnx_rx_mgmt(rwnx_hw, rwnx_vif, skb, hw_rxhdr); + } + } else { + rwnx_vif = rwnx_rx_get_vif(rwnx_hw, vif_idx); + if (rwnx_vif) + rwnx_rx_mgmt(rwnx_hw, rwnx_vif, skb, hw_rxhdr); + } + + dev_kfree_skb(skb); +} + +/** + * rwnx_rx_rtap_hdrlen - Return radiotap header length + * + * @rxvect: Rx vector used to fill the radiotap header + * @has_vend_rtap: boolean indicating if vendor specific data is present + * + * Compute the length of the radiotap header based on @rxvect and vendor + * specific data (if any). + */ +static u8 rwnx_rx_rtap_hdrlen(struct rx_vector_1 *rxvect, + bool has_vend_rtap) +{ + u8 rtap_len; + + /* Compute radiotap header length */ + rtap_len = sizeof(struct ieee80211_radiotap_header) + 8; + + // Check for multiple antennas + if (hweight32(rxvect->antenna_set) > 1) + // antenna and antenna signal fields + rtap_len += 4 * hweight8(rxvect->antenna_set); + + // TSFT + if (!has_vend_rtap) { + rtap_len = ALIGN(rtap_len, 8); + rtap_len += 8; + } + + // IEEE80211_HW_SIGNAL_DBM + rtap_len++; + + // Check if single antenna + if (hweight32(rxvect->antenna_set) == 1) + rtap_len++; //Single antenna + + // padding for RX FLAGS + rtap_len = ALIGN(rtap_len, 2); + + // Check for HT frames + if ((rxvect->format_mod == FORMATMOD_HT_MF) || + (rxvect->format_mod == FORMATMOD_HT_GF)) + rtap_len += 3; + + // Check for AMPDU + if (!(has_vend_rtap) && ((rxvect->format_mod >= FORMATMOD_VHT) || + ((rxvect->format_mod > FORMATMOD_NON_HT_DUP_OFDM) && + (rxvect->ht.aggregation)))) { + rtap_len = ALIGN(rtap_len, 4); + rtap_len += 8; + } + + // Check for VHT frames + if (rxvect->format_mod == FORMATMOD_VHT) { + rtap_len = ALIGN(rtap_len, 2); + rtap_len += 12; + } + + // Check for HE frames + if (rxvect->format_mod == FORMATMOD_HE_SU) { + rtap_len = ALIGN(rtap_len, 2); + rtap_len += sizeof(struct ieee80211_radiotap_he); + } + + // Check for multiple antennas + if (hweight32(rxvect->antenna_set) > 1) { + // antenna and antenna signal fields + rtap_len += 2 * hweight8(rxvect->antenna_set); + } + + // Check for vendor specific data + if (has_vend_rtap) { + /* vendor presence bitmap */ + rtap_len += 4; + /* alignment for fixed 6-byte vendor data header */ + rtap_len = ALIGN(rtap_len, 2); + } + + return rtap_len; +} + +/** + * rwnx_rx_add_rtap_hdr - Add radiotap header to sk_buff + * + * @rwnx_hw: main driver data + * @skb: skb received (will include the radiotap header) + * @rxvect: Rx vector + * @phy_info: Information regarding the phy + * @hwvect: HW Info (NULL if vendor specific data is available) + * @rtap_len: Length of the radiotap header + * @vend_rtap_len: radiotap vendor length (0 if not present) + * @vend_it_present: radiotap vendor present + * + * Builds a radiotap header and add it to @skb. 
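+ *
+ * Radiotap requires the fields to appear in the order of their it_present
+ * bits, each aligned to its natural size, which is why the code below keeps
+ * a running position pointer and inserts explicit padding before the TSFT,
+ * RX flags, A-MPDU, VHT and HE fields.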
+ */ +static void rwnx_rx_add_rtap_hdr(struct rwnx_hw* rwnx_hw, + struct sk_buff *skb, + struct rx_vector_1 *rxvect, + struct phy_channel_info_desc *phy_info, + struct hw_vect *hwvect, + int rtap_len, + u8 vend_rtap_len, + u32 vend_it_present) +{ + struct ieee80211_radiotap_header *rtap; + u8 *pos, rate_idx; + __le32 *it_present; + u32 it_present_val = 0; + bool fec_coding = false; + bool short_gi = false; + bool stbc = false; + bool aggregation = false; + + rtap = (struct ieee80211_radiotap_header *)skb_push(skb, rtap_len); + memset((u8*) rtap, 0, rtap_len); + + rtap->it_version = 0; + rtap->it_pad = 0; + rtap->it_len = cpu_to_le16(rtap_len + vend_rtap_len); + + it_present = &rtap->it_present; + + // Check for multiple antennas + if (hweight32(rxvect->antenna_set) > 1) { + int chain; + unsigned long chains = rxvect->antenna_set; + + for_each_set_bit(chain, &chains, IEEE80211_MAX_CHAINS) { + it_present_val |= + BIT(IEEE80211_RADIOTAP_EXT) | + BIT(IEEE80211_RADIOTAP_RADIOTAP_NAMESPACE); + put_unaligned_le32(it_present_val, it_present); + it_present++; + it_present_val = BIT(IEEE80211_RADIOTAP_ANTENNA) | + BIT(IEEE80211_RADIOTAP_DBM_ANTSIGNAL); + } + } + + // Check if vendor specific data is present + if (vend_rtap_len) { + it_present_val |= BIT(IEEE80211_RADIOTAP_VENDOR_NAMESPACE) | + BIT(IEEE80211_RADIOTAP_EXT); + put_unaligned_le32(it_present_val, it_present); + it_present++; + it_present_val = vend_it_present; + } + + put_unaligned_le32(it_present_val, it_present); + pos = (void *)(it_present + 1); + + // IEEE80211_RADIOTAP_TSFT + if (hwvect) { + rtap->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_TSFT); + // padding + while ((pos - (u8 *)rtap) & 7) + *pos++ = 0; + put_unaligned_le64((((u64)le32_to_cpu(hwvect->tsf_hi) << 32) + + (u64)le32_to_cpu(hwvect->tsf_lo)), pos); + pos += 8; + } + + // IEEE80211_RADIOTAP_FLAGS + rtap->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_FLAGS); + if (hwvect && (!hwvect->frm_successful_rx)) + *pos |= IEEE80211_RADIOTAP_F_BADFCS; + if (!rxvect->pre_type + && (rxvect->format_mod <= FORMATMOD_NON_HT_DUP_OFDM)) + *pos |= IEEE80211_RADIOTAP_F_SHORTPRE; + pos++; + + // IEEE80211_RADIOTAP_RATE + // check for HT, VHT or HE frames + if (rxvect->format_mod >= FORMATMOD_HE_SU) { + rate_idx = rxvect->he.mcs; + fec_coding = rxvect->he.fec; + stbc = rxvect->he.stbc; + aggregation = true; + *pos = 0; + } else if (rxvect->format_mod == FORMATMOD_VHT) { + rate_idx = rxvect->vht.mcs; + fec_coding = rxvect->vht.fec; + short_gi = rxvect->vht.short_gi; + stbc = rxvect->vht.stbc; + aggregation = true; + *pos = 0; + } else if (rxvect->format_mod > FORMATMOD_NON_HT_DUP_OFDM) { + rate_idx = rxvect->ht.mcs; + fec_coding = rxvect->ht.fec; + short_gi = rxvect->ht.short_gi; + stbc = rxvect->ht.stbc; + aggregation = rxvect->ht.aggregation; + *pos = 0; + } else { + struct ieee80211_supported_band* band = + rwnx_hw->wiphy->bands[phy_info->phy_band]; + rtap->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_RATE); + BUG_ON((rate_idx = legrates_lut[rxvect->leg_rate].idx) == -1); + if (phy_info->phy_band == NL80211_BAND_5GHZ) + rate_idx -= 4; /* rwnx_ratetable_5ghz[0].hw_value == 4 */ + *pos = DIV_ROUND_UP(band->bitrates[rate_idx].bitrate, 5); + } + pos++; + + // IEEE80211_RADIOTAP_CHANNEL + rtap->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_CHANNEL); + put_unaligned_le16(phy_info->phy_prim20_freq, pos); + pos += 2; + + if (phy_info->phy_band == NL80211_BAND_5GHZ) + put_unaligned_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ, pos); + else if (rxvect->format_mod > 
FORMATMOD_NON_HT_DUP_OFDM) + put_unaligned_le16(IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ, pos); + else + put_unaligned_le16(IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ, pos); + pos += 2; + + if (hweight32(rxvect->antenna_set) == 1) { + // IEEE80211_RADIOTAP_DBM_ANTSIGNAL + rtap->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL); + *pos++ = rxvect->rssi1; + + // IEEE80211_RADIOTAP_ANTENNA + rtap->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_ANTENNA); + *pos++ = rxvect->antenna_set; + } + + // IEEE80211_RADIOTAP_LOCK_QUALITY is missing + // IEEE80211_RADIOTAP_DB_ANTNOISE is missing + + // IEEE80211_RADIOTAP_RX_FLAGS + rtap->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_RX_FLAGS); + // 2 byte alignment + if ((pos - (u8 *)rtap) & 1) + *pos++ = 0; + put_unaligned_le16(0, pos); + //Right now, we only support fcs error (no RX_FLAG_FAILED_PLCP_CRC) + pos += 2; + + // Check if HT + if ((rxvect->format_mod == FORMATMOD_HT_MF) + || (rxvect->format_mod == FORMATMOD_HT_GF)) { + rtap->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_MCS); + *pos++ = IEEE80211_RADIOTAP_MCS_HAVE_MCS | + IEEE80211_RADIOTAP_MCS_HAVE_GI | + IEEE80211_RADIOTAP_MCS_HAVE_BW; + *pos = 0; + if (short_gi) + *pos |= IEEE80211_RADIOTAP_MCS_SGI; + if (rxvect->ch_bw == PHY_CHNL_BW_40) + *pos |= IEEE80211_RADIOTAP_MCS_BW_40; + if (rxvect->format_mod == FORMATMOD_HT_GF) + *pos |= IEEE80211_RADIOTAP_MCS_FMT_GF; + if (fec_coding) + *pos |= IEEE80211_RADIOTAP_MCS_FEC_LDPC; + #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0) + *pos++ |= stbc << 5; + #else + *pos++ |= stbc << IEEE80211_RADIOTAP_MCS_STBC_SHIFT; + #endif + *pos++ = rate_idx; + } + + // check for HT or VHT frames + if (aggregation && hwvect) { + // 4 byte alignment + while ((pos - (u8 *)rtap) & 3) + pos++; + rtap->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_AMPDU_STATUS); + put_unaligned_le32(hwvect->ampdu_cnt, pos); + pos += 4; + put_unaligned_le32(0, pos); + pos += 4; + } + + // Check for VHT frames + if (rxvect->format_mod == FORMATMOD_VHT) { + u16 vht_details = IEEE80211_RADIOTAP_VHT_KNOWN_GI | + IEEE80211_RADIOTAP_VHT_KNOWN_BANDWIDTH; + u8 vht_nss = rxvect->vht.nss + 1; + + rtap->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_VHT); + + if ((rxvect->ch_bw == PHY_CHNL_BW_160) + && phy_info->phy_center2_freq) + vht_details &= ~IEEE80211_RADIOTAP_VHT_KNOWN_BANDWIDTH; + put_unaligned_le16(vht_details, pos); + pos += 2; + + // flags + if (short_gi) + *pos |= IEEE80211_RADIOTAP_VHT_FLAG_SGI; + if (stbc) + *pos |= IEEE80211_RADIOTAP_VHT_FLAG_STBC; + pos++; + + // bandwidth + if (rxvect->ch_bw == PHY_CHNL_BW_40) + *pos++ = 1; + if (rxvect->ch_bw == PHY_CHNL_BW_80) + *pos++ = 4; + else if ((rxvect->ch_bw == PHY_CHNL_BW_160) + && phy_info->phy_center2_freq) + *pos++ = 0; //80P80 + else if (rxvect->ch_bw == PHY_CHNL_BW_160) + *pos++ = 11; + else // 20 MHz + *pos++ = 0; + + // MCS/NSS + *pos = (rate_idx << 4) | vht_nss; + pos += 4; + if (fec_coding) + #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 15, 0) + *pos |= 0x01; + #else + *pos |= IEEE80211_RADIOTAP_CODING_LDPC_USER0; + #endif + pos++; + // group ID + pos++; + // partial_aid + pos += 2; + } + + // Check for HE frames + if (rxvect->format_mod == FORMATMOD_HE_SU) { + struct ieee80211_radiotap_he he; + #define HE_PREP(f, val) cpu_to_le16(FIELD_PREP(IEEE80211_RADIOTAP_HE_##f, val)) + #define D1_KNOWN(f) cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_##f##_KNOWN) + #define D2_KNOWN(f) cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_##f##_KNOWN) + + he.data1 = D1_KNOWN(DATA_MCS) | D1_KNOWN(BSS_COLOR) | 
D1_KNOWN(BEAM_CHANGE) | + D1_KNOWN(UL_DL) | D1_KNOWN(CODING) | D1_KNOWN(STBC) | + D1_KNOWN(BW_RU_ALLOC) | D1_KNOWN(DOPPLER) | D1_KNOWN(DATA_DCM); + he.data2 = D2_KNOWN(GI) | D2_KNOWN(TXBF); + + if (stbc) { + he.data6 |= HE_PREP(DATA6_NSTS, 2); + he.data3 |= HE_PREP(DATA3_STBC, 1); + } else { + he.data6 |= HE_PREP(DATA6_NSTS, rxvect->he.nss); + } + + he.data3 |= HE_PREP(DATA3_BSS_COLOR, rxvect->he.bss_color); + he.data3 |= HE_PREP(DATA3_BEAM_CHANGE, rxvect->he.beam_change); + he.data3 |= HE_PREP(DATA3_UL_DL, rxvect->he.uplink_flag); + he.data3 |= HE_PREP(DATA3_BSS_COLOR, rxvect->he.bss_color); + he.data3 |= HE_PREP(DATA3_DATA_MCS, rxvect->he.mcs); + he.data3 |= HE_PREP(DATA3_DATA_DCM, rxvect->he.dcm); + he.data3 |= HE_PREP(DATA3_CODING, rxvect->he.fec); + + he.data5 |= HE_PREP(DATA5_GI, rxvect->he.gi_type); + he.data5 |= HE_PREP(DATA5_TXBF, rxvect->he.beamformed); + he.data5 |= HE_PREP(DATA5_LTF_SIZE, rxvect->he.he_ltf_type + 1); + + switch (rxvect->ch_bw) { + case PHY_CHNL_BW_20: + he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC, + IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_20MHZ); + break; + case PHY_CHNL_BW_40: + he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC, + IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_40MHZ); + break; + case PHY_CHNL_BW_80: + he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC, + IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_80MHZ); + break; + case PHY_CHNL_BW_160: + he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC, + IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_160MHZ); + break; + default: + WARN_ONCE(1, "Invalid SU BW %d\n", rxvect->ch_bw); + } + + he.data6 |= HE_PREP(DATA6_DOPPLER, rxvect->he.doppler); + + /* ensure 2 byte alignment */ + while ((pos - (u8 *)rtap) & 1) + pos++; + rtap->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_HE); + memcpy(pos, &he, sizeof(he)); + pos += sizeof(he); + } + + // Rx Chains + if (hweight32(rxvect->antenna_set) > 1) { + int chain; + unsigned long chains = rxvect->antenna_set; + u8 rssis[4] = {rxvect->rssi1, rxvect->rssi1, rxvect->rssi1, rxvect->rssi1}; + + for_each_set_bit(chain, &chains, IEEE80211_MAX_CHAINS) { + *pos++ = rssis[chain]; + *pos++ = chain; + } + } +} + +/** + * rwnx_rx_monitor - Build radiotap header for skb an send it to netdev + * + * @rwnx_hw: main driver data + * @rwnx_vif: vif that received the buffer + * @skb: sk_buff received + * @hw_rxhdr_ptr: Pointer to HW RX header + * @rtap_len: Radiotap Header length + * + * Add radiotap header to the receved skb and send it to netdev + */ +static int rwnx_rx_monitor(struct rwnx_hw *rwnx_hw, struct rwnx_vif *rwnx_vif, + struct sk_buff *skb, struct hw_rxhdr *hw_rxhdr_ptr, + u8 rtap_len) +{ + skb->dev = rwnx_vif->ndev; + + if (rwnx_vif->wdev.iftype != NL80211_IFTYPE_MONITOR) { + netdev_err(rwnx_vif->ndev, "not a monitor vif\n"); + return -1; + } + + /* Add RadioTap Header */ + rwnx_rx_add_rtap_hdr(rwnx_hw, skb, &hw_rxhdr_ptr->hwvect.rx_vect1, + &hw_rxhdr_ptr->phy_info, &hw_rxhdr_ptr->hwvect, + rtap_len, 0, 0); + + skb_reset_mac_header(skb); + skb->ip_summed = CHECKSUM_UNNECESSARY; + skb->pkt_type = PACKET_OTHERHOST; + skb->protocol = htons(ETH_P_802_2); + +#ifdef CONFIG_FILTER_TCP_ACK + filter_rx_tcp_ack(rwnx_hw, skb->data, cpu_to_le16(skb->len)); +#endif + + local_bh_disable(); + netif_receive_skb(skb); + local_bh_enable(); + + return 0; +} + +#ifdef AICWF_ARP_OFFLOAD +void arpoffload_proc(struct sk_buff *skb, struct rwnx_vif *rwnx_vif) +{ + struct iphdr *iphead = (struct iphdr *)(skb->data); + struct udphdr *udph; + struct DHCPInfo *dhcph; + + if(skb->protocol == htons(ETH_P_IP)) { // IP + 
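+		/*
+		 * What follows looks for a DHCP OFFER/ACK (UDP from server port 67
+		 * to client port 68, op == BOOTREPLY, magic cookie 0x63825363)
+		 * addressed to our own MAC.  The options area is scanned as the
+		 * usual RFC 2132 TLV list, roughly:
+		 *
+		 *   while (opt[off] != DHCP_OPTION_END && off < optlen) {
+		 *       if (opt[off] == DHCP_OPTION_MESSAGE_TYPE &&
+		 *           opt[off + 2] == DHCP_ACK)
+		 *           ... pass dhcph->yiaddr to rwnx_send_arpoffload_en_req() ...
+		 *       off += 2 + opt[off + 1];
+		 *   }
+		 *
+		 * ARP offload is enabled only for CCMP / BIP(AES-CMAC) pairwise
+		 * ciphers or when no cipher is configured at all.
+		 */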
if(iphead->protocol == IPPROTO_UDP) { // UDP + udph = (struct udphdr *)((u8 *)iphead + (iphead->ihl << 2)); + if((udph->source == __constant_htons(SERVER_PORT)) + && (udph->dest == __constant_htons(CLIENT_PORT))) { // DHCP offset/ack + AICWFDBG(LOGDEBUG, "dhcp: offer / ack\n"); + dhcph = (struct DHCPInfo *)((u8 *)udph + sizeof(struct udphdr)); + if(dhcph->cookie == htonl(DHCP_MAGIC) && dhcph->op == 2 && + !memcmp(dhcph->chaddr, rwnx_vif->ndev->dev_addr, 6)) { // match magic word + u32 length = ntohs(udph->len) - sizeof(struct udphdr) - offsetof(struct DHCPInfo, options); + u16 offset = 0; + u8 *option = dhcph->options; + while (option[offset]!= DHCP_OPTION_END && offsetsta.paired_cipher_type == WLAN_CIPHER_SUITE_CCMP || \ + rwnx_vif->sta.paired_cipher_type == WLAN_CIPHER_SUITE_AES_CMAC || \ + ((rwnx_vif->sta.group_cipher_type == 0xff) && \ + (rwnx_vif->sta.paired_cipher_type == 0xff))) + rwnx_send_arpoffload_en_req(rwnx_vif->rwnx_hw, rwnx_vif, dhcph->yiaddr, 1); + else + rwnx_send_arpoffload_en_req(rwnx_vif->rwnx_hw, rwnx_vif, dhcph->yiaddr, 0); + } + } + offset += 2 + option[offset+1]; + } + } + } + } + } +} +#endif + +#ifdef AICWF_RX_REORDER +void reord_rxframe_free(spinlock_t *lock, struct list_head *q, struct list_head *list) +{ + spin_lock_bh(lock); + list_add(list, q); + spin_unlock_bh(lock); +} + +struct recv_msdu *reord_rxframe_alloc(spinlock_t *lock, struct list_head *q) +{ + struct recv_msdu *rxframe; + + spin_lock_bh(lock); + if (list_empty(q)) { + spin_unlock_bh(lock); + return NULL; + } + rxframe = list_entry(q->next, struct recv_msdu, rxframe_list); + list_del_init(q->next); + spin_unlock_bh(lock); + return rxframe; +} + +struct reord_ctrl_info *reord_init_sta(struct aicwf_rx_priv* rx_priv, const u8 *mac_addr) +{ + u8 i = 0; + struct reord_ctrl *preorder_ctrl = NULL; + struct reord_ctrl_info *reord_info; +#ifdef AICWF_SDIO_SUPPORT + struct aicwf_bus *bus_if = rx_priv->sdiodev->bus_if; +#else + struct aicwf_bus *bus_if = rx_priv->usbdev->bus_if; +#endif + + if (bus_if->state == BUS_DOWN_ST || rx_priv == NULL) { + printk("bad stat!\n"); + return NULL; + } + + AICWFDBG(LOGINFO, "reord_init_sta:%pM\n", mac_addr); + reord_info = kmalloc(sizeof(struct reord_ctrl_info), GFP_ATOMIC); + if (!reord_info) + return NULL; + + memcpy(reord_info->mac_addr, mac_addr, ETH_ALEN); + for (i=0; i < 8; i++) { + preorder_ctrl = &reord_info->preorder_ctrl[i]; + preorder_ctrl->enable = true; + preorder_ctrl->ind_sn = 0xffff; + preorder_ctrl->wsize_b = AICWF_REORDER_WINSIZE; + preorder_ctrl->rx_priv= rx_priv; + INIT_LIST_HEAD(&preorder_ctrl->reord_list); + spin_lock_init(&preorder_ctrl->reord_list_lock); +#if LINUX_VERSION_CODE < KERNEL_VERSION(4,14,0) + init_timer(&preorder_ctrl->reord_timer); + preorder_ctrl->reord_timer.data = (ulong) preorder_ctrl; + preorder_ctrl->reord_timer.function = reord_timeout_handler; +#else + timer_setup(&preorder_ctrl->reord_timer, reord_timeout_handler, 0); +#endif + INIT_WORK(&preorder_ctrl->reord_timer_work, reord_timeout_worker); + } + + return reord_info; +} + +int reord_flush_tid(struct aicwf_rx_priv *rx_priv, struct sk_buff *skb, u8 tid) +{ + struct reord_ctrl_info *reord_info; + struct reord_ctrl *preorder_ctrl; + struct rwnx_vif *rwnx_vif = (struct rwnx_vif *)rx_priv->rwnx_vif; + struct ethhdr *eh = (struct ethhdr *)(skb->data); + u8 *mac; + unsigned long flags; + u8 found = 0; + struct list_head *phead, *plist; + struct recv_msdu *prframe; + int ret; + + if((rwnx_vif->wdev.iftype == NL80211_IFTYPE_STATION) || (rwnx_vif->wdev.iftype == 
NL80211_IFTYPE_P2P_CLIENT)) + mac = eh->h_dest; + else if((rwnx_vif->wdev.iftype == NL80211_IFTYPE_AP) || (rwnx_vif->wdev.iftype == NL80211_IFTYPE_P2P_GO)) + mac = eh->h_source; + else { + printk("error mode:%d!\n", rwnx_vif->wdev.iftype); + dev_kfree_skb(skb); + return -1; + } + + spin_lock_bh(&rx_priv->stas_reord_lock); + list_for_each_entry(reord_info, &rx_priv->stas_reord_list, list) { + if (!memcmp(mac, reord_info->mac_addr, ETH_ALEN)) { + found = 1; + preorder_ctrl = &reord_info->preorder_ctrl[tid]; + break; + } + } + if (!found) { + spin_unlock_bh(&rx_priv->stas_reord_lock); + return 0; + } + spin_unlock_bh(&rx_priv->stas_reord_lock); + + if(preorder_ctrl->enable == false) + return 0; + spin_lock_irqsave(&preorder_ctrl->reord_list_lock, flags); + phead = &preorder_ctrl->reord_list; + while (1) { + if (list_empty(phead)) { + break; + } + plist = phead->next; + prframe = list_entry(plist, struct recv_msdu, reord_pending_list); + reord_single_frame_ind(rx_priv, prframe); + list_del_init(&(prframe->reord_pending_list)); + } + + AICWFDBG(LOGINFO, "flush:tid=%d", tid); + preorder_ctrl->enable = false; + spin_unlock_irqrestore(&preorder_ctrl->reord_list_lock, flags); + if (timer_pending(&preorder_ctrl->reord_timer)) + ret = del_timer_sync(&preorder_ctrl->reord_timer); + cancel_work_sync(&preorder_ctrl->reord_timer_work); + + return 0; +} + +void reord_deinit_sta(struct aicwf_rx_priv* rx_priv, struct reord_ctrl_info *reord_info) +{ + u8 i = 0; + //unsigned long flags; + struct reord_ctrl *preorder_ctrl = NULL; + int ret; + + if (rx_priv == NULL) { + txrx_err("bad rx_priv!\n"); + return; + } + + AICWFDBG(LOGINFO, "%s\n", __func__); + + for (i=0; i < 8; i++) { + struct recv_msdu *req, *next; + preorder_ctrl = &reord_info->preorder_ctrl[i]; + if(preorder_ctrl->enable){ + preorder_ctrl->enable = false; + if (timer_pending(&preorder_ctrl->reord_timer)) { + ret = del_timer_sync(&preorder_ctrl->reord_timer); + } + cancel_work_sync(&preorder_ctrl->reord_timer_work); + } + + spin_lock_bh(&preorder_ctrl->reord_list_lock); + list_for_each_entry_safe(req, next, &preorder_ctrl->reord_list, reord_pending_list) { + list_del_init(&req->reord_pending_list); + if(req->pkt != NULL) + dev_kfree_skb(req->pkt); + req->pkt = NULL; + reord_rxframe_free(&rx_priv->freeq_lock, &rx_priv->rxframes_freequeue, &req->rxframe_list); + } + + AICWFDBG(LOGINFO, "reord dinit in_irq():%d in_atomic:%d in_softirq:%d\r\n", (int)in_irq() + ,(int)in_atomic(), (int)in_softirq()); + spin_unlock_bh(&preorder_ctrl->reord_list_lock); + } + + spin_lock_bh(&rx_priv->stas_reord_lock); + list_del(&reord_info->list); + spin_unlock_bh(&rx_priv->stas_reord_lock); + kfree(reord_info); +} + +int reord_single_frame_ind(struct aicwf_rx_priv *rx_priv, struct recv_msdu *prframe) +{ + struct list_head *rxframes_freequeue = NULL; + struct sk_buff *skb = NULL; + struct rwnx_vif *rwnx_vif = (struct rwnx_vif *)rx_priv->rwnx_vif; + struct sk_buff_head list; + struct sk_buff *rx_skb; + + rxframes_freequeue = &rx_priv->rxframes_freequeue; + skb = prframe->pkt; + + #ifdef CONFIG_BR_SUPPORT + void *br_port = NULL; + + if (1) {//(check_fwstate(pmlmepriv, WIFI_STATION_STATE | WIFI_ADHOC_STATE) == _TRUE) { + /* Insert NAT2.5 RX here! 
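+		 * (when the netdev is enslaved to a bridge, rx_handler_data is
+		 * non-NULL and the frame is first run through nat25_handle_frame();
+		 * its return value is currently ignored)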
*/ +#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 35)) + br_port = rwnx_vif->ndev->br_port; +#else + rcu_read_lock(); + br_port = rcu_dereference(rwnx_vif->ndev->rx_handler_data); + rcu_read_unlock(); +#endif + + if (br_port) { + int nat25_handle_frame(struct rwnx_vif *vif, struct sk_buff *skb); + + if (nat25_handle_frame(rwnx_vif, skb) == -1) { + /* priv->ext_stats.rx_data_drops++; */ + /* DEBUG_ERR("RX DROP: nat25_handle_frame fail!\n"); */ + /* return FAIL; */ + } + } + } +#endif /* CONFIG_BR_SUPPORT */ + + if (skb == NULL) { + txrx_err("skb is NULL\n"); + return -1; + } + + if(prframe->is_ap_reord) { + if(prframe->ap_resend_cnt) { + struct sk_buff *resend_skb = prframe->first_resend_skb; + struct sk_buff *resend_next_skb; + u8 resend_cnt = 0; + while(resend_skb) { + resend_cnt++; + resend_next_skb = resend_skb->next; + dev_kfree_skb(resend_skb); + resend_skb = resend_next_skb; + } + if(resend_cnt != prframe->ap_resend_cnt) + printk("resend cnt error: %d,%d\n", resend_cnt, prframe->ap_resend_cnt); + + if(!prframe->ap_fwd_cnt) { + prframe->pkt = NULL; + reord_rxframe_free(&rx_priv->freeq_lock, rxframes_freequeue, &prframe->rxframe_list); + return 0; + } //else + // printk("both has resend & fwd\n"); + } + } else { + if(!prframe->forward) { + dev_kfree_skb(skb); + prframe->pkt = NULL; + reord_rxframe_free(&rx_priv->freeq_lock, rxframes_freequeue, &prframe->rxframe_list); + return 0; + } + } + + __skb_queue_head_init(&list); + if(!prframe->is_ap_reord) + { + if(prframe->is_amsdu) { + rwnx_rxdata_process_amsdu(rwnx_vif->rwnx_hw, skb, rwnx_vif->vif_index, &list); //rxhdr not used below since skb free! + } else { + __skb_queue_head(&list, skb); + } + } else { + struct sk_buff *fwd_skb = prframe->first_fwd_skb; + struct sk_buff *fwd_next_skb; + u8 fwd_cnt = 0; + while(fwd_skb) { + //printk("single ind:%p\n", fwd_skb); + fwd_cnt++; + fwd_next_skb = fwd_skb->next; + __skb_queue_tail(&list, fwd_skb); + fwd_skb = fwd_next_skb; + } + if(fwd_cnt != prframe->ap_fwd_cnt) + printk("fwd cnt error: %d,%d\n", fwd_cnt, prframe->ap_fwd_cnt); + } + + while (!skb_queue_empty(&list)) { + rx_skb = __skb_dequeue(&list); + + rwnx_vif->net_stats.rx_packets++; + rwnx_vif->net_stats.rx_bytes += rx_skb->len; + //printk("netif sn=%d, len=%d\n", precv_frame->attrib.seq_num, skb->len); + rx_skb->dev = rwnx_vif->ndev; + rwnx_skb_align_8bytes(rx_skb); + rx_skb->protocol = eth_type_trans(rx_skb, rwnx_vif->ndev); + +#ifdef AICWF_ARP_OFFLOAD + if(RWNX_VIF_TYPE(rwnx_vif) == NL80211_IFTYPE_STATION || RWNX_VIF_TYPE(rwnx_vif) == NL80211_IFTYPE_P2P_CLIENT) { + arpoffload_proc(rx_skb, rwnx_vif); + } +#endif + memset(rx_skb->cb, 0, sizeof(rx_skb->cb)); + +#ifdef CONFIG_FILTER_TCP_ACK + filter_rx_tcp_ack(rwnx_vif->rwnx_hw, rx_skb->data, cpu_to_le16(rx_skb->len)); +#endif + +#ifdef CONFIG_RX_NETIF_RECV_SKB//AIDEN test + local_bh_disable(); + netif_receive_skb(rx_skb); + local_bh_enable(); +#else + if (in_interrupt()) { + netif_rx(rx_skb); + } else { + /* + * If the receive is not processed inside an ISR, the softirqd must be woken explicitly to service the NET_RX_SOFTIRQ. + * * In 2.6 kernels, this is handledby netif_rx_ni(), but in earlier kernels, we need to do it manually. 
+ */ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) + netif_rx_ni(rx_skb); +#else + ulong flags; + netif_rx(rx_skb); + local_irq_save(flags); + RAISE_RX_SOFTIRQ(); + local_irq_restore(flags); +#endif + } +#endif//CONFIG_RX_NETIF_RECV_SKB + } + + prframe->pkt = NULL; + reord_rxframe_free(&rx_priv->freeq_lock, rxframes_freequeue, &prframe->rxframe_list); + + return 0; +} + +bool reord_rxframes_process(struct aicwf_rx_priv *rx_priv, struct reord_ctrl *preorder_ctrl, int bforced) +{ + struct list_head *phead, *plist; + struct recv_msdu *prframe; + bool bPktInBuf = false; + + if (bforced == true) { + phead = &preorder_ctrl->reord_list; + if (list_empty(phead)) { + return false; + } + + plist = phead->next; + prframe = list_entry(plist, struct recv_msdu, reord_pending_list); + preorder_ctrl->ind_sn = prframe->seq_num; + } + + phead = &preorder_ctrl->reord_list; + if (list_empty(phead)) { + return bPktInBuf; + } + + list_for_each_entry(prframe, phead, reord_pending_list) { + if (!SN_LESS(preorder_ctrl->ind_sn, prframe->seq_num)) { + if (SN_EQUAL(preorder_ctrl->ind_sn, prframe->seq_num)) { + preorder_ctrl->ind_sn = (preorder_ctrl->ind_sn + 1) & 0xFFF; + } + } else { + bPktInBuf = true; + break; + } + } + + return bPktInBuf; +} + +void reord_rxframes_ind(struct aicwf_rx_priv *rx_priv, + struct reord_ctrl *preorder_ctrl) +{ + struct list_head *phead, *plist; + struct recv_msdu *prframe; + + //spin_lock_bh(&preorder_ctrl->reord_list_lock);//AIDEN + phead = &preorder_ctrl->reord_list; + while (1) { + //spin_lock_bh(&preorder_ctrl->reord_list_lock); + if (list_empty(phead)) { + //spin_unlock_bh(&preorder_ctrl->reord_list_lock); + break; + } + + plist = phead->next; + prframe = list_entry(plist, struct recv_msdu, reord_pending_list); + + if (!SN_LESS(preorder_ctrl->ind_sn, prframe->seq_num)) { + list_del_init(&(prframe->reord_pending_list)); + //spin_unlock_bh(&preorder_ctrl->reord_list_lock); + reord_single_frame_ind(rx_priv, prframe); + } else { + //spin_unlock_bh(&preorder_ctrl->reord_list_lock); + break; + } + } + //spin_unlock_bh(&preorder_ctrl->reord_list_lock);//AIDEN +} + +int reorder_timeout = REORDER_UPDATE_TIME; +module_param(reorder_timeout, int, 0660); + + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4,14,0) +void reord_timeout_handler (ulong data) +#else +void reord_timeout_handler (struct timer_list *t) +#endif +{ +#if LINUX_VERSION_CODE < KERNEL_VERSION(4,14,0) + struct reord_ctrl *preorder_ctrl = (struct reord_ctrl *)data; +#else + struct reord_ctrl *preorder_ctrl = from_timer(preorder_ctrl, t, reord_timer); +#endif + + AICWFDBG(LOGTRACE, "%s Enter \r\n", __func__); + + + if (g_rwnx_plat->usbdev->state == USB_DOWN_ST) { + usb_err("bus is down\n"); + return; + } +#if 0//AIDEN + struct aicwf_rx_priv *rx_priv = preorder_ctrl->rx_priv; + + if (reord_rxframes_process(rx_priv, preorder_ctrl, true)==true) { + mod_timer(&preorder_ctrl->reord_timer, jiffies + msecs_to_jiffies(reorder_timeout/*REORDER_UPDATE_TIME*/)); + } +#endif + + if(!work_pending(&preorder_ctrl->reord_timer_work)) + schedule_work(&preorder_ctrl->reord_timer_work); + +} + +void reord_timeout_worker(struct work_struct *work) +{ + struct reord_ctrl *preorder_ctrl = container_of(work, struct reord_ctrl, reord_timer_work); + struct aicwf_rx_priv *rx_priv = preorder_ctrl->rx_priv; + + spin_lock_bh(&preorder_ctrl->reord_list_lock); +#if 1//AIDEN + if (reord_rxframes_process(rx_priv, preorder_ctrl, true)==true) { + mod_timer(&preorder_ctrl->reord_timer, jiffies + msecs_to_jiffies(reorder_timeout/*REORDER_UPDATE_TIME*/)); + } +#endif 
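+	/*
+	 * On timeout the window is forced forward: reord_rxframes_process()
+	 * with bforced == true moves ind_sn up to the first queued sequence
+	 * number (and re-arms the timer if frames are still pending), then
+	 * reord_rxframes_ind() below delivers everything that is now in order.
+	 */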
+ + reord_rxframes_ind(rx_priv, preorder_ctrl); + spin_unlock_bh(&preorder_ctrl->reord_list_lock); + + return ; +} + +int reord_process_unit(struct recv_msdu *pframe, struct aicwf_rx_priv *rx_priv, struct sk_buff *skb, u16 seq_num, u8 tid, u8 forward, u8 is_amsdu) +{ + int ret=0; + u8 *mac; + struct reord_ctrl *preorder_ctrl; + struct reord_ctrl_info *reord_info; + struct rwnx_vif *rwnx_vif = (struct rwnx_vif *)rx_priv->rwnx_vif; + struct ethhdr *eh = (struct ethhdr *)(skb->data); + u8 *da = eh->h_dest; + u8 is_mcast = ((*da) & 0x01)? 1 : 0; + + #if 0 + struct recv_msdu *pframe; + pframe = reord_rxframe_alloc(&rx_priv->freeq_lock, &rx_priv->rxframes_freequeue); + if (!pframe) { + dev_kfree_skb(skb); + return -1; + } + #endif + + INIT_LIST_HEAD(&pframe->reord_pending_list); + pframe->seq_num = seq_num; + pframe->tid = tid; + pframe->rx_data = skb->data; + //pframe->len = skb->len; + pframe->pkt = skb; + pframe->forward = forward; + preorder_ctrl = pframe->preorder_ctrl; + pframe->is_amsdu = is_amsdu; + + if ((ntohs(eh->h_proto) == ETH_P_PAE) || is_mcast) + return reord_single_frame_ind(rx_priv, pframe); + + if((rwnx_vif->wdev.iftype == NL80211_IFTYPE_STATION) || (rwnx_vif->wdev.iftype == NL80211_IFTYPE_P2P_CLIENT)) + mac = eh->h_dest; + else if((rwnx_vif->wdev.iftype == NL80211_IFTYPE_AP) || (rwnx_vif->wdev.iftype == NL80211_IFTYPE_P2P_GO)) + mac = eh->h_source; + else { + dev_kfree_skb(skb); + return -1; + } + + spin_lock_bh(&rx_priv->stas_reord_lock); + list_for_each_entry(reord_info, &rx_priv->stas_reord_list, list) { + if (!memcmp(mac, reord_info->mac_addr, ETH_ALEN)) { + preorder_ctrl = &reord_info->preorder_ctrl[pframe->tid]; + break; + } + } + + + if (&reord_info->list == &rx_priv->stas_reord_list) {//first time??? + reord_info = reord_init_sta(rx_priv, mac); + //reord_info has 8 preorder_ctrl, + //There is a one-to-one matched preorder_ctrl and tid + if (!reord_info) { + spin_unlock_bh(&rx_priv->stas_reord_lock); + reord_single_frame_ind(rx_priv, pframe); + return -1; + } + list_add_tail(&reord_info->list, &rx_priv->stas_reord_list); + preorder_ctrl = &reord_info->preorder_ctrl[pframe->tid]; + } else { + if(preorder_ctrl->enable == false) { + preorder_ctrl->enable = true; + preorder_ctrl->ind_sn = 0xffff; + preorder_ctrl->wsize_b = AICWF_REORDER_WINSIZE; + preorder_ctrl->rx_priv= rx_priv; + } + } + spin_unlock_bh(&rx_priv->stas_reord_lock); + + if (preorder_ctrl->enable == false) { + preorder_ctrl->ind_sn = pframe->seq_num; + spin_lock_bh(&preorder_ctrl->reord_list_lock);//AIDEN + reord_single_frame_ind(rx_priv, pframe); + spin_unlock_bh(&preorder_ctrl->reord_list_lock);//AIDEN + preorder_ctrl->ind_sn = (preorder_ctrl->ind_sn + 1)%4096; + return 0; + } + + spin_lock_bh(&preorder_ctrl->reord_list_lock); + if (reord_need_check(preorder_ctrl, pframe->seq_num)) { +#if 1 + if(pframe->rx_data[42] == 0x80){//this is rtp package + if(pframe->seq_num == preorder_ctrl->ind_sn){ + //printk("%s pframe->seq_num1:%d \r\n", __func__, pframe->seq_num); + reord_single_frame_ind(rx_priv, pframe);//not need to reorder + }else{ + //printk("%s free pframe->seq_num:%d \r\n", __func__, pframe->seq_num); + if (pframe->pkt){ + dev_kfree_skb(pframe->pkt); + pframe->pkt = NULL; + } + reord_rxframe_free(&rx_priv->freeq_lock, &rx_priv->rxframes_freequeue, &pframe->rxframe_list); + } + }else{ + //printk("%s pframe->seq_num2:%d \r\n", __func__, pframe->seq_num); + reord_single_frame_ind(rx_priv, pframe);//not need to reorder + } +#else + reord_single_frame_ind(rx_priv, pframe);//not need to reor +#endif + + 
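+		/*
+		 * A non-zero reord_need_check() means the sequence number is behind
+		 * the current window start (an old or duplicate frame), so it was
+		 * delivered (or, for an out-of-order RTP packet, dropped) right away
+		 * instead of being queued for reordering.
+		 */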
spin_unlock_bh(&preorder_ctrl->reord_list_lock); + return 0; + } + + if (reord_rxframe_enqueue(preorder_ctrl, pframe)) { + spin_unlock_bh(&preorder_ctrl->reord_list_lock); + goto fail; + } + + if (reord_rxframes_process(rx_priv, preorder_ctrl, false) == true) { + if (!timer_pending(&preorder_ctrl->reord_timer)) {//if this timer not countdown, mod_timer timer to start count down + ret = mod_timer(&preorder_ctrl->reord_timer, jiffies + msecs_to_jiffies(reorder_timeout/*REORDER_UPDATE_TIME*/)); + } + } else { + if(timer_pending(&preorder_ctrl->reord_timer)) { + ret = del_timer(&preorder_ctrl->reord_timer); + } + } + + reord_rxframes_ind(rx_priv, preorder_ctrl); + + spin_unlock_bh(&preorder_ctrl->reord_list_lock); + + + + return 0; + +fail: + if (pframe->pkt){ + dev_kfree_skb(pframe->pkt); + pframe->pkt = NULL; + } + reord_rxframe_free(&rx_priv->freeq_lock, &rx_priv->rxframes_freequeue, &pframe->rxframe_list); + return ret; +} + +int reord_need_check(struct reord_ctrl *preorder_ctrl, u16 seq_num) +{ + u8 wsize = preorder_ctrl->wsize_b; + u16 wend = (preorder_ctrl->ind_sn + wsize -1) & 0xFFF;//0xFFF: 12 bits for seq num + + //first time: wend = 0 + 64 - 1= 63 + + if (preorder_ctrl->ind_sn == 0xFFFF) {//first time + preorder_ctrl->ind_sn = seq_num; + } + + if( SN_LESS(seq_num, preorder_ctrl->ind_sn)) {//first time seq_num = preorder_ctrl->ind_sn + return -1;//no need to reord + } + + if (SN_EQUAL(seq_num, preorder_ctrl->ind_sn)) { + preorder_ctrl->ind_sn = (preorder_ctrl->ind_sn + 1) & 0xFFF; + } else if (SN_LESS(wend, seq_num)) { + if (seq_num >= (wsize-1)) + preorder_ctrl->ind_sn = seq_num-(wsize-1); + else + preorder_ctrl->ind_sn = 0xFFF - (wsize - (seq_num + 1)) + 1; + } + + return 0; +} + +int reord_rxframe_enqueue(struct reord_ctrl *preorder_ctrl, struct recv_msdu *prframe) +{ + struct list_head *preord_list = &preorder_ctrl->reord_list; + struct list_head *phead, *plist; + struct recv_msdu *pnextrframe; + +//first time:not any prframe in preord_list, so phead = phead->next + phead = preord_list; + plist = phead->next; + + while(phead != plist) { + pnextrframe = list_entry(plist, struct recv_msdu, reord_pending_list); + if(SN_LESS(pnextrframe->seq_num, prframe->seq_num)) { + plist = plist->next; + continue; + } else if(SN_EQUAL(pnextrframe->seq_num, prframe->seq_num)) { + return -1; + } else { + break; + } + } + + //link prframe in plist + list_add_tail(&(prframe->reord_pending_list), plist); + + return 0; +} +#endif /* AICWF_RX_REORDER */ + +void remove_sec_hdr_mgmt_frame(struct hw_rxhdr *hw_rxhdr,struct sk_buff *skb) +{ + u8 hdr_len = 24; + u8 mgmt_header[24] = {0}; + + if(!hw_rxhdr->hwvect.ga_frame){ + if(((skb->data[0] & 0x0C) == 0) && (skb->data[1] & 0x40) == 0x40){ //protect management frame + printk("frame type %x\n",skb->data[0]); + if(hw_rxhdr->hwvect.decr_status == RWNX_RX_HD_DECR_CCMP128){ + memcpy(mgmt_header,skb->data,hdr_len); + skb_pull(skb,8); + memcpy(skb->data,mgmt_header,hdr_len); + hw_rxhdr->hwvect.len -= 8; + } else { + printk("unsupport decr_status:%d\n",hw_rxhdr->hwvect.decr_status); + } + } + } +} + +void rwnx_rxdata_process_amsdu(struct rwnx_hw *rwnx_hw, struct sk_buff *skb, u8 vif_idx, + struct sk_buff_head *list) +{ + u16 len_alligned = 0; + u16 sublen = 0; + struct sk_buff *sub_skb = NULL; + struct rwnx_vif *rwnx_vif; + + //if (is_amsdu) + { + //skb_pull(skb, pull_len-8); + /* |amsdu sub1 | amsdu sub2 | ... 
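+		 *
+		 * Each A-MSDU subframe is | DA(6) | SA(6) | length(2) | LLC/SNAP +
+		 * payload |, padded to a 4-byte boundary (except the last one).
+		 * The loop below reads the length at offsets 12/13, rebuilds an
+		 * Ethernet II frame in a fresh skb (DA, SA, then the payload
+		 * starting after the 6-byte LLC prefix so the ethertype lands at
+		 * offset 12), and then advances by the padded subframe size.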
*/ + len_alligned = 0; + sublen = 0; + sub_skb = NULL; + while (skb->len > 16) { + sublen = (skb->data[12]<<8)|(skb->data[13]); + if (skb->len > (sublen+14)) + len_alligned = roundup(sublen + 14, 4); + else if (skb->len == (sublen+14)) + len_alligned = sublen+14; + else { + printk("accroding to amsdu: this will not happen\n"); + break; + } + //printk("sublen = %d, %x, %x, %x, %x\r\n", sublen,skb->data[0], skb->data[1], skb->data[12], skb->data[13]); +#if 1 + sub_skb = __dev_alloc_skb(sublen - 6 + 12, GFP_ATOMIC); + if(!sub_skb){ + printk("sub_skb alloc fail:%d\n", sublen); + break; + } + skb_put(sub_skb, sublen - 6 + 12); + memcpy(sub_skb->data, skb->data, MAC_ADDR_LEN); + memcpy(&sub_skb->data[6], &skb->data[6], MAC_ADDR_LEN); + memcpy(&sub_skb->data[12], &skb->data[14 + 6], sublen - 6); + + rwnx_vif = rwnx_rx_get_vif(rwnx_hw, vif_idx); + if (!rwnx_vif) { + printk("frame received but no active vif (%d), skb->len:%u\n", vif_idx, skb->len); + //dev_kfree_skb(sub_skb); + break; + } + __skb_queue_tail(list, sub_skb); + + //printk("a:%p\n", sub_skb); + //if (!rwnx_rx_data_skb(rwnx_hw, rwnx_vif, sub_skb, hw_rxhdr)) + // dev_kfree_skb(sub_skb); +#endif + skb_pull(skb, len_alligned); + } + //printk("af:%p\n", skb); + //printk("cnt=%d\n", cnt); + dev_kfree_skb(skb); + //return 0; + } +} + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0) +void defrag_timeout_cb(ulong data) +#else +void defrag_timeout_cb(struct timer_list *t) +#endif +{ + struct defrag_ctrl_info *defrag_ctrl = NULL; +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0) + defrag_ctrl = (struct defrag_ctrl_info *)data; +#else + defrag_ctrl = from_timer(defrag_ctrl, t, defrag_timer); +#endif + + printk("%s:%p\r\n", __func__, defrag_ctrl); + spin_lock_bh(&defrag_ctrl->rwnx_hw->defrag_lock); + list_del_init(&defrag_ctrl->list); + dev_kfree_skb(defrag_ctrl->skb); + kfree(defrag_ctrl); + spin_unlock_bh(&defrag_ctrl->rwnx_hw->defrag_lock); +} + +extern void rwnx_data_dump(char* tag, void* data, unsigned long len); + +u8 rwnx_rxdataind_aicwf(struct rwnx_hw *rwnx_hw, void *hostid, void *rx_priv) +{ + struct hw_rxhdr *hw_rxhdr; + struct rxdesc_tag *rxdesc = NULL; + struct rwnx_vif *rwnx_vif; + struct sk_buff *skb = hostid; + int msdu_offset = sizeof(struct hw_rxhdr) + 2; + u16_l status = 0; + struct aicwf_rx_priv *rx_priv_tmp; + u8 hdr_len = 24; + u8 ra[MAC_ADDR_LEN] = {0}; + u8 ta[MAC_ADDR_LEN] = {0}; + u8 ether_type[2] = {0}; + u8 pull_len = 0; + u16 seq_num = 0; + u8_l frag_num = 0; + u8 tid = 0; + u8 is_qos = 0; + u8 is_frag = 0; + struct defrag_ctrl_info *defrag_info = NULL; + struct defrag_ctrl_info *defrag_info_tmp = NULL; + struct sk_buff *skb_tmp = NULL; + int ret; + u8 sta_idx = 0; + u16_l frame_ctrl; + u8 is_amsdu = 0; + bool resend = false, forward = true; + const struct ethhdr *eth; + + REG_SW_SET_PROFILING(rwnx_hw, SW_PROF_RWNXDATAIND); + hw_rxhdr = (struct hw_rxhdr *)skb->data; + +#ifdef AICWF_RX_REORDER + if(hw_rxhdr->is_monitor_vif) { + status = RX_STAT_MONITOR; + //printk("monitor rx\n"); + } +#endif + + if(hw_rxhdr->flags_upload) + status |= RX_STAT_FORWARD; + + /* Check if we need to delete the buffer */ + if (status & RX_STAT_DELETE) { + /* Remove the SK buffer from the rxbuf_elems table */ + #if 0 + rwnx_ipc_rxbuf_elem_pull(rwnx_hw, skb); + #endif + /* Free the buffer */ + dev_kfree_skb(skb); + goto end; + } + + /* Check if we need to forward the buffer coming from a monitor interface */ + if (status & RX_STAT_MONITOR) { + struct sk_buff *skb_monitor = NULL; + struct hw_rxhdr hw_rxhdr_copy; + u8 rtap_len; + u16 frm_len = 
0; + + //Check if monitor interface exists and is open + rwnx_vif = rwnx_rx_get_vif(rwnx_hw, rwnx_hw->monitor_vif); + if (!rwnx_vif) { + dev_err(rwnx_hw->dev, "Received monitor frame but there is no monitor interface open, skb->len:%u\n", skb->len); + goto check_len_update; + } + + rwnx_rx_vector_convert(rwnx_hw, + &hw_rxhdr->hwvect.rx_vect1, + &hw_rxhdr->hwvect.rx_vect2); + rtap_len = rwnx_rx_rtap_hdrlen(&hw_rxhdr->hwvect.rx_vect1, false); + + if (status == RX_STAT_MONITOR) + { + /* Remove the SK buffer from the rxbuf_elems table. It will also + unmap the buffer and then sync the buffer for the cpu */ + //rwnx_ipc_rxbuf_elem_pull(rwnx_hw, skb); + skb->data += (msdu_offset + 2); //sdio/usb word allign + + //Save frame length + frm_len = le32_to_cpu(hw_rxhdr->hwvect.len); + + // Reserve space for frame + skb->len = frm_len; + + //Check if there is enough space to add the radiotap header + if (skb_headroom(skb) > rtap_len) { + + skb_monitor = skb; + + //Duplicate the HW Rx Header to override with the radiotap header + memcpy(&hw_rxhdr_copy, hw_rxhdr, sizeof(hw_rxhdr_copy)); + + hw_rxhdr = &hw_rxhdr_copy; + } else { + //Duplicate the skb and extend the headroom + skb_monitor = skb_copy_expand(skb, rtap_len, 0, GFP_ATOMIC); + + //Reset original skb->data pointer + skb->data = (void*) hw_rxhdr; + } + } else { + #ifdef CONFIG_RWNX_MON_DATA + skb_monitor = skb_copy_expand(skb, rtap_len, 0, GFP_ATOMIC); + skb_monitor->data += (msdu_offset + 2); //sdio/usb word allign + + //Save frame length + frm_len = le32_to_cpu(hw_rxhdr->hwvect.len); + #endif + } + + skb_reset_tail_pointer(skb); + skb->len = 0; + skb_reset_tail_pointer(skb_monitor); + skb_monitor->len = 0; + skb_put(skb_monitor, frm_len); + + if (rwnx_rx_monitor(rwnx_hw, rwnx_vif, skb_monitor, hw_rxhdr, rtap_len)) + dev_kfree_skb(skb_monitor); + + if (status == RX_STAT_MONITOR) { + if (skb_monitor != skb) { + dev_kfree_skb(skb); + } + } + } + +check_len_update: + /* Check if we need to update the length */ + if (status & RX_STAT_LEN_UPDATE) { + if (rxdesc){ + hw_rxhdr->hwvect.len = rxdesc->frame_len; + } + + if (status & RX_STAT_ETH_LEN_UPDATE) { + /* Update Length Field inside the Ethernet Header */ + struct ethhdr *hdr = (struct ethhdr *)((u8 *)hw_rxhdr + msdu_offset); + + if (rxdesc){ + hdr->h_proto = htons(rxdesc->frame_len - sizeof(struct ethhdr)); + } + } + + goto end; + } + + /* Check if it must be discarded after informing upper layer */ + if (status & RX_STAT_SPURIOUS) { + struct ieee80211_hdr *hdr; + + hdr = (struct ieee80211_hdr *)(skb->data + msdu_offset); + rwnx_vif = rwnx_rx_get_vif(rwnx_hw, hw_rxhdr->flags_vif_idx); + if (rwnx_vif) { + cfg80211_rx_spurious_frame(rwnx_vif->ndev, hdr->addr2, GFP_ATOMIC); + } + goto end; + } + + /* Check if we need to forward the buffer */ + if (status & RX_STAT_FORWARD) { + rwnx_rx_vector_convert(rwnx_hw, + &hw_rxhdr->hwvect.rx_vect1, + &hw_rxhdr->hwvect.rx_vect2); + skb_pull(skb, msdu_offset + 2); //+2 since sdio allign 58->60 +#define MAC_FCTRL_MOREFRAG 0x0400 + frame_ctrl = (skb->data[1] << 8) | skb->data[0]; + seq_num = ((skb->data[22] & 0xf0) >> 4) | (skb->data[23] << 4); + frag_num = (skb->data[22] & 0x0f); + is_amsdu = 0; + + if ((skb->data[0] & 0x0f) == 0x08) { + if ((skb->data[0] & 0x80) == 0x80) {//qos data + hdr_len = 26; + tid = skb->data[24] & 0x0F; + is_qos = 1; + if (skb->data[24] & 0x80) + is_amsdu = 1; + } + + if(skb->data[1] & 0x80)// htc + hdr_len += 4; + + if((skb->data[1] & 0x3) == 0x1) {// to ds + memcpy(ra, &skb->data[16], MAC_ADDR_LEN);//destination addr + memcpy(ta, 
&skb->data[10], MAC_ADDR_LEN);//source addr + } else if((skb->data[1] & 0x3) == 0x2) { //from ds + memcpy(ta, &skb->data[16], MAC_ADDR_LEN);//destination addr + memcpy(ra, &skb->data[4], MAC_ADDR_LEN);//BSSID + } + + pull_len += (hdr_len + 8); + + switch (hw_rxhdr->hwvect.decr_status) { + case RWNX_RX_HD_DECR_CCMP128: + pull_len += 8;//ccmp_header + //skb_pull(&skb->data[skb->len-8], 8); //ccmp_mic_len + memcpy(ether_type, &skb->data[hdr_len + 6 + 8], 2); + break; + case RWNX_RX_HD_DECR_TKIP: + pull_len += 8;//tkip_header + memcpy(ether_type, &skb->data[hdr_len + 6 + 8], 2); + break; + case RWNX_RX_HD_DECR_WEP: + pull_len += 4;//wep_header + memcpy(ether_type, &skb->data[hdr_len + 6 + 4], 2); + break; + case RWNX_RX_HD_DECR_WAPI: + pull_len += 18;//wapi_header + memcpy(ether_type, &skb->data[hdr_len + 6 + 18], 2); + break; + + default: + memcpy(ether_type, &skb->data[hdr_len + 6], 2); + break; + } + if(is_amsdu) + hw_rxhdr->flags_is_amsdu = 1; + else + hw_rxhdr->flags_is_amsdu = 0; + + if((ether_type[0] == 0x8e && ether_type[1] == 0x88) || (ether_type[0] == 0x88 && ether_type[1] == 0x8e)) + printk("rx eapol\n"); + if (is_amsdu) { + skb_pull(skb, pull_len-8); + } + + if (hw_rxhdr->flags_dst_idx != RWNX_INVALID_STA) + sta_idx = hw_rxhdr->flags_dst_idx; + + if (!hw_rxhdr->flags_need_reord && ((frame_ctrl & MAC_FCTRL_MOREFRAG) || frag_num)) { + printk("rxfrag:%d,%d,%d,sn=%d,%d\r\n", (frame_ctrl & MAC_FCTRL_MOREFRAG), frag_num, skb->len, seq_num,pull_len); + if (frame_ctrl & MAC_FCTRL_MOREFRAG) { + spin_lock_bh(&rwnx_hw->defrag_lock); + if (!list_empty(&rwnx_hw->defrag_list)) { + list_for_each_entry(defrag_info_tmp, &rwnx_hw->defrag_list, list) { + if ((defrag_info_tmp->sn == seq_num) && (defrag_info_tmp->tid == tid) && \ + defrag_info_tmp->sta_idx == sta_idx) { + defrag_info = defrag_info_tmp; + break; + } + } + } + + //printk("rx frag: sn=%d, fn=%d, skb->len=%d\r\n", seq_num, frag_num, skb->len); + if (defrag_info) { + is_frag = 1; + if (defrag_info->next_fn != frag_num) { + printk("discard:%d:%d\n", defrag_info->next_fn, frag_num); + dev_kfree_skb(skb); + spin_unlock_bh(&rwnx_hw->defrag_lock); + return 0; + } + + skb_put(defrag_info->skb, skb->len-(pull_len-8)); + memcpy(&defrag_info->skb->data[defrag_info->frm_len], \ + &skb->data[pull_len-8], skb->len - (pull_len-8)); + //printk("middle:%d,%d\n", skb->len-(pull_len-8), skb->len); + defrag_info->frm_len += (skb->len - (pull_len - 8)); + defrag_info->next_fn++; + dev_kfree_skb(skb); + spin_unlock_bh(&rwnx_hw->defrag_lock); + return 0; + } else { + defrag_info = kzalloc(sizeof(struct defrag_ctrl_info), GFP_KERNEL); + if (defrag_info == NULL) { + printk("no defrag_ctrl_info\r\n"); + dev_kfree_skb(skb); + spin_unlock_bh(&rwnx_hw->defrag_lock); + return 0; + } + defrag_info->skb = __dev_alloc_skb(2000, GFP_KERNEL); + if (defrag_info->skb == NULL) { + printk("no fragment skb\r\n"); + dev_kfree_skb(skb); + kfree(defrag_info); + spin_unlock_bh(&rwnx_hw->defrag_lock); + return 0; + } + is_frag = 1; + skb_pull(skb, pull_len); + skb_push(skb, 14); + memcpy(skb->data, ra, MAC_ADDR_LEN); + memcpy(&skb->data[6], ta, MAC_ADDR_LEN); + memcpy(&skb->data[12], ether_type, 2); + defrag_info->sn = seq_num; + defrag_info->next_fn = 1; + defrag_info->tid = tid; + defrag_info->sta_idx = sta_idx; + + skb_put(defrag_info->skb, skb->len); + memcpy(defrag_info->skb->data, skb->data, skb->len); + defrag_info->frm_len = skb->len; + defrag_info->rwnx_hw = rwnx_hw; + //printk("first:%p,%p,%p,%p,%p, %d,%d\r\n", defrag_info, defrag_info->skb, defrag_info->skb->head, 
defrag_info->skb->tail, defrag_info->skb->end, defrag_info->frm_len, skb->len); + list_add_tail(&defrag_info->list, &rwnx_hw->defrag_list); + spin_unlock_bh(&rwnx_hw->defrag_lock); +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0) + init_timer(&defrag_info->defrag_timer); + defrag_info->defrag_timer.data = (unsigned long)defrag_info; + defrag_info->defrag_timer.function = defrag_timeout_cb; +#else + timer_setup(&defrag_info->defrag_timer, defrag_timeout_cb, 0); +#endif + ret = mod_timer(&defrag_info->defrag_timer, jiffies + msecs_to_jiffies(DEFRAG_MAX_WAIT)); + dev_kfree_skb(skb); + return 0; + } + } else { + //check whether the last fragment + if (!list_empty(&rwnx_hw->defrag_list)) { + spin_lock_bh(&rwnx_hw->defrag_lock); + list_for_each_entry(defrag_info_tmp, &rwnx_hw->defrag_list, list) { + if (((defrag_info_tmp->sn == seq_num) && (defrag_info_tmp->tid == tid) && \ + defrag_info_tmp->sta_idx == sta_idx)) { + defrag_info = defrag_info_tmp; + break; + } + } + + if (!defrag_info) + spin_unlock_bh(&rwnx_hw->defrag_lock); + else { + if (defrag_info->next_fn != frag_num) { + printk("discard:%d:%d\n", defrag_info->next_fn, frag_num); + dev_kfree_skb(skb); + spin_unlock_bh(&rwnx_hw->defrag_lock); + return 0; + } + + skb_put(defrag_info->skb, skb->len - (pull_len-8)); + memcpy(&defrag_info->skb->data[defrag_info->frm_len], \ + &skb->data[pull_len-8], skb->len - (pull_len-8)); + defrag_info->frm_len += (skb->len - (pull_len-8)); + is_frag = 1; + //printk("last: sn=%d, fn=%d, %d, %d\r\n", seq_num, frag_num, defrag_info->frm_len, skb->len); + + rwnx_vif = rwnx_rx_get_vif(rwnx_hw, hw_rxhdr->flags_vif_idx); + if (!rwnx_vif) { + printk("Frame received but no active vif (%d)", hw_rxhdr->flags_vif_idx); + dev_kfree_skb(skb); + spin_unlock_bh(&rwnx_hw->defrag_lock); + return 0; + } + dev_kfree_skb(skb); + + skb_tmp = defrag_info->skb; + list_del_init(&defrag_info->list); + if (timer_pending(&defrag_info->defrag_timer)) { + ret = del_timer(&defrag_info->defrag_timer); + } + kfree(defrag_info); + spin_unlock_bh(&rwnx_hw->defrag_lock); + + if (!rwnx_rx_data_skb(rwnx_hw, rwnx_vif, skb_tmp, hw_rxhdr)) + dev_kfree_skb(skb_tmp); + + return 0; + } + } + } + } + + if (!is_frag && !is_amsdu) { + skb_pull(skb, pull_len); + skb_push(skb, 14); + memcpy(skb->data, ra, MAC_ADDR_LEN); + memcpy(&skb->data[6], ta, MAC_ADDR_LEN); + memcpy(&skb->data[12], ether_type, 2); + } + } + + if (hw_rxhdr->flags_is_80211_mpdu) { + remove_sec_hdr_mgmt_frame(hw_rxhdr,skb); + rwnx_rx_mgmt_any(rwnx_hw, skb, hw_rxhdr); + } else { + rwnx_vif = rwnx_rx_get_vif(rwnx_hw, hw_rxhdr->flags_vif_idx); + + if (!rwnx_vif) { + dev_err(rwnx_hw->dev, "Frame received but no active vif (%d), skb->len:%u\n", + hw_rxhdr->flags_vif_idx, skb->len); + dev_kfree_skb(skb); + goto end; + } + + if (hw_rxhdr->flags_sta_idx != RWNX_INVALID_STA) { + struct rwnx_sta *sta; + + sta = &rwnx_hw->sta_table[hw_rxhdr->flags_sta_idx]; + rwnx_rx_statistic(rwnx_hw, hw_rxhdr, sta); + + if (sta->vlan_idx != rwnx_vif->vif_index) { + rwnx_vif = rwnx_hw->vif_table[sta->vlan_idx]; + if (!rwnx_vif) { + dev_kfree_skb(skb); + goto end; + } + } + + if (hw_rxhdr->flags_is_4addr && !rwnx_vif->use_4addr) { + cfg80211_rx_unexpected_4addr_frame(rwnx_vif->ndev, + sta->mac_addr, GFP_ATOMIC); + } + } + + skb->priority = 256 + tid;//hw_rxhdr->flags_user_prio; + +#ifdef AICWF_RX_REORDER + rx_priv_tmp = rx_priv; + rx_priv_tmp->rwnx_vif = (void *)rwnx_vif; + + if( (rwnx_vif->wdev.iftype == NL80211_IFTYPE_STATION) || (rwnx_vif->wdev.iftype == NL80211_IFTYPE_P2P_CLIENT) ){ + if(is_qos && 
hw_rxhdr->flags_need_reord){ + struct recv_msdu *pframe; + pframe = reord_rxframe_alloc(&rx_priv_tmp->freeq_lock, &rx_priv_tmp->rxframes_freequeue); + if (!pframe) { + printk("no pframe\n"); + dev_kfree_skb(skb); + return -1; + } + pframe->is_ap_reord = 0; + pframe->ap_fwd_cnt = 0; + pframe->ap_resend_cnt = 0; + reord_process_unit(pframe, (struct aicwf_rx_priv *)rx_priv, skb, seq_num, tid, 1, hw_rxhdr->flags_is_amsdu); + } else if (is_qos && !hw_rxhdr->flags_need_reord) { + reord_flush_tid((struct aicwf_rx_priv *)rx_priv, skb, tid); + if (!rwnx_rx_data_skb(rwnx_hw, rwnx_vif, skb, hw_rxhdr) && !hw_rxhdr->flags_is_amsdu) + dev_kfree_skb(skb); + } else { + if (!rwnx_rx_data_skb(rwnx_hw, rwnx_vif, skb, hw_rxhdr) && !hw_rxhdr->flags_is_amsdu) + dev_kfree_skb(skb); + } + } else if( (rwnx_vif->wdev.iftype == NL80211_IFTYPE_AP) || (rwnx_vif->wdev.iftype == NL80211_IFTYPE_P2P_GO) ) { +#ifdef CONFIG_DYNAMIC_PERPWR + struct rwnx_sta *sta; +#endif + struct sk_buff_head list; + struct sk_buff *rx_skb; + +#ifdef CONFIG_DYNAMIC_PERPWR + sta = &rwnx_hw->sta_table[hw_rxhdr->flags_sta_idx]; + rssi_update_txpwrloss(sta, hw_rxhdr->hwvect.rx_vect1.rssi1); +#endif +#ifdef CONFIG_BAND_STEERING + (&rwnx_hw->sta_table[hw_rxhdr->flags_sta_idx])->rssi = hw_rxhdr->hwvect.rx_vect1.rssi1; +#endif + u8 flags_dst_idx = hw_rxhdr->flags_dst_idx; + u8 flags_need_reord = hw_rxhdr->flags_need_reord; + u8 flags_vif_idx = hw_rxhdr->flags_vif_idx; + u8 flags_is_amsdu = hw_rxhdr->flags_is_amsdu; + //u8 reord_cnt = 0; + struct recv_msdu *pframe = NULL; + + __skb_queue_head_init(&list); + + //printk("dst_idx=%d, len=%d\n", flags_dst_idx, skb->len); + if (flags_is_amsdu) { + rwnx_rxdata_process_amsdu(rwnx_hw, skb, flags_vif_idx, &list); //rxhdr not used below since skb free! + } else { + rwnx_hw->stats.amsdus_rx[0]++; + __skb_queue_head(&list, skb); + } + + while (!skb_queue_empty(&list)) { + rx_skb = __skb_dequeue(&list); + + skb_reset_mac_header(rx_skb); + eth = eth_hdr(rx_skb); + //printk("da:%pM, %x,%x, len=%d\n", eth->h_dest, skb->data[12], skb->data[13], skb->len); + + resend = false; + forward = true; + if (unlikely(is_multicast_ether_addr(eth->h_dest))) { + /* broadcast pkt need to be forwared to upper layer and resent + on wireless interface */ + resend = true; + } else { + /* unicast pkt for STA inside the BSS, no need to forward to upper + layer simply resend on wireless interface */ + if ((flags_dst_idx != RWNX_INVALID_STA) && !flags_is_amsdu) { + struct rwnx_sta *sta = &rwnx_hw->sta_table[flags_dst_idx]; + if (sta->valid && (sta->vlan_idx == rwnx_vif->vif_index)) { + resend = true; + forward = false; + } + } else { + struct rwnx_sta *cur, *tmp; + bool found = false; + spin_lock_bh(&rwnx_hw->cb_lock); + list_for_each_entry_safe(cur, tmp, &rwnx_vif->ap.sta_list, list) { + if (!memcmp(cur->mac_addr, eth->h_dest, ETH_ALEN)) { + found = true; + break; + } + } + spin_unlock_bh(&rwnx_hw->cb_lock); + if(found) { + //printk("amsdu found da\n"); + resend = true; + forward = false; + } + } + } + + if(resend){ + rwnx_rx_data_skb_resend(rwnx_hw, rwnx_vif, rx_skb); + } + + if(forward) { + if (is_qos && flags_need_reord) { + if(pframe == NULL) { + pframe = reord_rxframe_alloc(&rx_priv_tmp->freeq_lock, &rx_priv_tmp->rxframes_freequeue); + if (!pframe) { + dev_kfree_skb(rx_skb); + return -1; + } + rx_skb->next = NULL; + pframe->ap_fwd_cnt = 1; + pframe->ap_resend_cnt = 0; + pframe->first_fwd_skb = rx_skb; + pframe->last_fwd_skb = rx_skb; + pframe->first_resend_skb = NULL; + pframe->last_resend_skb = NULL; + } else { + rx_skb->next = 
NULL; + pframe->ap_fwd_cnt++; + if(pframe->first_fwd_skb == NULL) { + pframe->first_fwd_skb = rx_skb; + pframe->last_fwd_skb = rx_skb; + } else { + pframe->last_fwd_skb->next = rx_skb; + pframe->last_fwd_skb = rx_skb; + } + } + } else if (is_qos && !flags_need_reord) { + reord_flush_tid((struct aicwf_rx_priv *)rx_priv, rx_skb, tid); + rwnx_rx_data_skb_forward(rwnx_hw, rwnx_vif, rx_skb); + } else + rwnx_rx_data_skb_forward(rwnx_hw, rwnx_vif, rx_skb); + } else if(resend) { + if (is_qos && flags_need_reord) { + if(pframe == NULL) { + pframe = reord_rxframe_alloc(&rx_priv_tmp->freeq_lock, &rx_priv_tmp->rxframes_freequeue); + if (!pframe) { + dev_kfree_skb(rx_skb); + return -1; + } + rx_skb->next = NULL; + pframe->ap_fwd_cnt = 0; + pframe->ap_resend_cnt = 1; + pframe->first_fwd_skb = NULL; + pframe->last_fwd_skb = NULL; + pframe->first_resend_skb = rx_skb; + pframe->last_resend_skb = rx_skb; + } else { + rx_skb->next = NULL; + pframe->ap_resend_cnt++; + if(pframe->first_resend_skb == NULL) { + pframe->first_resend_skb = rx_skb; + pframe->last_resend_skb = rx_skb; + } else { + pframe->last_resend_skb->next = rx_skb; + pframe->last_resend_skb = rx_skb; + } + } + } + else if (is_qos && !flags_need_reord) { + if(!reord_flush_tid((struct aicwf_rx_priv *)rx_priv, rx_skb, tid)) + dev_kfree_skb(rx_skb); + } else { + dev_kfree_skb(rx_skb); + } + } else + dev_kfree_skb(rx_skb); + } + + if(pframe != NULL) { + pframe->is_ap_reord = 1; + //printk("reord:%d,%d\n", pframe->ap_fwd_cnt, pframe->ap_resend_cnt); + reord_process_unit(pframe, (struct aicwf_rx_priv *)rx_priv, rx_skb, seq_num, tid, 0, 0); + } + } +#else + if (!rwnx_rx_data_skb(rwnx_hw, rwnx_vif, skb, hw_rxhdr)) + dev_kfree_skb(skb); +#endif + } + } + +end: + REG_SW_CLEAR_PROFILING(rwnx_hw, SW_PROF_RWNXDATAIND); + return 0; +} + diff --git a/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_rx.h b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_rx.h new file mode 100644 index 0000000..e7e71f6 --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_rx.h @@ -0,0 +1,408 @@ +/** + ****************************************************************************** + * + * @file rwnx_rx.h + * + * Copyright (C) RivieraWaves 2012-2019 + * + ****************************************************************************** + */ +#ifndef _RWNX_RX_H_ +#define _RWNX_RX_H_ + +#include "aicwf_txrxif.h" + +#define SERVER_PORT 67 +#define CLIENT_PORT 68 +#define DHCP_MAGIC 0x63825363 +#define DHCP_ACK 5 +#define DHCP_OPTION_MESSAGE_TYPE 53 /* RFC 2132 9.6, important for DHCP */ +#define DHCP_OPTION_END 255 + +enum rx_status_bits +{ + /// The buffer can be forwarded to the networking stack + RX_STAT_FORWARD = 1 << 0, + /// A new buffer has to be allocated + RX_STAT_ALLOC = 1 << 1, + /// The buffer has to be deleted + RX_STAT_DELETE = 1 << 2, + /// The length of the buffer has to be updated + RX_STAT_LEN_UPDATE = 1 << 3, + /// The length in the Ethernet header has to be updated + RX_STAT_ETH_LEN_UPDATE = 1 << 4, + /// Simple copy + RX_STAT_COPY = 1 << 5, + /// Spurious frame (inform upper layer and discard) + RX_STAT_SPURIOUS = 1 << 6, + /// packet for monitor interface + RX_STAT_MONITOR = 1 << 7, +}; + + +/* + * Decryption status subfields. 
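+ * Note: these codes are reported in the 3-bit hw_vect.decr_status field
+ * of each received MPDU (see struct hw_vect later in this header).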
+ * { + */ +#define RWNX_RX_HD_DECR_UNENC 0 // ENCRYPTION TYPE NONE +#define RWNX_RX_HD_DECR_WEP 1 // ENCRYPTION TYPE WEP +#define RWNX_RX_HD_DECR_TKIP 2 // ENCRYPTION TYPE TKIP +#define RWNX_RX_HD_DECR_CCMP128 3 // ENCRYPTION TYPE CCMP128 +#define RWNX_RX_HD_DECR_CCMP256 4 // ENCRYPTION TYPE CCMP256 +#define RWNX_RX_HD_DECR_GCMP128 5 // ENCRYPTION TYPE GCMP128 +#define RWNX_RX_HD_DECR_GCMP256 6 // ENCRYPTION TYPE GCMP256 +#define RWNX_RX_HD_DECR_WAPI 7 // ENCRYPTION TYPE WAPI +// @} + +//#ifdef CONFIG_RWNX_MON_DATA +#if 0 +#define RX_MACHDR_BACKUP_LEN 64 +#endif + +struct rx_vector_1_old { + /** Receive Vector 1a */ + u32 leg_length :12; + u32 leg_rate : 4; + u32 ht_length :16; + + /** Receive Vector 1b */ + u32 _ht_length : 4; // FIXME + u32 short_gi : 1; + u32 stbc : 2; + u32 smoothing : 1; + u32 mcs : 7; + u32 pre_type : 1; + u32 format_mod : 3; + u32 ch_bw : 2; + u32 n_sts : 3; + u32 lsig_valid : 1; + u32 sounding : 1; + u32 num_extn_ss : 2; + u32 aggregation : 1; + u32 fec_coding : 1; + u32 dyn_bw : 1; + u32 doze_not_allowed : 1; + + /** Receive Vector 1c */ + u32 antenna_set : 8; + u32 partial_aid : 9; + u32 group_id : 6; + u32 first_user : 1; + s32 rssi1 : 8; + + /** Receive Vector 1d */ + s32 rssi2 : 8; + s32 rssi3 : 8; + s32 rssi4 : 8; + u32 reserved_1d : 8; +}; + +struct rx_leg_vect +{ + u8 dyn_bw_in_non_ht : 1; + u8 chn_bw_in_non_ht : 2; + u8 rsvd_nht : 4; + u8 lsig_valid : 1; +} __packed; + +struct rx_ht_vect +{ + u16 sounding : 1; + u16 smoothing : 1; + u16 short_gi : 1; + u16 aggregation : 1; + u16 stbc : 1; + u16 num_extn_ss : 2; + u16 lsig_valid : 1; + u16 mcs : 7; + u16 fec : 1; + u16 length :16; +} __packed; + +struct rx_vht_vect +{ + u8 sounding : 1; + u8 beamformed : 1; + u8 short_gi : 1; + u8 rsvd_vht1 : 1; + u8 stbc : 1; + u8 doze_not_allowed : 1; + u8 first_user : 1; + u8 rsvd_vht2 : 1; + u16 partial_aid : 9; + u16 group_id : 6; + u16 rsvd_vht3 : 1; + u32 mcs : 4; + u32 nss : 3; + u32 fec : 1; + u32 length :20; + u32 rsvd_vht4 : 4; +} __packed; + +struct rx_he_vect +{ + u8 sounding : 1; + u8 beamformed : 1; + u8 gi_type : 2; + u8 stbc : 1; + u8 rsvd_he1 : 3; + + u8 uplink_flag : 1; + u8 beam_change : 1; + u8 dcm : 1; + u8 he_ltf_type : 2; + u8 doppler : 1; + u8 rsvd_he2 : 2; + + u8 bss_color : 6; + u8 rsvd_he3 : 2; + + u8 txop_duration : 7; + u8 rsvd_he4 : 1; + + u8 pe_duration : 4; + u8 spatial_reuse : 4; + + u8 sig_b_comp_mode : 1; + u8 dcm_sig_b : 1; + u8 mcs_sig_b : 3; + u8 ru_size : 3; + + u32 mcs : 4; + u32 nss : 3; + u32 fec : 1; + u32 length :20; + u32 rsvd_he6 : 4; +} __packed; + +struct rx_vector_1 { + u8 format_mod : 4; + u8 ch_bw : 3; + u8 pre_type : 1; + u8 antenna_set : 8; + s32 rssi_leg : 8; + u32 leg_length :12; + u32 leg_rate : 4; + s32 rssi1 : 8; + + union + { + struct rx_leg_vect leg; + struct rx_ht_vect ht; + struct rx_vht_vect vht; + struct rx_he_vect he; + }; +} __packed; + +struct rx_vector_2_old { + /** Receive Vector 2a */ + u32 rcpi : 8; + u32 evm1 : 8; + u32 evm2 : 8; + u32 evm3 : 8; + + /** Receive Vector 2b */ + u32 evm4 : 8; + u32 reserved2b_1 : 8; + u32 reserved2b_2 : 8; + u32 reserved2b_3 : 8; + +}; + +struct rx_vector_2 { + /** Receive Vector 2a */ + u32 rcpi1 : 8; + u32 rcpi2 : 8; + u32 rcpi3 : 8; + u32 rcpi4 : 8; + + /** Receive Vector 2b */ + u32 evm1 : 8; + u32 evm2 : 8; + u32 evm3 : 8; + u32 evm4 : 8; +}; + +struct phy_channel_info_desc { + /** PHY channel information 1 */ + u32 phy_band : 8; + u32 phy_channel_type : 8; + u32 phy_prim20_freq : 16; + /** PHY channel information 2 */ + u32 phy_center1_freq : 16; + u32 
phy_center2_freq : 16; +}; + +struct hw_vect { + /** Total length for the MPDU transfer */ + u32 len :16; + + u32 reserved : 8;//data type is included + /** AMPDU Status Information */ + u32 mpdu_cnt : 6; + u32 ampdu_cnt : 2; + + /** TSF Low */ + __le32 tsf_lo; + /** TSF High */ + __le32 tsf_hi; + + /** Receive Vector 1 */ + struct rx_vector_1 rx_vect1; + /** Receive Vector 2 */ + struct rx_vector_2 rx_vect2; + + /** Status **/ + u32 rx_vect2_valid : 1; + u32 resp_frame : 1; + /** Decryption Status */ + u32 decr_status : 3; + u32 rx_fifo_oflow : 1; + + /** Frame Unsuccessful */ + u32 undef_err : 1; + u32 phy_err : 1; + u32 fcs_err : 1; + u32 addr_mismatch : 1; + u32 ga_frame : 1; + u32 current_ac : 2; + + u32 frm_successful_rx : 1; + /** Descriptor Done */ + u32 desc_done_rx : 1; + /** Key Storage RAM Index */ + u32 key_sram_index : 10; + /** Key Storage RAM Index Valid */ + u32 key_sram_v : 1; + u32 type : 2; + u32 subtype : 4; +}; + +//#ifdef CONFIG_RWNX_MON_DATA +#if 0 +/// MAC header backup descriptor +struct mon_machdrdesc +{ + /// Length of the buffer + u32 buf_len; + /// Buffer containing mac header, LLC and SNAP + u8 buffer[RX_MACHDR_BACKUP_LEN]; +}; +#endif + +struct hw_rxhdr { + /** RX vector */ + struct hw_vect hwvect; + + /** PHY channel information */ + struct phy_channel_info_desc phy_info; + + /** RX flags */ + u32 flags_is_amsdu : 1; + u32 flags_is_80211_mpdu: 1; + u32 flags_is_4addr : 1; + u32 flags_new_peer : 1; +#if defined(AICWF_SDIO_SUPPORT) || defined(AICWF_USB_SUPPORT) + u32 flags_user_prio : 1; // aic: fw not fill any more + u32 flags_need_reord : 1; + u32 flags_upload : 1; +#else + u32 flags_user_prio : 3; +#endif +#ifndef AICWF_RX_REORDER + u32 flags_rsvd0 : 1; +#else + u32 is_monitor_vif : 1; +#endif + u32 flags_vif_idx : 8; // 0xFF if invalid VIF index + u32 flags_sta_idx : 8; // 0xFF if invalid STA index + u32 flags_dst_idx : 8; // 0xFF if unknown destination STA +//#ifdef CONFIG_RWNX_MON_DATA +#if 0 + /// MAC header backup descriptor (used only for MSDU when there is a monitor and a data interface) + struct mon_machdrdesc mac_hdr_backup; +#endif + /** Pattern indicating if the buffer is available for the driver */ + u32 pattern; +}; + +struct rwnx_legrate { + int idx; + int rate; +}; + +extern struct rwnx_legrate legrates_lut[]; +extern u16 tx_legrates_lut_rate[]; + +struct DHCPInfo { + u8 op; + u8 htype; + u8 hlen; + u8 hops; + u32 xid; + u16 secs; + u16 flags; + u32 ciaddr; + u32 yiaddr; + u32 siaddr; + u32 giaddr; + u8 chaddr[16]; + u8 sname[64]; + u8 file[128]; + u32 cookie; + u8 options[308]; /* 312 - cookie */ +}; + +u8 rwnx_unsup_rx_vec_ind(void *pthis, void *hostid); +u8 rwnx_rxdataind(void *pthis, void *hostid); +u8 rwnx_rxdataind_aicwf(struct rwnx_hw *rwnx_hw, void *hostid, void *rx_priv); +int aicwf_process_rxframes(struct aicwf_rx_priv *rx_priv); +#ifdef CONFIG_USB_MSG_IN_EP +int aicwf_process_msg_rxframes(struct aicwf_rx_priv *rx_priv); +#endif + +#ifdef AICWF_ARP_OFFLOAD +void arpoffload_proc(struct sk_buff *skb, struct rwnx_vif *rwnx_vif); +#endif +#ifdef AICWF_RX_REORDER +struct recv_msdu *reord_rxframe_alloc(spinlock_t *lock, struct list_head *q); +void reord_rxframe_free(spinlock_t *lock, struct list_head *q, struct list_head *list); +struct reord_ctrl_info *reord_init_sta( struct aicwf_rx_priv *rx_priv, const u8 *mac_addr); +void reord_deinit_sta(struct aicwf_rx_priv *rx_priv, struct reord_ctrl_info *reord_info); +int reord_need_check(struct reord_ctrl *preorder_ctrl, u16 seq_num); +int reord_rxframe_enqueue(struct reord_ctrl *preorder_ctrl, 
struct recv_msdu *prframe); +void reord_timeout_worker(struct work_struct *work); +int reord_single_frame_ind(struct aicwf_rx_priv *rx_priv, struct recv_msdu *prframe); +#if LINUX_VERSION_CODE < KERNEL_VERSION(4,14,0) +void reord_timeout_handler (ulong data); +#else +void reord_timeout_handler (struct timer_list *t); +#endif + +#endif +void rwnx_rxdata_process_amsdu(struct rwnx_hw *rwnx_hw, struct sk_buff *skb, u8 vif_idx, + struct sk_buff_head *list); + +#ifdef CONFIG_HE_FOR_OLD_KERNEL +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 197) +struct element { + u8 id; + u8 datalen; + u8 data[]; +}; +/* element iteration helpers */ +#define for_each_element(_elem, _data, _datalen) \ + for (_elem = (const struct element *)(_data); \ + (const u8 *)(_data) + (_datalen) - (const u8 *)_elem >= \ + (int)sizeof(*_elem) && \ + (const u8 *)(_data) + (_datalen) - (const u8 *)_elem >= \ + (int)sizeof(*_elem) + _elem->datalen; \ + _elem = (const struct element *)(_elem->data + _elem->datalen)) + +#define for_each_element_id(element, _id, data, datalen) \ + for_each_element(element, data, datalen) \ + if (element->id == (_id)) +#endif +#endif + +#endif /* _RWNX_RX_H_ */ diff --git a/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_strs.c b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_strs.c new file mode 100644 index 0000000..105301d --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_strs.c @@ -0,0 +1,269 @@ +/** + ****************************************************************************** + * + * @file rwnx_strs.c + * + * @brief Miscellaneous debug strings + * + * Copyright (C) RivieraWaves 2014-2019 + * + ****************************************************************************** + */ + +#include "lmac_msg.h" + +static const char *const rwnx_mmid2str[MSG_I(MM_MAX)] = { + [MSG_I(MM_RESET_REQ)] = "MM_RESET_REQ", + [MSG_I(MM_RESET_CFM)] = "MM_RESET_CFM", + [MSG_I(MM_START_REQ)] = "MM_START_REQ", + [MSG_I(MM_START_CFM)] = "MM_START_CFM", + [MSG_I(MM_VERSION_REQ)] = "MM_VERSION_REQ", + [MSG_I(MM_VERSION_CFM)] = "MM_VERSION_CFM", + [MSG_I(MM_ADD_IF_REQ)] = "MM_ADD_IF_REQ", + [MSG_I(MM_ADD_IF_CFM)] = "MM_ADD_IF_CFM", + [MSG_I(MM_REMOVE_IF_REQ)] = "MM_REMOVE_IF_REQ", + [MSG_I(MM_REMOVE_IF_CFM)] = "MM_REMOVE_IF_CFM", + [MSG_I(MM_STA_ADD_REQ)] = "MM_STA_ADD_REQ", + [MSG_I(MM_STA_ADD_CFM)] = "MM_STA_ADD_CFM", + [MSG_I(MM_STA_DEL_REQ)] = "MM_STA_DEL_REQ", + [MSG_I(MM_STA_DEL_CFM)] = "MM_STA_DEL_CFM", + [MSG_I(MM_SET_FILTER_REQ)] = "MM_SET_FILTER_REQ", + [MSG_I(MM_SET_FILTER_CFM)] = "MM_SET_FILTER_CFM", + [MSG_I(MM_SET_CHANNEL_REQ)] = "MM_SET_CHANNEL_REQ", + [MSG_I(MM_SET_CHANNEL_CFM)] = "MM_SET_CHANNEL_CFM", + [MSG_I(MM_SET_DTIM_REQ)] = "MM_SET_DTIM_REQ", + [MSG_I(MM_SET_DTIM_CFM)] = "MM_SET_DTIM_CFM", + [MSG_I(MM_SET_BEACON_INT_REQ)] = "MM_SET_BEACON_INT_REQ", + [MSG_I(MM_SET_BEACON_INT_CFM)] = "MM_SET_BEACON_INT_CFM", + [MSG_I(MM_SET_BASIC_RATES_REQ)] = "MM_SET_BASIC_RATES_REQ", + [MSG_I(MM_SET_BASIC_RATES_CFM)] = "MM_SET_BASIC_RATES_CFM", + [MSG_I(MM_SET_BSSID_REQ)] = "MM_SET_BSSID_REQ", + [MSG_I(MM_SET_BSSID_CFM)] = "MM_SET_BSSID_CFM", + [MSG_I(MM_SET_EDCA_REQ)] = "MM_SET_EDCA_REQ", + [MSG_I(MM_SET_EDCA_CFM)] = "MM_SET_EDCA_CFM", + [MSG_I(MM_SET_MODE_REQ)] = "MM_SET_MODE_REQ", + [MSG_I(MM_SET_MODE_CFM)] = "MM_SET_MODE_CFM", + [MSG_I(MM_SET_VIF_STATE_REQ)] = "MM_SET_VIF_STATE_REQ", + [MSG_I(MM_SET_VIF_STATE_CFM)] = "MM_SET_VIF_STATE_CFM", + [MSG_I(MM_SET_SLOTTIME_REQ)] = "MM_SET_SLOTTIME_REQ", + [MSG_I(MM_SET_SLOTTIME_CFM)] = "MM_SET_SLOTTIME_CFM", + [MSG_I(MM_SET_IDLE_REQ)] = 
"MM_SET_IDLE_REQ", + [MSG_I(MM_SET_IDLE_CFM)] = "MM_SET_IDLE_CFM", + [MSG_I(MM_KEY_ADD_REQ)] = "MM_KEY_ADD_REQ", + [MSG_I(MM_KEY_ADD_CFM)] = "MM_KEY_ADD_CFM", + [MSG_I(MM_KEY_DEL_REQ)] = "MM_KEY_DEL_REQ", + [MSG_I(MM_KEY_DEL_CFM)] = "MM_KEY_DEL_CFM", + [MSG_I(MM_BA_ADD_REQ)] = "MM_BA_ADD_REQ", + [MSG_I(MM_BA_ADD_CFM)] = "MM_BA_ADD_CFM", + [MSG_I(MM_BA_DEL_REQ)] = "MM_BA_DEL_REQ", + [MSG_I(MM_BA_DEL_CFM)] = "MM_BA_DEL_CFM", + [MSG_I(MM_PRIMARY_TBTT_IND)] = "MM_PRIMARY_TBTT_IND", + [MSG_I(MM_SECONDARY_TBTT_IND)] = "MM_SECONDARY_TBTT_IND", + [MSG_I(MM_SET_POWER_REQ)] = "MM_SET_POWER_REQ", + [MSG_I(MM_SET_POWER_CFM)] = "MM_SET_POWER_CFM", + [MSG_I(MM_DBG_TRIGGER_REQ)] = "MM_DBG_TRIGGER_REQ", + [MSG_I(MM_SET_PS_MODE_REQ)] = "MM_SET_PS_MODE_REQ", + [MSG_I(MM_SET_PS_MODE_CFM)] = "MM_SET_PS_MODE_CFM", + [MSG_I(MM_CHAN_CTXT_ADD_REQ)] = "MM_CHAN_CTXT_ADD_REQ", + [MSG_I(MM_CHAN_CTXT_ADD_CFM)] = "MM_CHAN_CTXT_ADD_CFM", + [MSG_I(MM_CHAN_CTXT_DEL_REQ)] = "MM_CHAN_CTXT_DEL_REQ", + [MSG_I(MM_CHAN_CTXT_DEL_CFM)] = "MM_CHAN_CTXT_DEL_CFM", + [MSG_I(MM_CHAN_CTXT_LINK_REQ)] = "MM_CHAN_CTXT_LINK_REQ", + [MSG_I(MM_CHAN_CTXT_LINK_CFM)] = "MM_CHAN_CTXT_LINK_CFM", + [MSG_I(MM_CHAN_CTXT_UNLINK_REQ)] = "MM_CHAN_CTXT_UNLINK_REQ", + [MSG_I(MM_CHAN_CTXT_UNLINK_CFM)] = "MM_CHAN_CTXT_UNLINK_CFM", + [MSG_I(MM_CHAN_CTXT_UPDATE_REQ)] = "MM_CHAN_CTXT_UPDATE_REQ", + [MSG_I(MM_CHAN_CTXT_UPDATE_CFM)] = "MM_CHAN_CTXT_UPDATE_CFM", + [MSG_I(MM_CHAN_CTXT_SCHED_REQ)] = "MM_CHAN_CTXT_SCHED_REQ", + [MSG_I(MM_CHAN_CTXT_SCHED_CFM)] = "MM_CHAN_CTXT_SCHED_CFM", + [MSG_I(MM_BCN_CHANGE_REQ)] = "MM_BCN_CHANGE_REQ", + [MSG_I(MM_BCN_CHANGE_CFM)] = "MM_BCN_CHANGE_CFM", + [MSG_I(MM_TIM_UPDATE_REQ)] = "MM_TIM_UPDATE_REQ", + [MSG_I(MM_TIM_UPDATE_CFM)] = "MM_TIM_UPDATE_CFM", + [MSG_I(MM_CONNECTION_LOSS_IND)] = "MM_CONNECTION_LOSS_IND", + [MSG_I(MM_CHANNEL_SWITCH_IND)] = "MM_CHANNEL_SWITCH_IND", + [MSG_I(MM_CHANNEL_PRE_SWITCH_IND)] = "MM_CHANNEL_PRE_SWITCH_IND", + [MSG_I(MM_REMAIN_ON_CHANNEL_REQ)] = "MM_REMAIN_ON_CHANNEL_REQ", + [MSG_I(MM_REMAIN_ON_CHANNEL_CFM)] = "MM_REMAIN_ON_CHANNEL_CFM", + [MSG_I(MM_REMAIN_ON_CHANNEL_EXP_IND)] = "MM_REMAIN_ON_CHANNEL_EXP_IND", + [MSG_I(MM_PS_CHANGE_IND)] = "MM_PS_CHANGE_IND", + [MSG_I(MM_TRAFFIC_REQ_IND)] = "MM_TRAFFIC_REQ_IND", + [MSG_I(MM_SET_PS_OPTIONS_REQ)] = "MM_SET_PS_OPTIONS_REQ", + [MSG_I(MM_SET_PS_OPTIONS_CFM)] = "MM_SET_PS_OPTIONS_CFM", + [MSG_I(MM_P2P_VIF_PS_CHANGE_IND)] = "MM_P2P_VIF_PS_CHANGE_IND", + [MSG_I(MM_CSA_COUNTER_IND)] = "MM_CSA_COUNTER_IND", + [MSG_I(MM_CHANNEL_SURVEY_IND)] = "MM_CHANNEL_SURVEY_IND", + [MSG_I(MM_SET_P2P_NOA_REQ)] = "MM_SET_P2P_NOA_REQ", + [MSG_I(MM_SET_P2P_OPPPS_REQ)] = "MM_SET_P2P_OPPPS_REQ", + [MSG_I(MM_SET_P2P_NOA_CFM)] = "MM_SET_P2P_NOA_CFM", + [MSG_I(MM_SET_P2P_OPPPS_CFM)] = "MM_SET_P2P_OPPPS_CFM", + [MSG_I(MM_CFG_RSSI_REQ)] = "MM_CFG_RSSI_REQ", + [MSG_I(MM_RSSI_STATUS_IND)] = "MM_RSSI_STATUS_IND", + [MSG_I(MM_CSA_FINISH_IND)] = "MM_CSA_FINISH_IND", + [MSG_I(MM_CSA_TRAFFIC_IND)] = "MM_CSA_TRAFFIC_IND", + [MSG_I(MM_MU_GROUP_UPDATE_REQ)] = "MM_MU_GROUP_UPDATE_REQ", + [MSG_I(MM_MU_GROUP_UPDATE_CFM)] = "MM_MU_GROUP_UPDATE_CFM", + + [MSG_I(MM_SET_ARPOFFLOAD_REQ)] = "MM_SET_ARPOFFLOAD_REQ", + [MSG_I(MM_SET_ARPOFFLOAD_CFM)] = "MM_SET_ARPOFFLOAD_CFM", + [MSG_I(MM_SET_AGG_DISABLE_REQ)] = "MM_SET_AGG_DISABLE_REQ", + [MSG_I(MM_SET_AGG_DISABLE_CFM)] = "MM_SET_AGG_DISABLE_CFM", + [MSG_I(MM_SET_COEX_REQ)] = "MM_SET_COEX_REQ", + [MSG_I(MM_SET_COEX_CFM)] = "MM_SET_COEX_CFM", + [MSG_I(MM_SET_RF_CONFIG_REQ)] = "MM_SET_RF_CONFIG_REQ", + [MSG_I(MM_SET_RF_CONFIG_CFM)] = 
"MM_SET_RF_CONFIG_CFM", + [MSG_I(MM_SET_RF_CALIB_REQ)] = "MM_SET_RF_CALIB_REQ", + [MSG_I(MM_SET_RF_CALIB_CFM)] = "MM_SET_RF_CALIB_CFM", + + [MSG_I(MM_GET_MAC_ADDR_REQ)] = "MM_GET_MAC_ADDR_REQ", + [MSG_I(MM_GET_MAC_ADDR_CFM)] = "MM_GET_MAC_ADDR_CFM", + [MSG_I(MM_GET_STA_INFO_REQ)] = "MM_GET_STA_INFO_REQ", + [MSG_I(MM_GET_STA_INFO_CFM)] = "MM_GET_STA_INFO_CFM", + [MSG_I(MM_SET_TXPWR_IDX_LVL_REQ)] = "MM_SET_TXPWR_IDX_LVL_REQ", + [MSG_I(MM_SET_TXPWR_IDX_LVL_CFM)] = "MM_SET_TXPWR_IDX_LVL_CFM", + [MSG_I(MM_SET_TXPWR_OFST_REQ)] = "MM_SET_TXPWR_OFST_REQ", + [MSG_I(MM_SET_TXPWR_OFST_CFM)] = "MM_SET_TXPWR_OFST_CFM", + [MSG_I(MM_SET_STACK_START_REQ)] = "MM_SET_STACK_START_REQ", + [MSG_I(MM_SET_STACK_START_CFM)] = "MM_SET_STACK_START_CFM", + [MSG_I(MM_APM_STALOSS_IND)] = "MM_APM_STALOSS_IND", + [MSG_I(MM_SET_VENDOR_HWCONFIG_REQ)] = "MM_SET_VENDOR_HWCONFIG_REQ", + [MSG_I(MM_SET_VENDOR_HWCONFIG_CFM)] = "MM_SET_VENDOR_HWCONFIG_CFM", + [MSG_I(MM_GET_FW_VERSION_REQ)] = "MM_GET_FW_VERSION_REQ", + [MSG_I(MM_GET_FW_VERSION_CFM)] = "MM_GET_FW_VERSION_CFM", + [MSG_I(MM_SET_RESUME_RESTORE_REQ)] = "MM_SET_RESUME_RESTORE_REQ", + [MSG_I(MM_SET_RESUME_RESTORE_CFM)] = "MM_SET_RESUME_RESTORE_CFM", + [MSG_I(MM_GET_WIFI_DISABLE_REQ)] = "MM_GET_WIFI_DISABLE_REQ", + [MSG_I(MM_GET_WIFI_DISABLE_CFM)] = "MM_GET_WIFI_DISABLE_CFM", + [MSG_I(MM_CFG_RSSI_CFM)] = "MM_CFG_RSSI_CFM", + [MSG_I(MM_SET_TXPWR_PER_STA_REQ)] = "MM_SET_TXPWR_PER_STA_REQ", + [MSG_I(MM_SET_TXPWR_PER_STA_CFM)] = "MM_SET_TXPWR_PER_STA_CFM", +}; + +static const char *const rwnx_dbgid2str[MSG_I(DBG_MAX)] = { + [MSG_I(DBG_MEM_READ_REQ)] = "DBG_MEM_READ_REQ", + [MSG_I(DBG_MEM_READ_CFM)] = "DBG_MEM_READ_CFM", + [MSG_I(DBG_MEM_WRITE_REQ)] = "DBG_MEM_WRITE_REQ", + [MSG_I(DBG_MEM_WRITE_CFM)] = "DBG_MEM_WRITE_CFM", + [MSG_I(DBG_SET_MOD_FILTER_REQ)] = "DBG_SET_MOD_FILTER_REQ", + [MSG_I(DBG_SET_MOD_FILTER_CFM)] = "DBG_SET_MOD_FILTER_CFM", + [MSG_I(DBG_SET_SEV_FILTER_REQ)] = "DBG_SET_SEV_FILTER_REQ", + [MSG_I(DBG_SET_SEV_FILTER_CFM)] = "DBG_SET_SEV_FILTER_CFM", + [MSG_I(DBG_ERROR_IND)] = "DBG_ERROR_IND", + [MSG_I(DBG_GET_SYS_STAT_REQ)] = "DBG_GET_SYS_STAT_REQ", + [MSG_I(DBG_GET_SYS_STAT_CFM)] = "DBG_GET_SYS_STAT_CFM", +}; + +static const char *const rwnx_scanid2str[MSG_I(SCAN_MAX)] = { + [MSG_I(SCAN_START_REQ)] = "SCAN_START_REQ", + [MSG_I(SCAN_START_CFM)] = "SCAN_START_CFM", + [MSG_I(SCAN_DONE_IND)] = "SCAN_DONE_IND", +}; + +static const char *const rwnx_tdlsid2str[MSG_I(TDLS_MAX)] = { + [MSG_I(TDLS_CHAN_SWITCH_CFM)] = "TDLS_CHAN_SWITCH_CFM", + [MSG_I(TDLS_CHAN_SWITCH_REQ)] = "TDLS_CHAN_SWITCH_REQ", + [MSG_I(TDLS_CHAN_SWITCH_IND)] = "TDLS_CHAN_SWITCH_IND", + [MSG_I(TDLS_CHAN_SWITCH_BASE_IND)] = "TDLS_CHAN_SWITCH_BASE_IND", + [MSG_I(TDLS_CANCEL_CHAN_SWITCH_REQ)] = "TDLS_CANCEL_CHAN_SWITCH_REQ", + [MSG_I(TDLS_CANCEL_CHAN_SWITCH_CFM)] = "TDLS_CANCEL_CHAN_SWITCH_CFM", + [MSG_I(TDLS_PEER_PS_IND)] = "TDLS_PEER_PS_IND", + [MSG_I(TDLS_PEER_TRAFFIC_IND_REQ)] = "TDLS_PEER_TRAFFIC_IND_REQ", + [MSG_I(TDLS_PEER_TRAFFIC_IND_CFM)] = "TDLS_PEER_TRAFFIC_IND_CFM", +}; + +#ifdef CONFIG_RWNX_FULLMAC + +static const char *const rwnx_scanuid2str[MSG_I(SCANU_MAX)] = { + [MSG_I(SCANU_START_REQ)] = "SCANU_START_REQ", + [MSG_I(SCANU_START_CFM)] = "SCANU_START_CFM", + [MSG_I(SCANU_JOIN_REQ)] = "SCANU_JOIN_REQ", + [MSG_I(SCANU_JOIN_CFM)] = "SCANU_JOIN_CFM", + [MSG_I(SCANU_RESULT_IND)] = "SCANU_RESULT_IND", + [MSG_I(SCANU_FAST_REQ)] = "SCANU_FAST_REQ", + [MSG_I(SCANU_FAST_CFM)] = "SCANU_FAST_CFM", + [MSG_I(SCANU_VENDOR_IE_REQ)] = "SCANU_VENDOR_IE_REQ", + [MSG_I(SCANU_VENDOR_IE_CFM)] = 
"SCANU_VENDOR_IE_CFM", + [MSG_I(SCANU_START_CFM_ADDTIONAL)] = "SCANU_START_CFM_ADDTIONAL", + [MSG_I(SCANU_CANCEL_REQ)] = "SCANU_CANCEL_REQ", + [MSG_I(SCANU_CANCEL_CFM)] = "SCANU_CANCEL_CFM", +}; + +static const char *const rwnx_meid2str[MSG_I(ME_MAX)] = { + [MSG_I(ME_CONFIG_REQ)] = "ME_CONFIG_REQ", + [MSG_I(ME_CONFIG_CFM)] = "ME_CONFIG_CFM", + [MSG_I(ME_CHAN_CONFIG_REQ)] = "ME_CHAN_CONFIG_REQ", + [MSG_I(ME_CHAN_CONFIG_CFM)] = "ME_CHAN_CONFIG_CFM", + [MSG_I(ME_SET_CONTROL_PORT_REQ)] = "ME_SET_CONTROL_PORT_REQ", + [MSG_I(ME_SET_CONTROL_PORT_CFM)] = "ME_SET_CONTROL_PORT_CFM", + [MSG_I(ME_TKIP_MIC_FAILURE_IND)] = "ME_TKIP_MIC_FAILURE_IND", + [MSG_I(ME_STA_ADD_REQ)] = "ME_STA_ADD_REQ", + [MSG_I(ME_STA_ADD_CFM)] = "ME_STA_ADD_CFM", + [MSG_I(ME_STA_DEL_REQ)] = "ME_STA_DEL_REQ", + [MSG_I(ME_STA_DEL_CFM)] = "ME_STA_DEL_CFM", + [MSG_I(ME_TX_CREDITS_UPDATE_IND)]= "ME_TX_CREDITS_UPDATE_IND", + [MSG_I(ME_RC_STATS_REQ)] = "ME_RC_STATS_REQ", + [MSG_I(ME_RC_STATS_CFM)] = "ME_RC_STATS_CFM", + [MSG_I(ME_RC_SET_RATE_REQ)] = "ME_RC_SET_RATE_REQ", + [MSG_I(ME_TRAFFIC_IND_REQ)] = "ME_TRAFFIC_IND_REQ", + [MSG_I(ME_TRAFFIC_IND_CFM)] = "ME_TRAFFIC_IND_CFM", + [MSG_I(ME_SET_PS_MODE_REQ)] = "ME_SET_PS_MODE_REQ", + [MSG_I(ME_SET_PS_MODE_CFM)] = "ME_SET_PS_MODE_CFM", +}; + +static const char *const rwnx_smid2str[MSG_I(SM_MAX)] = { + [MSG_I(SM_CONNECT_REQ)] = "SM_CONNECT_REQ", + [MSG_I(SM_CONNECT_CFM)] = "SM_CONNECT_CFM", + [MSG_I(SM_CONNECT_IND)] = "SM_CONNECT_IND", + [MSG_I(SM_DISCONNECT_REQ)] = "SM_DISCONNECT_REQ", + [MSG_I(SM_DISCONNECT_CFM)] = "SM_DISCONNECT_CFM", + [MSG_I(SM_DISCONNECT_IND)] = "SM_DISCONNECT_IND", + [MSG_I(SM_EXTERNAL_AUTH_REQUIRED_IND)] = "SM_EXTERNAL_AUTH_REQUIRED_IND", + [MSG_I(SM_EXTERNAL_AUTH_REQUIRED_RSP)] = "SM_EXTERNAL_AUTH_REQUIRED_RSP", + [MSG_I(SM_EXTERNAL_AUTH_REQUIRED_RSP_CFM)] = "SM_EXTERNAL_AUTH_REQUIRED_RSP_CFM", +}; + +static const char *const rwnx_apmid2str[MSG_I(APM_MAX)] = { + [MSG_I(APM_START_REQ)] = "APM_START_REQ", + [MSG_I(APM_START_CFM)] = "APM_START_CFM", + [MSG_I(APM_STOP_REQ)] = "APM_STOP_REQ", + [MSG_I(APM_STOP_CFM)] = "APM_STOP_CFM", + [MSG_I(APM_START_CAC_REQ)] = "APM_START_CAC_REQ", + [MSG_I(APM_START_CAC_CFM)] = "APM_START_CAC_CFM", + [MSG_I(APM_STOP_CAC_REQ)] = "APM_STOP_CAC_REQ", + [MSG_I(APM_STOP_CAC_CFM)] = "APM_STOP_CAC_CFM", + [MSG_I(APM_SET_BEACON_IE_REQ)] = "APM_SET_BEACON_IE_REQ", + [MSG_I(APM_SET_BEACON_IE_CFM)] = "APM_SET_BEACON_IE_CFM", +}; + +static const char *const rwnx_meshid2str[MSG_I(MESH_MAX)] = { + [MSG_I(MESH_START_REQ)] = "MESH_START_REQ", + [MSG_I(MESH_START_CFM)] = "MESH_START_CFM", + [MSG_I(MESH_STOP_REQ)] = "MESH_STOP_REQ", + [MSG_I(MESH_STOP_CFM)] = "MESH_STOP_CFM", + [MSG_I(MESH_UPDATE_REQ)] = "MESH_UPDATE_REQ", + [MSG_I(MESH_UPDATE_CFM)] = "MESH_UPDATE_CFM", + [MSG_I(MESH_PATH_CREATE_REQ)] = "MESH_PATH_CREATE_REQ", + [MSG_I(MESH_PATH_CREATE_CFM)] = "MESH_PATH_CREATE_CFM", + [MSG_I(MESH_PATH_UPDATE_REQ)] = "MESH_PATH_UPDATE_REQ", + [MSG_I(MESH_PATH_UPDATE_CFM)] = "MESH_PATH_UPDATE_CFM", + [MSG_I(MESH_PROXY_ADD_REQ)] = "MESH_PROXY_ADD_REQ", + [MSG_I(MESH_PEER_UPDATE_IND)] = "MESH_PEER_UPDATE_IND", + [MSG_I(MESH_PATH_UPDATE_IND)] = "MESH_PATH_UPDATE_IND", + [MSG_I(MESH_PROXY_UPDATE_IND)] = "MESH_PROXY_UPDATE_IND", +}; + +#endif /* CONFIG_RWNX_FULLMAC */ + +const char *const *rwnx_id2str[TASK_LAST_EMB + 1] = { + [TASK_MM] = rwnx_mmid2str, + [TASK_DBG] = rwnx_dbgid2str, + [TASK_SCAN] = rwnx_scanid2str, + [TASK_TDLS] = rwnx_tdlsid2str, +#ifdef CONFIG_RWNX_FULLMAC + [TASK_SCANU] = rwnx_scanuid2str, + [TASK_ME] = rwnx_meid2str, + [TASK_SM] = 
rwnx_smid2str, + [TASK_APM] = rwnx_apmid2str, + [TASK_MESH] = rwnx_meshid2str, +#endif +}; diff --git a/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_strs.h b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_strs.h new file mode 100644 index 0000000..5cf3f0d --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_strs.h @@ -0,0 +1,31 @@ +/** + **************************************************************************************** + * + * @file rwnx_strs.h + * + * @brief Miscellaneous debug strings + * + * Copyright (C) RivieraWaves 2014-2019 + * + **************************************************************************************** + */ + +#ifndef _RWNX_STRS_H_ +#define _RWNX_STRS_H_ + +#ifdef CONFIG_RWNX_FHOST + +#define RWNX_ID2STR(tag) "Cmd" + +#else +#include "lmac_msg.h" + +#define RWNX_ID2STR(tag) (((MSG_T(tag) < ARRAY_SIZE(rwnx_id2str)) && \ + (rwnx_id2str[MSG_T(tag)]) && \ + ((rwnx_id2str[MSG_T(tag)])[MSG_I(tag)])) ? \ + (rwnx_id2str[MSG_T(tag)])[MSG_I(tag)] : "unknown") + +extern const char *const *rwnx_id2str[TASK_LAST_EMB + 1]; +#endif /* CONFIG_RWNX_FHOST */ + +#endif /* _RWNX_STRS_H_ */ diff --git a/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_tdls.c b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_tdls.c new file mode 100644 index 0000000..ce400ce --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_tdls.c @@ -0,0 +1,796 @@ +/** + ****************************************************************************** + * + * @file rwnx_tx.c + * + * Copyright (C) RivieraWaves 2012-2019 + * + ****************************************************************************** + */ + +/** + * INCLUDE FILES + ****************************************************************************** + */ + +#include "rwnx_tdls.h" +#include "rwnx_compat.h" + +/** + * FUNCTION DEFINITIONS + ****************************************************************************** + */ + +static u16 +rwnx_get_tdls_sta_capab(struct rwnx_vif *rwnx_vif, u16 status_code) +{ + u16 capab = 0; + + /* The capability will be 0 when sending a failure code */ + if (status_code != 0) + return capab; + + if (rwnx_vif->sta.ap->band != NL80211_BAND_2GHZ) + return capab; + + capab |= WLAN_CAPABILITY_SHORT_SLOT_TIME; + capab |= WLAN_CAPABILITY_SHORT_PREAMBLE; + + return capab; +} + +static int +rwnx_tdls_prepare_encap_data(struct rwnx_hw *rwnx_hw, struct rwnx_vif *rwnx_vif, + const u8 *peer, u8 action_code, u8 dialog_token, + u16 status_code, struct sk_buff *skb) +{ + struct ieee80211_tdls_data *tf; + tf = (void *)skb_put(skb, sizeof(struct ieee80211_tdls_data) - sizeof(tf->u)); + + // set eth header + memcpy(tf->da, peer, ETH_ALEN); + memcpy(tf->sa, rwnx_hw->wiphy->perm_addr, ETH_ALEN); + tf->ether_type = cpu_to_be16(ETH_P_TDLS); + + // set common TDLS info + tf->payload_type = WLAN_TDLS_SNAP_RFTYPE; + tf->category = WLAN_CATEGORY_TDLS; + tf->action_code = action_code; + + // set action specific TDLS info + switch (action_code) { + case WLAN_TDLS_SETUP_REQUEST: + skb_put(skb, sizeof(tf->u.setup_req)); + tf->u.setup_req.dialog_token = dialog_token; + tf->u.setup_req.capability = + cpu_to_le16(rwnx_get_tdls_sta_capab(rwnx_vif, status_code)); + break; + + case WLAN_TDLS_SETUP_RESPONSE: + skb_put(skb, sizeof(tf->u.setup_resp)); + tf->u.setup_resp.status_code = cpu_to_le16(status_code); + tf->u.setup_resp.dialog_token = dialog_token; + tf->u.setup_resp.capability = + cpu_to_le16(rwnx_get_tdls_sta_capab(rwnx_vif, status_code)); + break; + + case WLAN_TDLS_SETUP_CONFIRM: + skb_put(skb, 
sizeof(tf->u.setup_cfm)); + tf->u.setup_cfm.status_code = cpu_to_le16(status_code); + tf->u.setup_cfm.dialog_token = dialog_token; + break; + + case WLAN_TDLS_TEARDOWN: + skb_put(skb, sizeof(tf->u.teardown)); + tf->u.teardown.reason_code = cpu_to_le16(status_code); + break; + + case WLAN_TDLS_DISCOVERY_REQUEST: + skb_put(skb, sizeof(tf->u.discover_req)); + tf->u.discover_req.dialog_token = dialog_token; + break; + + default: + return -EINVAL; + } + + return 0; +} + +static int +rwnx_prep_tdls_direct(struct rwnx_hw *rwnx_hw, struct rwnx_vif *rwnx_vif, + const u8 *peer, u8 action_code, u8 dialog_token, + u16 status_code, struct sk_buff *skb) +{ + struct ieee80211_mgmt *mgmt; + + mgmt = (void *)skb_put(skb, 24); + memset(mgmt, 0, 24); + memcpy(mgmt->da, peer, ETH_ALEN); + memcpy(mgmt->sa, rwnx_hw->wiphy->perm_addr, ETH_ALEN); + memcpy(mgmt->bssid, rwnx_vif->sta.ap->mac_addr, ETH_ALEN); + + mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | + IEEE80211_STYPE_ACTION); + + switch (action_code) { + case WLAN_PUB_ACTION_TDLS_DISCOVER_RES: + skb_put(skb, 1 + sizeof(mgmt->u.action.u.tdls_discover_resp)); + mgmt->u.action.category = WLAN_CATEGORY_PUBLIC; + mgmt->u.action.u.tdls_discover_resp.action_code = WLAN_PUB_ACTION_TDLS_DISCOVER_RES; + mgmt->u.action.u.tdls_discover_resp.dialog_token = dialog_token; + mgmt->u.action.u.tdls_discover_resp.capability = + cpu_to_le16(rwnx_get_tdls_sta_capab(rwnx_vif, status_code)); + break; + default: + return -EINVAL; + } + + return 0; +} + +static int +rwnx_add_srates_ie(struct rwnx_hw *rwnx_hw, struct sk_buff *skb) +{ + u8 i, rates, *pos; + int rate; + struct ieee80211_supported_band *rwnx_band_2GHz = rwnx_hw->wiphy->bands[NL80211_BAND_2GHZ]; + + rates = 8; + + if (skb_tailroom(skb) < rates + 2) + return -ENOMEM; + + pos = skb_put(skb, rates + 2); + *pos++ = WLAN_EID_SUPP_RATES; + *pos++ = rates; + for (i = 0; i < rates; i++) { + rate = rwnx_band_2GHz->bitrates[i].bitrate; + rate = DIV_ROUND_UP(rate, 5); + *pos++ = (u8)rate; + } + + return 0; +} + +static int +rwnx_add_ext_srates_ie(struct rwnx_hw *rwnx_hw, struct sk_buff *skb) +{ + u8 i, exrates, *pos; + int rate; + struct ieee80211_supported_band *rwnx_band_2GHz = rwnx_hw->wiphy->bands[NL80211_BAND_2GHZ]; + + exrates = rwnx_band_2GHz->n_bitrates - 8; + + if (skb_tailroom(skb) < exrates + 2) + return -ENOMEM; + + pos = skb_put(skb, exrates + 2); + *pos++ = WLAN_EID_EXT_SUPP_RATES; + *pos++ = exrates; + for (i = 8; i < (8+exrates); i++) { + rate = rwnx_band_2GHz->bitrates[i].bitrate; + rate = DIV_ROUND_UP(rate, 5); + *pos++ = (u8)rate; + } + + return 0; +} + +static void +rwnx_tdls_add_supp_channels(struct rwnx_hw *rwnx_hw, struct sk_buff *skb) +{ + /* + * Add possible channels for TDLS. These are channels that are allowed + * to be active. + */ + u8 subband_cnt = 0; + u8 *pos_subband; + u8 *pos = skb_put(skb, 2); + struct ieee80211_supported_band *rwnx_band_2GHz = rwnx_hw->wiphy->bands[NL80211_BAND_2GHZ]; + //#ifdef USE_5G + struct ieee80211_supported_band *rwnx_band_5GHz = rwnx_hw->wiphy->bands[NL80211_BAND_5GHZ]; + //#endif + + *pos++ = WLAN_EID_SUPPORTED_CHANNELS; + + /* + * 5GHz and 2GHz channels numbers can overlap. Ignore this for now, as + * this doesn't happen in real world scenarios. 
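+ * Each subband entry written below is two bytes: the first channel number
+ * of the subband followed by the number of channels it contains; the IE
+ * length byte is patched at the end once the subband count is known.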
+ */ + + /* 2GHz, with 5MHz spacing */ + pos_subband = skb_put(skb, 2); + if (rwnx_band_2GHz->n_channels > 0) + { + *pos_subband++ = ieee80211_frequency_to_channel(rwnx_band_2GHz->channels[0].center_freq); + *pos_subband++ = rwnx_band_2GHz->n_channels; + subband_cnt++; + } + + /* 5GHz, with 20MHz spacing */ + pos_subband = skb_put(skb, 2); + //#ifdef USE_5G + if(rwnx_hw->band_5g_support){ + if (rwnx_band_5GHz->n_channels > 0) + { + *pos_subband++ = ieee80211_frequency_to_channel(rwnx_band_5GHz->channels[0].center_freq); + *pos_subband++ = rwnx_band_5GHz->n_channels; + subband_cnt++; + } + } + //#endif + /* length */ + *pos = 2 * subband_cnt; +} + +static void +rwnx_tdls_add_ext_capab(struct rwnx_hw *rwnx_hw, struct sk_buff *skb) +{ + u8 *pos = (void *)skb_put(skb, 7); + bool chan_switch = rwnx_hw->wiphy->features & + NL80211_FEATURE_TDLS_CHANNEL_SWITCH; + + *pos++ = WLAN_EID_EXT_CAPABILITY; + *pos++ = 5; /* len */ + *pos++ = 0x0; + *pos++ = 0x0; + *pos++ = 0x0; + *pos++ = WLAN_EXT_CAPA4_TDLS_BUFFER_STA | + (chan_switch ? WLAN_EXT_CAPA4_TDLS_CHAN_SWITCH : 0); + *pos++ = WLAN_EXT_CAPA5_TDLS_ENABLED; +} + +static void +rwnx_add_wmm_info_ie(struct sk_buff *skb, u8 qosinfo) +{ + u8 *pos = (void *)skb_put(skb, 9); + + *pos++ = WLAN_EID_VENDOR_SPECIFIC; + *pos++ = 7; /* len */ + *pos++ = 0x00; /* Microsoft OUI 00:50:F2 */ + *pos++ = 0x50; + *pos++ = 0xf2; + *pos++ = 2; /* WME */ + *pos++ = 0; /* WME info */ + *pos++ = 1; /* WME ver */ + *pos++ = qosinfo; /* U-APSD no in use */ +} + +/* translate numbering in the WMM parameter IE to the mac80211 notation */ +static u8 rwnx_ac_from_wmm(int ac) +{ + switch (ac) { + default: + WARN_ON_ONCE(1); + case 0: + return AC_BE; + case 1: + return AC_BK; + case 2: + return AC_VI; + case 3: + return AC_VO; + } +} + +static void +rwnx_add_wmm_param_ie(struct sk_buff *skb, u8 acm_bits, u32 *ac_params) +{ + struct ieee80211_wmm_param_ie *wmm; + int i, j; + u8 cw_min, cw_max; + bool acm; + + wmm = (void *)skb_put(skb, sizeof(struct ieee80211_wmm_param_ie)); + memset(wmm, 0, sizeof(*wmm)); + + wmm->element_id = WLAN_EID_VENDOR_SPECIFIC; + wmm->len = sizeof(*wmm) - 2; + + wmm->oui[0] = 0x00; /* Microsoft OUI 00:50:F2 */ + wmm->oui[1] = 0x50; + wmm->oui[2] = 0xf2; + wmm->oui_type = 2; /* WME */ + wmm->oui_subtype = 1; /* WME param */ + wmm->version = 1; /* WME ver */ + wmm->qos_info = 0; /* U-APSD not in use */ + + /* + * Use the EDCA parameters defined for the BSS, or default if the AP + * doesn't support it, as mandated by 802.11-2012 section 10.22.4 + */ + for (i = 0; i < AC_MAX; i++) { + j = rwnx_ac_from_wmm(i); + cw_min = (ac_params[j] & 0xF0 ) >> 4; + cw_max = (ac_params[j] & 0xF00 ) >> 8; + acm = (acm_bits & (1 << j)) != 0; + + wmm->ac[i].aci_aifsn = (i << 5) | (acm << 4) | (ac_params[j] & 0xF); + wmm->ac[i].cw = (cw_max << 4) | cw_min; + wmm->ac[i].txop_limit = (ac_params[j] & 0x0FFFF000 ) >> 12; + } +} + +static void +rwnx_tdls_add_oper_classes(struct rwnx_vif *rwnx_vif, struct sk_buff *skb) +{ + u8 *pos; + u8 op_class; + struct cfg80211_chan_def chan_def; + struct ieee80211_channel chan; + + chan.band = rwnx_vif->sta.ap->band; + chan.center_freq = rwnx_vif->sta.ap->center_freq; + chan_def.chan = &chan; + chan_def.width = rwnx_vif->sta.ap->width; + chan_def.center_freq1 = rwnx_vif->sta.ap->center_freq1; + chan_def.center_freq2 = rwnx_vif->sta.ap->center_freq2; + + if (!ieee80211_chandef_to_operating_class(&chan_def, &op_class)) + return; + + pos = skb_put(skb, 4); + *pos++ = WLAN_EID_SUPPORTED_REGULATORY_CLASSES; + *pos++ = 2; /* len */ + + // current op 
class + *pos++ = op_class; + *pos++ = op_class; /* give current operating class as alternate too */ + + // need to add 5GHz classes? +} + +static void +rwnx_ie_build_ht_cap(struct sk_buff *skb, struct ieee80211_sta_ht_cap *ht_cap, + u16 cap) +{ + u8 *pos; + __le16 tmp; + + pos = skb_put(skb, sizeof(struct ieee80211_ht_cap) + 2); + *pos++ = WLAN_EID_HT_CAPABILITY; + *pos++ = sizeof(struct ieee80211_ht_cap); + memset(pos, 0, sizeof(struct ieee80211_ht_cap)); + + /* capability flags */ + tmp = cpu_to_le16(cap); + memcpy(pos, &tmp, sizeof(u16)); + pos += sizeof(u16); + + /* AMPDU parameters */ + *pos++ = ht_cap->ampdu_factor | + (ht_cap->ampdu_density << + IEEE80211_HT_AMPDU_PARM_DENSITY_SHIFT); + + /* MCS set */ + memcpy(pos, &ht_cap->mcs, sizeof(ht_cap->mcs)); + pos += sizeof(ht_cap->mcs); + + /* extended capabilities */ + pos += sizeof(__le16); + + /* BF capabilities */ + pos += sizeof(__le32); + + /* antenna selection */ + pos += sizeof(u8); +} + +static void +rwnx_ie_build_vht_cap(struct sk_buff *skb, struct ieee80211_sta_vht_cap *vht_cap, + u32 cap) +{ + u8 *pos; + __le32 tmp; + + pos = skb_put(skb, 14); + + *pos++ = WLAN_EID_VHT_CAPABILITY; + *pos++ = sizeof(struct ieee80211_vht_cap); + memset(pos, 0, sizeof(struct ieee80211_vht_cap)); + + /* capability flags */ + tmp = cpu_to_le32(cap); + memcpy(pos, &tmp, sizeof(u32)); + pos += sizeof(u32); + + /* VHT MCS set */ + memcpy(pos, &vht_cap->vht_mcs, sizeof(vht_cap->vht_mcs)); + pos += sizeof(vht_cap->vht_mcs); +} + +static void +rwnx_tdls_add_bss_coex_ie(struct sk_buff *skb) +{ + u8 *pos = (void *)skb_put(skb, 3); + + *pos++ = WLAN_EID_BSS_COEX_2040; + *pos++ = 1; /* len */ + + *pos++ = WLAN_BSS_COEX_INFORMATION_REQUEST; +} + +static void +rwnx_tdls_add_link_ie(struct rwnx_hw *rwnx_hw, struct rwnx_vif *rwnx_vif, + struct sk_buff *skb, const u8 *peer, + bool initiator) +{ + struct ieee80211_tdls_lnkie *lnkid; + const u8 *init_addr, *rsp_addr; + + if (initiator) { + init_addr = rwnx_hw->wiphy->perm_addr; + rsp_addr = peer; + } else { + init_addr = peer; + rsp_addr = rwnx_hw->wiphy->perm_addr; + } + + lnkid = (void *)skb_put(skb, sizeof(struct ieee80211_tdls_lnkie)); + + lnkid->ie_type = WLAN_EID_LINK_ID; + lnkid->ie_len = sizeof(struct ieee80211_tdls_lnkie) - 2; + + memcpy(lnkid->bssid, rwnx_vif->sta.ap->mac_addr, ETH_ALEN); + memcpy(lnkid->init_sta, init_addr, ETH_ALEN); + memcpy(lnkid->resp_sta, rsp_addr, ETH_ALEN); +} + +static void +rwnx_tdls_add_aid_ie(struct rwnx_vif *rwnx_vif, struct sk_buff *skb) +{ + u8 *pos = (void *)skb_put(skb, 4); + + #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0) + *pos++ = WLAN_EID_AID; + #else + *pos++ = 197; + #endif + *pos++ = 2; /* len */ + *pos++ = rwnx_vif->sta.ap->aid; +} + +static u8 * +rwnx_ie_build_ht_oper(struct rwnx_hw *rwnx_hw, struct rwnx_vif *rwnx_vif, + u8 *pos, struct ieee80211_sta_ht_cap *ht_cap, + u16 prot_mode) +{ + struct ieee80211_ht_operation *ht_oper; + /* Build HT Information */ + *pos++ = WLAN_EID_HT_OPERATION; + *pos++ = sizeof(struct ieee80211_ht_operation); + ht_oper = (struct ieee80211_ht_operation *)pos; + ht_oper->primary_chan = ieee80211_frequency_to_channel( + rwnx_vif->sta.ap->center_freq); + switch (rwnx_vif->sta.ap->width) { + case NL80211_CHAN_WIDTH_160: + case NL80211_CHAN_WIDTH_80P80: + case NL80211_CHAN_WIDTH_80: + case NL80211_CHAN_WIDTH_40: + if (rwnx_vif->sta.ap->center_freq1 > rwnx_vif->sta.ap->center_freq) + ht_oper->ht_param = IEEE80211_HT_PARAM_CHA_SEC_ABOVE; + else + ht_oper->ht_param = IEEE80211_HT_PARAM_CHA_SEC_BELOW; + break; + default: + 
ht_oper->ht_param = IEEE80211_HT_PARAM_CHA_SEC_NONE; + break; + } + if (ht_cap->cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40 && + rwnx_vif->sta.ap->width != NL80211_CHAN_WIDTH_20_NOHT && + rwnx_vif->sta.ap->width != NL80211_CHAN_WIDTH_20) + ht_oper->ht_param |= IEEE80211_HT_PARAM_CHAN_WIDTH_ANY; + + ht_oper->operation_mode = cpu_to_le16(prot_mode); + ht_oper->stbc_param = 0x0000; + + /* It seems that Basic MCS set and Supported MCS set + are identical for the first 10 bytes */ + memset(&ht_oper->basic_set, 0, 16); + memcpy(&ht_oper->basic_set, &ht_cap->mcs, 10); + + return pos + sizeof(struct ieee80211_ht_operation); +} + +static u8 * +rwnx_ie_build_vht_oper(struct rwnx_hw *rwnx_hw, struct rwnx_vif *rwnx_vif, + u8 *pos, struct ieee80211_sta_ht_cap *ht_cap, + u16 prot_mode) +{ + struct ieee80211_vht_operation *vht_oper; + /* Build HT Information */ + *pos++ = WLAN_EID_VHT_OPERATION; + *pos++ = sizeof(struct ieee80211_vht_operation); + vht_oper = (struct ieee80211_vht_operation *)pos; + + switch (rwnx_vif->sta.ap->width) { + case NL80211_CHAN_WIDTH_80: + vht_oper->chan_width = IEEE80211_VHT_CHANWIDTH_80MHZ; // Channel Width + CCFS0(vht_oper) = + ieee80211_frequency_to_channel(rwnx_vif->sta.ap->center_freq); // Channel Center Frequency Segment 0 + CCFS1(vht_oper) = 0; // Channel Center Frequency Segment 1 (N.A.) + break; + case NL80211_CHAN_WIDTH_160: + vht_oper->chan_width = IEEE80211_VHT_CHANWIDTH_160MHZ; // Channel Width + CCFS0(vht_oper) = + ieee80211_frequency_to_channel(rwnx_vif->sta.ap->center_freq); // Channel Center Frequency Segment 0 + CCFS1(vht_oper) = 0; // Channel Center Frequency Segment 1 (N.A.) + break; + case NL80211_CHAN_WIDTH_80P80: + vht_oper->chan_width = IEEE80211_VHT_CHANWIDTH_80P80MHZ; // Channel Width + CCFS0(vht_oper) = + ieee80211_frequency_to_channel(rwnx_vif->sta.ap->center_freq1); // Channel Center Frequency Segment 0 + CCFS1(vht_oper) = + ieee80211_frequency_to_channel(rwnx_vif->sta.ap->center_freq2); // Channel Center Frequency Segment 1 + break; + default: + vht_oper->chan_width = IEEE80211_VHT_CHANWIDTH_USE_HT; + CCFS0(vht_oper) = 0; + CCFS1(vht_oper) = 0; + break; + } + + vht_oper->basic_mcs_set = cpu_to_le16(rwnx_hw->mod_params->mcs_map); + + return pos + sizeof(struct ieee80211_vht_operation); + +} + +static void +rwnx_tdls_add_setup_start_ies(struct rwnx_hw *rwnx_hw, struct rwnx_vif *rwnx_vif, + struct sk_buff *skb, const u8 *peer, + u8 action_code, bool initiator, + const u8 *extra_ies, size_t extra_ies_len) +{ + enum nl80211_band band = rwnx_vif->sta.ap->band; + struct ieee80211_supported_band *sband; + struct ieee80211_sta_ht_cap ht_cap; + struct ieee80211_sta_vht_cap vht_cap; + size_t offset = 0, noffset; + u8 *pos; + + rcu_read_lock(); + + rwnx_add_srates_ie(rwnx_hw, skb); + rwnx_add_ext_srates_ie(rwnx_hw, skb); + rwnx_tdls_add_supp_channels(rwnx_hw, skb); + rwnx_tdls_add_ext_capab(rwnx_hw, skb); + + /* add the QoS element if we support it */ + if (/*local->hw.queues >= IEEE80211_NUM_ACS &&*/ + action_code != WLAN_PUB_ACTION_TDLS_DISCOVER_RES) + rwnx_add_wmm_info_ie(skb, 0); /* no U-APSD */ + + rwnx_tdls_add_oper_classes(rwnx_vif, skb); + + /* + * with TDLS we can switch channels, and HT-caps are not necessarily + * the same on all bands. The specification limits the setup to a + * single HT-cap, so use the current band for now. 
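+ * The HT capability IE is therefore only appended for setup request,
+ * setup response and discovery response frames, and only when the current
+ * band reports HT support (see the checks below).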
+ */ + sband = rwnx_hw->wiphy->bands[band]; + memcpy(&ht_cap, &sband->ht_cap, sizeof(ht_cap)); + if (((action_code == WLAN_TDLS_SETUP_REQUEST) || + (action_code == WLAN_TDLS_SETUP_RESPONSE) || + (action_code == WLAN_PUB_ACTION_TDLS_DISCOVER_RES)) && + ht_cap.ht_supported /* (!sta || sta->sta.ht_cap.ht_supported)*/) { + rwnx_ie_build_ht_cap(skb, &ht_cap, ht_cap.cap); + } + + if (ht_cap.ht_supported && + (ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40)) + rwnx_tdls_add_bss_coex_ie(skb); + + rwnx_tdls_add_link_ie(rwnx_hw, rwnx_vif, skb, peer, initiator); + + memcpy(&vht_cap, &sband->vht_cap, sizeof(vht_cap)); + if (vht_cap.vht_supported) { + rwnx_tdls_add_aid_ie(rwnx_vif, skb); + rwnx_ie_build_vht_cap(skb, &vht_cap, vht_cap.cap); + // Operating mode Notification (optional) + } + + /* add any remaining IEs */ + if (extra_ies_len) { + noffset = extra_ies_len; + pos = skb_put(skb, noffset - offset); + memcpy(pos, extra_ies + offset, noffset - offset); + } + + rcu_read_unlock(); +} + + +static void +rwnx_tdls_add_setup_cfm_ies(struct rwnx_hw *rwnx_hw, struct rwnx_vif *rwnx_vif, + struct sk_buff *skb, const u8 *peer, bool initiator, + const u8 *extra_ies, size_t extra_ies_len) +{ + struct ieee80211_supported_band *sband; + enum nl80211_band band = rwnx_vif->sta.ap->band; + struct ieee80211_sta_ht_cap ht_cap; + struct ieee80211_sta_vht_cap vht_cap; + + size_t offset = 0, noffset; + struct rwnx_sta *sta, *ap_sta; + u8 *pos; + + rcu_read_lock(); + + sta = rwnx_get_sta(rwnx_hw, peer); + ap_sta = rwnx_vif->sta.ap; + if (WARN_ON_ONCE(!sta || !ap_sta)) { + rcu_read_unlock(); + return; + } + + /* add the QoS param IE if both the peer and we support it */ + if (sta->qos) + rwnx_add_wmm_param_ie(skb, ap_sta->acm, ap_sta->ac_param); + + /* if HT support is only added in TDLS, we need an HT-operation IE */ + sband = rwnx_hw->wiphy->bands[band]; + memcpy(&ht_cap, &sband->ht_cap, sizeof(ht_cap)); + if (ht_cap.ht_supported && !ap_sta->ht && sta->ht) { + pos = skb_put(skb, 2 + sizeof(struct ieee80211_ht_operation)); + /* send an empty HT operation IE */ + rwnx_ie_build_ht_oper(rwnx_hw, rwnx_vif, pos, &ht_cap, 0); + } + + rwnx_tdls_add_link_ie(rwnx_hw, rwnx_vif, skb, peer, initiator); + + memcpy(&vht_cap, &sband->vht_cap, sizeof(vht_cap)); + if (vht_cap.vht_supported && !ap_sta->vht && sta->vht) { + pos = skb_put(skb, 2 + sizeof(struct ieee80211_vht_operation)); + rwnx_ie_build_vht_oper(rwnx_hw, rwnx_vif, pos, &ht_cap, 0); + // Operating mode Notification (optional) + } + + /* add any remaining IEs */ + if (extra_ies_len) { + noffset = extra_ies_len; + pos = skb_put(skb, noffset - offset); + memcpy(pos, extra_ies + offset, noffset - offset); + } + + rcu_read_unlock(); +} + +static void +rwnx_tdls_add_ies(struct rwnx_hw *rwnx_hw, struct rwnx_vif *rwnx_vif, + struct sk_buff *skb, const u8 *peer, + u8 action_code, u16 status_code, + bool initiator, const u8 *extra_ies, + size_t extra_ies_len, u8 oper_class, + struct cfg80211_chan_def *chandef) +{ + switch (action_code) { + case WLAN_TDLS_SETUP_REQUEST: + case WLAN_TDLS_SETUP_RESPONSE: + case WLAN_PUB_ACTION_TDLS_DISCOVER_RES: + if (status_code == 0) + rwnx_tdls_add_setup_start_ies(rwnx_hw, rwnx_vif, skb, peer, action_code, + initiator, extra_ies, extra_ies_len); + break; + case WLAN_TDLS_SETUP_CONFIRM: + if (status_code == 0) + rwnx_tdls_add_setup_cfm_ies(rwnx_hw, rwnx_vif, skb, peer, initiator, + extra_ies, extra_ies_len); + break; + + case WLAN_TDLS_TEARDOWN: + case WLAN_TDLS_DISCOVERY_REQUEST: + if (extra_ies_len) + memcpy(skb_put(skb, extra_ies_len), extra_ies, + 
extra_ies_len); + if (status_code == 0 || action_code == WLAN_TDLS_TEARDOWN) + rwnx_tdls_add_link_ie(rwnx_hw, rwnx_vif, skb, peer, initiator); + break; + } +} + +int +rwnx_tdls_send_mgmt_packet_data(struct rwnx_hw *rwnx_hw, struct rwnx_vif *rwnx_vif, + const u8 *peer, u8 action_code, u8 dialog_token, + u16 status_code, u32 peer_capability, bool initiator, + const u8 *extra_ies, size_t extra_ies_len, u8 oper_class, + struct cfg80211_chan_def *chandef) +{ + struct sk_buff *skb; + int ret = 0; + struct ieee80211_supported_band *rwnx_band_2GHz = rwnx_hw->wiphy->bands[NL80211_BAND_2GHZ]; + //#ifdef USE_5G + struct ieee80211_supported_band *rwnx_band_5GHz = rwnx_hw->wiphy->bands[NL80211_BAND_5GHZ]; + //#endif + int channels = rwnx_band_2GHz->n_channels; + + if (rwnx_hw->band_5g_support){ + channels += rwnx_band_5GHz->n_channels; + } + + skb = netdev_alloc_skb(rwnx_vif->ndev, + sizeof(struct ieee80211_tdls_data) + // ethhdr + TDLS info + 10 + /* supported rates */ + 6 + /* extended supported rates */ + (2 + channels) + /* supported channels */ + //#ifdef USE_5G + //(2 + rwnx_band_2GHz->n_channels + rwnx_band_5GHz->n_channels) + /* supported channels */ + //#else + //(2 + rwnx_band_2GHz->n_channels) + /* supported channels */ + //#endif + sizeof(struct ieee_types_extcap) + + sizeof(struct ieee80211_wmm_param_ie) + + 4 + /* oper classes */ + 28 + //sizeof(struct ieee80211_ht_cap) + + sizeof(struct ieee_types_bss_co_2040) + + sizeof(struct ieee80211_tdls_lnkie) + + (2 + sizeof(struct ieee80211_vht_cap)) + + 4 + /*AID*/ + (2 + sizeof(struct ieee80211_ht_operation)) + + extra_ies_len); + + if (!skb) + return 0; + + switch (action_code) { + case WLAN_TDLS_SETUP_REQUEST: + case WLAN_TDLS_SETUP_RESPONSE: + case WLAN_TDLS_SETUP_CONFIRM: + case WLAN_TDLS_TEARDOWN: + case WLAN_TDLS_DISCOVERY_REQUEST: + ret = rwnx_tdls_prepare_encap_data(rwnx_hw, rwnx_vif, peer, action_code, + dialog_token, status_code, skb); + break; + + case WLAN_PUB_ACTION_TDLS_DISCOVER_RES: + ret = rwnx_prep_tdls_direct(rwnx_hw, rwnx_vif, peer, action_code, + dialog_token, status_code, skb); + break; + + default: + ret = -ENOTSUPP; + break; + } + + if (ret < 0) + goto fail; + + rwnx_tdls_add_ies(rwnx_hw, rwnx_vif, skb, peer, action_code, status_code, + initiator, extra_ies, extra_ies_len, oper_class, chandef); + + if (action_code == WLAN_PUB_ACTION_TDLS_DISCOVER_RES) { + u64 cookie; + #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)) + struct cfg80211_mgmt_tx_params params; + + params.len = skb->len; + params.buf = skb->data; + ret = rwnx_start_mgmt_xmit(rwnx_vif, NULL, ¶ms, false, &cookie); + #else + ret = rwnx_start_mgmt_xmit(rwnx_vif, NULL, NULL, false, 0, skb->data, skb->len, false, false, &cookie); + #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0) */ + + return ret; + } + + switch (action_code) { + case WLAN_TDLS_SETUP_REQUEST: + case WLAN_TDLS_SETUP_RESPONSE: + case WLAN_TDLS_SETUP_CONFIRM: + skb->priority = 2; + break; + default: + skb->priority = 5; + break; + } + + ret = rwnx_select_txq(rwnx_vif, skb); + ret = rwnx_start_xmit(skb, rwnx_vif->ndev); + + return ret; + +fail: + dev_kfree_skb(skb); + return ret; +} diff --git a/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_tdls.h b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_tdls.h new file mode 100644 index 0000000..9e4aa90 --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_tdls.h @@ -0,0 +1,54 @@ +/** + ****************************************************************************** + * + * @file rwnx_tdls.h + * + * @brief TDLS function 
declarations + * + * Copyright (C) RivieraWaves 2012-2019 + * + ****************************************************************************** + */ + +#ifndef RWNX_TDLS_H_ +#define RWNX_TDLS_H_ + +#include "rwnx_defs.h" + +struct ieee_types_header { + u8 element_id; + u8 len; +} __packed; + +struct ieee_types_bss_co_2040 { + struct ieee_types_header ieee_hdr; + u8 bss_2040co; +} __packed; + +struct ieee_types_extcap { + struct ieee_types_header ieee_hdr; + u8 ext_capab[8]; +} __packed; + +struct ieee_types_vht_cap { + struct ieee_types_header ieee_hdr; + struct ieee80211_vht_cap vhtcap; +} __packed; + +struct ieee_types_vht_oper { + struct ieee_types_header ieee_hdr; + struct ieee80211_vht_operation vhtoper; +} __packed; + +struct ieee_types_aid { + struct ieee_types_header ieee_hdr; + u16 aid; +} __packed; + +int rwnx_tdls_send_mgmt_packet_data(struct rwnx_hw *rwnx_hw, struct rwnx_vif *rwnx_vif, + const u8 *peer, u8 action_code, u8 dialog_token, + u16 status_code, u32 peer_capability, bool initiator, + const u8 *extra_ies, size_t extra_ies_len, u8 oper_class, + struct cfg80211_chan_def *chandef); + +#endif /* RWNX_TDLS_H_ */ diff --git a/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_testmode.c b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_testmode.c new file mode 100644 index 0000000..01d7fe0 --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_testmode.c @@ -0,0 +1,226 @@ +/** + **************************************************************************************** + * + * @file rwnx_testmode.c + * + * @brief Test mode function definitions + * + * Copyright (C) RivieraWaves 2012-2019 + * + **************************************************************************************** + */ + +#include +#include + +#include "rwnx_testmode.h" +#include "rwnx_msg_tx.h" +#include "rwnx_dini.h" +#include "reg_access.h" + +/* + * This function handles the user application commands for register access. + * + * It retrieves command ID carried with RWNX_TM_ATTR_COMMAND and calls to the + * handlers respectively. + * + * If it's an unknown commdn ID, -ENOSYS is returned; or -ENOMSG if the + * mandatory fields(RWNX_TM_ATTR_REG_OFFSET,RWNX_TM_ATTR_REG_VALUE32) + * are missing; Otherwise 0 is replied indicating the success of the command execution. + * + * If RWNX_TM_ATTR_COMMAND is RWNX_TM_CMD_APP2DEV_REG_READ, the register read + * value is returned with RWNX_TM_ATTR_REG_VALUE32. 
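+ * As a rough illustration (attribute layout only; the exact userspace
+ * tooling is not part of this driver), a register read request is an
+ * NL80211_CMD_TESTMODE message carrying:
+ *   nla_put_u32(msg, RWNX_TM_ATTR_COMMAND, RWNX_TM_CMD_APP2DEV_REG_READ);
+ *   nla_put_u32(msg, RWNX_TM_ATTR_REG_OFFSET, reg_addr);
+ * and the reply carries the value in RWNX_TM_ATTR_REG_VALUE32.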
+ * + * @hw: ieee80211_hw object that represents the device + * @tb: general message fields from the user space + */ +int rwnx_testmode_reg(struct ieee80211_hw *hw, struct nlattr **tb) +{ + struct rwnx_hw *rwnx_hw = hw->priv; + u32 mem_addr, val32; + struct sk_buff *skb; + int status = 0; + + /* First check if register address is there */ + if (!tb[RWNX_TM_ATTR_REG_OFFSET]) { + printk("Error finding register offset\n"); + return -ENOMSG; + } + + mem_addr = nla_get_u32(tb[RWNX_TM_ATTR_REG_OFFSET]); + + switch (nla_get_u32(tb[RWNX_TM_ATTR_COMMAND])) { + case RWNX_TM_CMD_APP2DEV_REG_READ: + { + struct dbg_mem_read_cfm mem_read_cfm; + + /*** Send the command to the LMAC ***/ + if ((status = rwnx_send_dbg_mem_read_req(rwnx_hw, mem_addr, &mem_read_cfm))) + return status; + + /* Allocate the answer message */ + skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 20); + if (!skb) { + printk("Error allocating memory\n"); + return -ENOMEM; + } + + val32 = mem_read_cfm.memdata; + if (nla_put_u32(skb, RWNX_TM_ATTR_REG_VALUE32, val32)) + goto nla_put_failure; + + /* Send the answer to upper layer */ + status = cfg80211_testmode_reply(skb); + if (status < 0) + printk("Error sending msg : %d\n", status); + } + break; + + case RWNX_TM_CMD_APP2DEV_REG_WRITE: + { + if (!tb[RWNX_TM_ATTR_REG_VALUE32]) { + printk("Error finding value to write\n"); + return -ENOMSG; + } else { + val32 = nla_get_u32(tb[RWNX_TM_ATTR_REG_VALUE32]); + /* Send the command to the LMAC */ + if ((status = rwnx_send_dbg_mem_write_req(rwnx_hw, mem_addr, val32))) + return status; + } + } + break; + + default: + printk("Unknown testmode register command ID\n"); + return -ENOSYS; + } + + return status; + +nla_put_failure: + kfree_skb(skb); + return -EMSGSIZE; +} + +/* + * This function handles the user application commands for Debug filter settings. + * + * @hw: ieee80211_hw object that represents the device + * @tb: general message fields from the user space + */ +int rwnx_testmode_dbg_filter(struct ieee80211_hw *hw, struct nlattr **tb) +{ + struct rwnx_hw *rwnx_hw = hw->priv; + u32 filter; + int status = 0; + + /* First check if the filter is there */ + if (!tb[RWNX_TM_ATTR_REG_FILTER]) { + printk("Error finding filter value\n"); + return -ENOMSG; + } + + filter = nla_get_u32(tb[RWNX_TM_ATTR_REG_FILTER]); + RWNX_DBG("testmode debug filter, setting: 0x%x\n", filter); + + switch (nla_get_u32(tb[RWNX_TM_ATTR_COMMAND])) { + case RWNX_TM_CMD_APP2DEV_SET_DBGMODFILTER: + { + /* Send the command to the LMAC */ + if ((status = rwnx_send_dbg_set_mod_filter_req(rwnx_hw, filter))) + return status; + } + break; + case RWNX_TM_CMD_APP2DEV_SET_DBGSEVFILTER: + { + /* Send the command to the LMAC */ + if ((status = rwnx_send_dbg_set_sev_filter_req(rwnx_hw, filter))) + return status; + } + break; + + default: + printk("Unknown testmode register command ID\n"); + return -ENOSYS; + } + + return status; +} + +/* + * This function handles the user application commands for register access without using + * the normal LMAC messaging way. + * This time register access will be done through direct PCI BAR windows. This can be used + * to access registers even when the :AMC FW is stuck. 
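+ * Only the low 24 bits of RWNX_TM_ATTR_REG_OFFSET are used as the offset
+ * into the RWNX_ADDR_SYSTEM window, which is accessed directly with
+ * RWNX_REG_READ()/RWNX_REG_WRITE() instead of LMAC messages.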
+ * + * @hw: ieee80211_hw object that represents the device + * @tb: general message fields from the user space + */ +int rwnx_testmode_reg_dbg(struct ieee80211_hw *hw, struct nlattr **tb) +{ + struct rwnx_hw *rwnx_hw = hw->priv; + struct rwnx_plat *rwnx_plat = rwnx_hw->plat; + u32 mem_addr; + struct sk_buff *skb; + int status = 0; + volatile unsigned int reg_value = 0; + unsigned int offset; + + /* First check if register address is there */ + if (!tb[RWNX_TM_ATTR_REG_OFFSET]) { + printk("Error finding register offset\n"); + return -ENOMSG; + } + + mem_addr = nla_get_u32(tb[RWNX_TM_ATTR_REG_OFFSET]); + offset = mem_addr & 0x00FFFFFF; + + switch (nla_get_u32(tb[RWNX_TM_ATTR_COMMAND])) { + case RWNX_TM_CMD_APP2DEV_REG_READ_DBG: + { + /*** Send the command to the LMAC ***/ + reg_value = RWNX_REG_READ(rwnx_plat, RWNX_ADDR_SYSTEM, offset); + + /* Allocate the answer message */ + skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 20); + if (!skb) { + printk("Error allocating memory\n"); + return -ENOMEM; + } + + if (nla_put_u32(skb, RWNX_TM_ATTR_REG_VALUE32, reg_value)) + goto nla_put_failure; + + /* Send the answer to upper layer */ + status = cfg80211_testmode_reply(skb); + if (status < 0) + printk("Error sending msg : %d\n", status); + } + break; + + case RWNX_TM_CMD_APP2DEV_REG_WRITE_DBG: + { + if (!tb[RWNX_TM_ATTR_REG_VALUE32]) { + printk("Error finding value to write\n"); + return -ENOMSG; + } else { + reg_value = nla_get_u32(tb[RWNX_TM_ATTR_REG_VALUE32]); + + /* Send the command to the LMAC */ + RWNX_REG_WRITE(reg_value, rwnx_plat, RWNX_ADDR_SYSTEM, + offset); + } + } + break; + + default: + printk("Unknown testmode register command ID\n"); + return -ENOSYS; + } + + return status; + +nla_put_failure: + kfree_skb(skb); + return -EMSGSIZE; +} diff --git a/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_testmode.h b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_testmode.h new file mode 100644 index 0000000..0115e0f --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_testmode.h @@ -0,0 +1,64 @@ +/** + **************************************************************************************** + * + * @file rwnx_testmode.h + * + * @brief Test mode function declarations + * + * Copyright (C) RivieraWaves 2012-2019 + * + **************************************************************************************** + */ + +#ifndef RWNX_TESTMODE_H_ +#define RWNX_TESTMODE_H_ + +#include +#include + +/* Commands from user space to kernel space(RWNX_TM_CMD_APP2DEV_XX) and + * from and kernel space to user space(RWNX_TM_CMD_DEV2APP_XX). + * The command ID is carried with RWNX_TM_ATTR_COMMAND. + */ +enum rwnx_tm_cmd_t { + /* commands from user application to access register */ + RWNX_TM_CMD_APP2DEV_REG_READ = 1, + RWNX_TM_CMD_APP2DEV_REG_WRITE, + + /* commands from user application to select the Debug levels */ + RWNX_TM_CMD_APP2DEV_SET_DBGMODFILTER, + RWNX_TM_CMD_APP2DEV_SET_DBGSEVFILTER, + + /* commands to access registers without sending messages to LMAC layer, + * this must be used when LMAC FW is stuck. 
*/ + RWNX_TM_CMD_APP2DEV_REG_READ_DBG, + RWNX_TM_CMD_APP2DEV_REG_WRITE_DBG, + + RWNX_TM_CMD_MAX, +}; + +enum rwnx_tm_attr_t { + RWNX_TM_ATTR_NOT_APPLICABLE = 0, + + RWNX_TM_ATTR_COMMAND, + + /* When RWNX_TM_ATTR_COMMAND is RWNX_TM_CMD_APP2DEV_REG_XXX, + * The mandatory fields are: + * RWNX_TM_ATTR_REG_OFFSET for the offset of the target register; + * RWNX_TM_ATTR_REG_VALUE32 for value */ + RWNX_TM_ATTR_REG_OFFSET, + RWNX_TM_ATTR_REG_VALUE32, + + /* When RWNX_TM_ATTR_COMMAND is RWNX_TM_CMD_APP2DEV_SET_DBGXXXFILTER, + * The mandatory field is RWNX_TM_ATTR_REG_FILTER. */ + RWNX_TM_ATTR_REG_FILTER, + + RWNX_TM_ATTR_MAX, +}; + +/***********************************************************************/ +int rwnx_testmode_reg(struct ieee80211_hw *hw, struct nlattr **tb); +int rwnx_testmode_dbg_filter(struct ieee80211_hw *hw, struct nlattr **tb); +int rwnx_testmode_reg_dbg(struct ieee80211_hw *hw, struct nlattr **tb); + +#endif /* RWNX_TESTMODE_H_ */ diff --git a/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_tx.c b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_tx.c new file mode 100755 index 0000000..6015f4d --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_tx.c @@ -0,0 +1,2492 @@ +/** + ****************************************************************************** + * + * @file rwnx_tx.c + * + * Copyright (C) RivieraWaves 2012-2019 + * + ****************************************************************************** + */ +#include +#include +#include + +#include "rwnx_defs.h" +#include "rwnx_tx.h" +#include "rwnx_msg_tx.h" +#include "rwnx_mesh.h" +#include "rwnx_events.h" +#include "rwnx_compat.h" +#include "aicwf_txrxif.h" +#include "rwnx_main.h" +#ifdef CONFIG_RWNX_MON_XMIT +#include +#endif + +/****************************************************************************** + * Power Save functions + *****************************************************************************/ +/** + * rwnx_set_traffic_status - Inform FW if traffic is available for STA in PS + * + * @rwnx_hw: Driver main data + * @sta: Sta in PS mode + * @available: whether traffic is buffered for the STA + * @ps_id: type of PS data requested (@LEGACY_PS_ID or @UAPSD_ID) + */ +void rwnx_set_traffic_status(struct rwnx_hw *rwnx_hw, + struct rwnx_sta *sta, + bool available, + u8 ps_id) +{ + if (sta->tdls.active) { + rwnx_send_tdls_peer_traffic_ind_req(rwnx_hw, + rwnx_hw->vif_table[sta->vif_idx]); + } else { + bool uapsd = (ps_id != LEGACY_PS_ID); + rwnx_send_me_traffic_ind(rwnx_hw, sta->sta_idx, uapsd, available); +#ifdef CREATE_TRACE_POINTS + trace_ps_traffic_update(sta->sta_idx, available, uapsd); +#endif + } +} + +/** + * rwnx_ps_bh_enable - Enable/disable PS mode for one STA + * + * @rwnx_hw: Driver main data + * @sta: Sta which enters/leaves PS mode + * @enable: PS mode status + * + * This function will enable/disable PS mode for one STA. 
+ * When enabling PS mode: + * - Stop all STA's txq for RWNX_TXQ_STOP_STA_PS reason + * - Count how many buffers are already ready for this STA + * - For BC/MC sta, update all queued SKB to use hw_queue BCMC + * - Update TIM if some packet are ready + * + * When disabling PS mode: + * - Start all STA's txq for RWNX_TXQ_STOP_STA_PS reason + * - For BC/MC sta, update all queued SKB to use hw_queue AC_BE + * - Update TIM if some packet are ready (otherwise fw will not update TIM + * in beacon for this STA) + * + * All counter/skb updates are protected from TX path by taking tx_lock + * + * NOTE: _bh_ in function name indicates that this function is called + * from a bottom_half tasklet. + */ +void rwnx_ps_bh_enable(struct rwnx_hw *rwnx_hw, struct rwnx_sta *sta, + bool enable) +{ + struct rwnx_txq *txq; + + if (enable) { +#ifdef CREATE_TRACE_POINTS + trace_ps_enable(sta); +#endif + spin_lock_bh(&rwnx_hw->tx_lock); + sta->ps.active = true; + sta->ps.sp_cnt[LEGACY_PS_ID] = 0; + sta->ps.sp_cnt[UAPSD_ID] = 0; + rwnx_txq_sta_stop(sta, RWNX_TXQ_STOP_STA_PS, rwnx_hw); + + if (is_multicast_sta(sta->sta_idx)) { + txq = rwnx_txq_sta_get(sta, 0, rwnx_hw); + sta->ps.pkt_ready[LEGACY_PS_ID] = skb_queue_len(&txq->sk_list); + sta->ps.pkt_ready[UAPSD_ID] = 0; + //txq->hwq = &rwnx_hw->hwq[RWNX_HWQ_BCMC]; + } else { + int i; + sta->ps.pkt_ready[LEGACY_PS_ID] = 0; + sta->ps.pkt_ready[UAPSD_ID] = 0; + foreach_sta_txq(sta, txq, i, rwnx_hw) { + sta->ps.pkt_ready[txq->ps_id] += skb_queue_len(&txq->sk_list); + } + } + + spin_unlock_bh(&rwnx_hw->tx_lock); + + //if (sta->ps.pkt_ready[LEGACY_PS_ID]) + //rwnx_set_traffic_status(rwnx_hw, sta, true, LEGACY_PS_ID); + + //if (sta->ps.pkt_ready[UAPSD_ID]) + // rwnx_set_traffic_status(rwnx_hw, sta, true, UAPSD_ID); + } else { +#ifdef CREATE_TRACE_POINTS + trace_ps_disable(sta->sta_idx); +#endif + spin_lock_bh(&rwnx_hw->tx_lock); + sta->ps.active = false; + + if (is_multicast_sta(sta->sta_idx)) { + txq = rwnx_txq_sta_get(sta, 0, rwnx_hw); + txq->hwq = &rwnx_hw->hwq[RWNX_HWQ_BE]; + txq->push_limit = 0; + } else { + int i; + foreach_sta_txq(sta, txq, i, rwnx_hw) { + txq->push_limit = 0; + } + } + + rwnx_txq_sta_start(sta, RWNX_TXQ_STOP_STA_PS, rwnx_hw); + spin_unlock_bh(&rwnx_hw->tx_lock); + + if (sta->ps.pkt_ready[LEGACY_PS_ID]) + rwnx_set_traffic_status(rwnx_hw, sta, false, LEGACY_PS_ID); + + //if (sta->ps.pkt_ready[UAPSD_ID]) + // rwnx_set_traffic_status(rwnx_hw, sta, false, UAPSD_ID); + + tasklet_schedule(&rwnx_hw->task); + } +} + +/** + * rwnx_ps_bh_traffic_req - Handle traffic request for STA in PS mode + * + * @rwnx_hw: Driver main data + * @sta: Sta which enters/leaves PS mode + * @pkt_req: number of pkt to push + * @ps_id: type of PS data requested (@LEGACY_PS_ID or @UAPSD_ID) + * + * This function will make sure that @pkt_req are pushed to fw + * whereas the STA is in PS mode. + * If request is 0, send all traffic + * If request is greater than available pkt, reduce request + * Note: request will also be reduce if txq credits are not available + * + * All counter updates are protected from TX path by taking tx_lock + * + * NOTE: _bh_ in function name indicates that this function is called + * from the bottom_half tasklet. 
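+ *
+ * Worked example (illustrative numbers only): with pkt_ready[ps_id] = 5,
+ * sp_cnt[ps_id] = 0 and a fw request of pkt_req = 8, the request is first
+ * clamped to the 5 ready packets; if the only txq matching ps_id has
+ * 3 credits, its push_limit becomes 3 and sp_cnt[ps_id] ends up at 3, so
+ * the service period is served with 3 packets.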
+ */ +void rwnx_ps_bh_traffic_req(struct rwnx_hw *rwnx_hw, struct rwnx_sta *sta, + u16 pkt_req, u8 ps_id) +{ + int pkt_ready_all; + struct rwnx_txq *txq; + int schedule = 0; + + //if (WARN(!sta->ps.active, "sta %pM is not in Power Save mode", + // sta->mac_addr)) + // return; + if(!sta->ps.active) { + //AICWFDBG(LOGERROR, "sta(%d) %pM is not in Power Save mode", sta->sta_idx, sta->mac_addr); + return; + } +#ifdef CREATE_TRACE_POINTS + trace_ps_traffic_req(sta, pkt_req, ps_id); +#endif + spin_lock_bh(&rwnx_hw->tx_lock); + + /* Fw may ask to stop a service period with PS_SP_INTERRUPTED. This only + happens for p2p-go interface if NOA starts during a service period */ + if ((pkt_req == PS_SP_INTERRUPTED) && (ps_id == UAPSD_ID)) { + int tid; + sta->ps.sp_cnt[ps_id] = 0; + foreach_sta_txq(sta, txq, tid, rwnx_hw) { + txq->push_limit = 0; + } + goto done; + } + + pkt_ready_all = (sta->ps.pkt_ready[ps_id] - sta->ps.sp_cnt[ps_id]); + + /* Don't start SP until previous one is finished or we don't have + packet ready (which must not happen for U-APSD) */ + if (sta->ps.sp_cnt[ps_id] || pkt_ready_all <= 0) { + goto done; + } + + /* Adapt request to what is available. */ + if (pkt_req == 0 || pkt_req > pkt_ready_all) { + pkt_req = pkt_ready_all; + } + + /* Reset the SP counter */ + sta->ps.sp_cnt[ps_id] = 0; + schedule = 1; + + /* "dispatch" the request between txq */ + if (is_multicast_sta(sta->sta_idx)) { + txq = rwnx_txq_sta_get(sta, 0, rwnx_hw); + //if (txq->credits <= 0) + // goto done; + if (pkt_req > txq->credits) + pkt_req = txq->credits; + txq->push_limit = pkt_req; + sta->ps.sp_cnt[ps_id] = pkt_req; + rwnx_txq_add_to_hw_list(txq); + } else { + int i, tid; + + foreach_sta_txq_prio(sta, txq, tid, i, rwnx_hw) { + u16 txq_len = skb_queue_len(&txq->sk_list); + + if (txq->ps_id != ps_id) + continue; + + if (txq_len > txq->credits) + txq_len = txq->credits; + + if (txq_len == 0) + continue; + + if (txq_len < pkt_req) { + /* Not enough pkt queued in this txq, add this + txq to hwq list and process next txq */ + pkt_req -= txq_len; + txq->push_limit = txq_len; + sta->ps.sp_cnt[ps_id] += txq_len; + rwnx_txq_add_to_hw_list(txq); + } else { + /* Enough pkt in this txq to comlete the request + add this txq to hwq list and stop processing txq */ + txq->push_limit = pkt_req; + sta->ps.sp_cnt[ps_id] += pkt_req; + rwnx_txq_add_to_hw_list(txq); + break; + } + } + } + + done: + spin_unlock_bh(&rwnx_hw->tx_lock); + if(schedule) + tasklet_schedule(&rwnx_hw->task); +} + +/****************************************************************************** + * TX functions + *****************************************************************************/ +#define PRIO_STA_NULL 0xAA + +static const int rwnx_down_hwq2tid[3] = { + [RWNX_HWQ_BK] = 2, + [RWNX_HWQ_BE] = 3, + [RWNX_HWQ_VI] = 5, +}; + +static void rwnx_downgrade_ac(struct rwnx_sta *sta, struct sk_buff *skb) +{ + int8_t ac = rwnx_tid2hwq[skb->priority]; + + if (WARN((ac > RWNX_HWQ_VO), + "Unexepcted ac %d for skb before downgrade", ac)) + ac = RWNX_HWQ_VO; + + while (sta->acm & BIT(ac)) { + if (ac == RWNX_HWQ_BK) { + skb->priority = 1; + return; + } + ac--; + skb->priority = rwnx_down_hwq2tid[ac]; + } +} + +u16 rwnx_select_txq(struct rwnx_vif *rwnx_vif, struct sk_buff *skb) +{ + struct rwnx_hw *rwnx_hw = rwnx_vif->rwnx_hw; + struct wireless_dev *wdev = &rwnx_vif->wdev; + struct rwnx_sta *sta = NULL; + struct rwnx_txq *txq; + u16 netdev_queue; + bool tdls_mgmgt_frame = false; + int nx_bcmc_txq_ndev_idx = NX_BCMC_TXQ_NDEV_IDX; + + if((g_rwnx_plat->usbdev->chipid == 
PRODUCT_ID_AIC8801) || + ((g_rwnx_plat->usbdev->chipid == PRODUCT_ID_AIC8800DC || + g_rwnx_plat->usbdev->chipid == PRODUCT_ID_AIC8800DW) && chip_id < 3)){ + nx_bcmc_txq_ndev_idx = NX_BCMC_TXQ_NDEV_IDX_FOR_OLD_IC; + } + + switch (wdev->iftype) { + case NL80211_IFTYPE_STATION: + case NL80211_IFTYPE_P2P_CLIENT: + { + struct ethhdr *eth; + eth = (struct ethhdr *)skb->data; + if (eth->h_proto == cpu_to_be16(ETH_P_TDLS)) { + tdls_mgmgt_frame = true; + } + if ((rwnx_vif->tdls_status == TDLS_LINK_ACTIVE) && + (rwnx_vif->sta.tdls_sta != NULL) && + (memcmp(eth->h_dest, rwnx_vif->sta.tdls_sta->mac_addr, ETH_ALEN) == 0)) + sta = rwnx_vif->sta.tdls_sta; + else + sta = rwnx_vif->sta.ap; + break; + } + case NL80211_IFTYPE_AP_VLAN: + if (rwnx_vif->ap_vlan.sta_4a) { + sta = rwnx_vif->ap_vlan.sta_4a; + break; + } + + /* AP_VLAN interface is not used for a 4A STA, + fallback searching sta amongs all AP's clients */ + rwnx_vif = rwnx_vif->ap_vlan.master; + case NL80211_IFTYPE_AP: + case NL80211_IFTYPE_P2P_GO: + { + struct rwnx_sta *cur; + struct ethhdr *eth = (struct ethhdr *)skb->data; + + if (is_multicast_ether_addr(eth->h_dest)) { + sta = &rwnx_hw->sta_table[rwnx_vif->ap.bcmc_index]; + } else { + spin_lock_bh(&rwnx_vif->rwnx_hw->cb_lock); + list_for_each_entry(cur, &rwnx_vif->ap.sta_list, list) { + if (!memcmp(cur->mac_addr, eth->h_dest, ETH_ALEN)) { + sta = cur; + break; + } + } + spin_unlock_bh(&rwnx_vif->rwnx_hw->cb_lock); + } + + break; + } + case NL80211_IFTYPE_MESH_POINT: + { + struct ethhdr *eth = (struct ethhdr *)skb->data; + + if (!rwnx_vif->is_resending) { + /* + * If ethernet source address is not the address of a mesh wireless interface, we are proxy for + * this address and have to inform the HW + */ + if (memcmp(ð->h_source[0], &rwnx_vif->ndev->perm_addr[0], ETH_ALEN)) { + /* Check if LMAC is already informed */ + if (!rwnx_get_mesh_proxy_info(rwnx_vif, (u8 *)ð->h_source, true)) { + rwnx_send_mesh_proxy_add_req(rwnx_hw, rwnx_vif, (u8 *)ð->h_source); + } + } + } + + if (is_multicast_ether_addr(eth->h_dest)) { + sta = &rwnx_hw->sta_table[rwnx_vif->ap.bcmc_index]; + } else { + /* Path to be used */ + struct rwnx_mesh_path *p_mesh_path = NULL; + struct rwnx_mesh_path *p_cur_path; + /* Check if destination is proxied by a peer Mesh STA */ + struct rwnx_mesh_proxy *p_mesh_proxy = rwnx_get_mesh_proxy_info(rwnx_vif, (u8 *)ð->h_dest, false); + /* Mesh Target address */ + struct mac_addr *p_tgt_mac_addr; + + if (p_mesh_proxy) { + p_tgt_mac_addr = &p_mesh_proxy->proxy_addr; + } else { + p_tgt_mac_addr = (struct mac_addr *)ð->h_dest; + } + + /* Look for path with provided target address */ + list_for_each_entry(p_cur_path, &rwnx_vif->ap.mpath_list, list) { + if (!memcmp(&p_cur_path->tgt_mac_addr, p_tgt_mac_addr, ETH_ALEN)) { + p_mesh_path = p_cur_path; + break; + } + } + + if (p_mesh_path) { + sta = p_mesh_path->p_nhop_sta; + } else { + rwnx_send_mesh_path_create_req(rwnx_hw, rwnx_vif, (u8 *)p_tgt_mac_addr); + } + } + + break; + } + default: + break; + } + + if (sta && sta->qos) + { + if (tdls_mgmgt_frame) { + skb_set_queue_mapping(skb, NX_STA_NDEV_IDX(skb->priority, sta->sta_idx)); + } else { + /* use the data classifier to determine what 802.1d tag the + * data frame has */ + #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0) + skb->priority = cfg80211_classify8021d(skb) & IEEE80211_QOS_CTL_TAG1D_MASK; + #else + skb->priority = cfg80211_classify8021d(skb, NULL) & IEEE80211_QOS_CTL_TAG1D_MASK; + #endif + } + if (sta->acm) + rwnx_downgrade_ac(sta, skb); + + txq = rwnx_txq_sta_get(sta, skb->priority, 
rwnx_hw); + netdev_queue = txq->ndev_idx; + } + else if (sta) + { + skb->priority = 0xFF; + txq = rwnx_txq_sta_get(sta, 0, rwnx_hw); + netdev_queue = txq->ndev_idx; + } + else + { + /* This packet will be dropped in xmit function, still need to select + an active queue for xmit to be called. As it most likely to happen + for AP interface, select BCMC queue + (TODO: select another queue if BCMC queue is stopped) */ + skb->priority = PRIO_STA_NULL; + netdev_queue = nx_bcmc_txq_ndev_idx; + } + +#ifndef CONFIG_ONE_TXQ + BUG_ON(netdev_queue >= NX_NB_NDEV_TXQ); +#endif + + return netdev_queue; +} + +/** + * rwnx_set_more_data_flag - Update MORE_DATA flag in tx sw desc + * + * @rwnx_hw: Driver main data + * @sw_txhdr: Header for pkt to be pushed + * + * If STA is in PS mode + * - Set EOSP in case the packet is the last of the UAPSD service period + * - Set MORE_DATA flag if more pkt are ready for this sta + * - Update TIM if this is the last pkt buffered for this sta + * + * note: tx_lock already taken. + */ +static inline void rwnx_set_more_data_flag(struct rwnx_hw *rwnx_hw, + struct rwnx_sw_txhdr *sw_txhdr) +{ + struct rwnx_sta *sta = sw_txhdr->rwnx_sta; + struct rwnx_vif *vif = sw_txhdr->rwnx_vif; + struct rwnx_txq *txq = sw_txhdr->txq; + + if (unlikely(sta->ps.active)) { + sta->ps.pkt_ready[txq->ps_id]--; + sta->ps.sp_cnt[txq->ps_id]--; +#ifdef CREATE_TRACE_POINTS + trace_ps_push(sta); +#endif + if (((txq->ps_id == UAPSD_ID) || (vif->wdev.iftype == NL80211_IFTYPE_MESH_POINT) || (sta->tdls.active)) + && !sta->ps.sp_cnt[txq->ps_id]) { + sw_txhdr->desc.host.flags |= TXU_CNTRL_EOSP; + } + + if (sta->ps.pkt_ready[txq->ps_id]) { + sw_txhdr->desc.host.flags |= TXU_CNTRL_MORE_DATA; + } else { + rwnx_set_traffic_status(rwnx_hw, sta, false, txq->ps_id); + } + } +} + +/** + * rwnx_get_tx_priv - Get STA and tid for one skb + * + * @rwnx_vif: vif ptr + * @skb: skb + * @tid: pointer updated with the tid to use for this skb + * + * @return: pointer on the destination STA (may be NULL) + * + * skb has already been parsed in rwnx_select_queue function + * simply re-read information form skb. + */ +static struct rwnx_sta *rwnx_get_tx_priv(struct rwnx_vif *rwnx_vif, + struct sk_buff *skb, + u8 *tid) +{ + struct rwnx_hw *rwnx_hw = rwnx_vif->rwnx_hw; + struct rwnx_sta *sta; + int sta_idx; + int nx_remote_sta_max = NX_REMOTE_STA_MAX; + int nx_bcmc_txq_ndev_idx = NX_BCMC_TXQ_NDEV_IDX; + + if((g_rwnx_plat->usbdev->chipid == PRODUCT_ID_AIC8801) || + ((g_rwnx_plat->usbdev->chipid == PRODUCT_ID_AIC8800DC || + g_rwnx_plat->usbdev->chipid == PRODUCT_ID_AIC8800DW) && chip_id < 3)){ + nx_remote_sta_max = NX_REMOTE_STA_MAX_FOR_OLD_IC; + nx_bcmc_txq_ndev_idx = NX_BCMC_TXQ_NDEV_IDX_FOR_OLD_IC; + } + + + *tid = skb->priority; + if (unlikely(skb->priority == PRIO_STA_NULL)) { + return NULL; + } else { + int ndev_idx = skb_get_queue_mapping(skb); + + if (ndev_idx == nx_bcmc_txq_ndev_idx) + sta_idx = nx_remote_sta_max + master_vif_idx(rwnx_vif); + else + sta_idx = ndev_idx / NX_NB_TID_PER_STA; + + sta = &rwnx_hw->sta_table[sta_idx]; + } + + return sta; +} + +/** + * rwnx_prep_tx - Prepare buffer for DMA transmission + * + * @rwnx_hw: Driver main data + * @txhdr: Tx descriptor + * + * Maps hw_txhdr and buffer data for transmission via DMA. + * - Data buffer with be downloaded by embebded side. + * - hw_txhdr will be uploaded by embedded side when buffer has been + * transmitted over the air. 
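+ *
+ * Note: in this SDIO/USB build the mapping below is compiled out (#if 0),
+ * so the function is kept only as a hook and simply returns 0.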
+ */ +static int rwnx_prep_tx(struct rwnx_hw *rwnx_hw, struct rwnx_txhdr *txhdr) +{ +#if 0 + struct rwnx_sw_txhdr *sw_txhdr = txhdr->sw_hdr; + struct rwnx_hw_txhdr *hw_txhdr = &txhdr->hw_hdr; + dma_addr_t dma_addr; + + /* MAP (and sync) memory for DMA */ + dma_addr = dma_map_single(rwnx_hw->dev, hw_txhdr, + sw_txhdr->map_len, DMA_BIDIRECTIONAL); + if (WARN_ON(dma_mapping_error(rwnx_hw->dev, dma_addr))) + return -1; + + sw_txhdr->dma_addr = dma_addr; +#endif + return 0; +} + +/** + * rwnx_tx_push - Push one packet to fw + * + * @rwnx_hw: Driver main data + * @txhdr: tx desc of the buffer to push + * @flags: push flags (see @rwnx_push_flags) + * + * Push one packet to fw. Sw desc of the packet has already been updated. + * Only MORE_DATA flag will be set if needed. + */ +void rwnx_tx_push(struct rwnx_hw *rwnx_hw, struct rwnx_txhdr *txhdr, int flags) +{ + struct rwnx_sw_txhdr *sw_txhdr = txhdr->sw_hdr; + struct sk_buff *skb = sw_txhdr->skb; + struct rwnx_txq *txq = sw_txhdr->txq; + u16 hw_queue = txq->hwq->id; + int user = 0; + + lockdep_assert_held(&rwnx_hw->tx_lock); + + //printk("rwnx_tx_push\n"); + /* RETRY flag is not always set so retest here */ + if (txq->nb_retry) { + flags |= RWNX_PUSH_RETRY; + txq->nb_retry--; + if (txq->nb_retry == 0) { + WARN(skb != txq->last_retry_skb, + "last retry buffer is not the expected one"); + txq->last_retry_skb = NULL; + } + } else if (!(flags & RWNX_PUSH_RETRY)) { + txq->pkt_sent++; + } + +#ifdef CONFIG_RWNX_AMSDUS_TX + if (txq->amsdu == sw_txhdr) { + WARN((flags & RWNX_PUSH_RETRY), "End A-MSDU on a retry"); + rwnx_hw->stats.amsdus[sw_txhdr->amsdu.nb - 1].done++; + txq->amsdu = NULL; + } else if (!(flags & RWNX_PUSH_RETRY) && + !(sw_txhdr->desc.host.flags & TXU_CNTRL_AMSDU)) { + rwnx_hw->stats.amsdus[0].done++; + } +#endif /* CONFIG_RWNX_AMSDUS_TX */ + + /* Wait here to update hw_queue, as for multicast STA hwq may change + between queue and push (because of PS) */ + sw_txhdr->hw_queue = hw_queue; + + //sw_txhdr->desc.host.packet_addr = hw_queue; //use packet_addr field for hw_txq + sw_txhdr->desc.host.ac = hw_queue; //use ac field for hw_txq +#ifdef CONFIG_RWNX_MUMIMO_TX + /* MU group is only selected during hwq processing */ + sw_txhdr->desc.host.mumimo_info = txq->mumimo_info; + user = RWNX_TXQ_POS_ID(txq); +#endif /* CONFIG_RWNX_MUMIMO_TX */ + + if (sw_txhdr->rwnx_sta) { + /* only for AP mode */ + rwnx_set_more_data_flag(rwnx_hw, sw_txhdr); + } +#ifdef CREATE_TRACE_POINTS + trace_push_desc(skb, sw_txhdr, flags); +#endif + #if 0 + txq->credits--; + #endif + txq->pkt_pushed[user]++; + //printk("txq->credits=%d\n",txq->credits); + #if 0 + if (txq->credits <= 0) + rwnx_txq_stop(txq, RWNX_TXQ_STOP_FULL); + #endif + + if (txq->push_limit) + txq->push_limit--; +#if 0 + rwnx_ipc_txdesc_push(rwnx_hw, &sw_txhdr->desc, skb, hw_queue, user); +#else +#ifdef AICWF_SDIO_SUPPORT + if( ((sw_txhdr->desc.host.flags & TXU_CNTRL_MGMT) && \ + ((*(skb->data+sw_txhdr->headroom)==0xd0) || (*(skb->data+sw_txhdr->headroom)==0x10) || (*(skb->data+sw_txhdr->headroom)==0x30))) || \ + (sw_txhdr->desc.host.ethertype == 0x8e88) ) { // 0xd0:Action, 0x10:AssocRsp, 0x8e88:EAPOL + sw_txhdr->need_cfm = 1; + sw_txhdr->desc.host.status_desc_addr = ((1<<31) | rwnx_hw->sdio_env.txdesc_free_idx[0]); + aicwf_sdio_host_txdesc_push(&(rwnx_hw->sdio_env), 0, (long)skb); + AICWFDBG(LOGINFO, "need cfm ethertype:%8x,user_idx=%d, skb=%p sta_idx:%d\n", + sw_txhdr->desc.host.ethertype, + rwnx_hw->sdio_env.txdesc_free_idx[0], + skb, + sw_txhdr->desc.host.staid); + } else { + sw_txhdr->need_cfm = 0; + 
if (sw_txhdr->raw_frame) { + sw_txhdr->desc.host.flags |= TXU_CNTRL_MGMT; + } + if (sw_txhdr->fixed_rate) { + sw_txhdr->desc.host.status_desc_addr = (0x01UL << 30) | sw_txhdr->rate_config; + } else { + sw_txhdr->desc.host.status_desc_addr = 0; + } + + sw_txhdr->rwnx_vif->net_stats.tx_packets++; + sw_txhdr->rwnx_vif->net_stats.tx_bytes += sw_txhdr->frame_len; + rwnx_hw->stats.last_tx = jiffies; + } + aicwf_frame_tx((void *)(rwnx_hw->sdiodev), skb); +#endif +#ifdef AICWF_USB_SUPPORT + if( ((sw_txhdr->desc.host.flags & TXU_CNTRL_MGMT) && \ + ((*(skb->data+sw_txhdr->headroom)==0xd0) || (*(skb->data+sw_txhdr->headroom)==0x10) || (*(skb->data+sw_txhdr->headroom)==0x30))) || \ + (sw_txhdr->desc.host.ethertype == 0x8e88) || (sw_txhdr->desc.host.ethertype == 0xb488)) { + // 0xd0:Action, 0x10:AssocRsp, 0x8e88:EAPOL, 0xb488: WAPI + sw_txhdr->need_cfm = 1; + sw_txhdr->desc.host.status_desc_addr = ((1<<31) | rwnx_hw->usb_env.txdesc_free_idx[0]); + aicwf_usb_host_txdesc_push(&(rwnx_hw->usb_env), 0, (long)(skb)); + if((sw_txhdr->desc.host.flags & TXU_CNTRL_MGMT)) + AICWFDBG(LOGINFO, "need cfm mgmt:%x,user_idx=%d, skb=%p sta_idx:%d\n", + *(skb->data+sw_txhdr->headroom), + rwnx_hw->usb_env.txdesc_free_idx[0], + skb, + sw_txhdr->desc.host.staid); + else + AICWFDBG(LOGINFO, "need cfm ethertype:%8x,user_idx=%d, skb=%p sta_idx:%d\n", + sw_txhdr->desc.host.ethertype, + rwnx_hw->usb_env.txdesc_free_idx[0], + skb, + sw_txhdr->desc.host.staid); + } else { + sw_txhdr->need_cfm = 0; + if (sw_txhdr->raw_frame) { + sw_txhdr->desc.host.flags |= TXU_CNTRL_MGMT; + } + if (sw_txhdr->fixed_rate) { + sw_txhdr->desc.host.status_desc_addr = (0x01UL << 30) | sw_txhdr->rate_config; + } else { + sw_txhdr->desc.host.status_desc_addr = 0; + } + + sw_txhdr->rwnx_vif->net_stats.tx_packets++; + sw_txhdr->rwnx_vif->net_stats.tx_bytes += sw_txhdr->frame_len; + rwnx_hw->stats.last_tx = jiffies; + } + aicwf_frame_tx((void *)(rwnx_hw->usbdev), skb); +#endif +#endif + #if 0 + txq->hwq->credits[user]--; + #endif + rwnx_hw->stats.cfm_balance[hw_queue]++; +} + + + +/** + * rwnx_tx_retry - Push an AMPDU pkt that need to be retried + * + * @rwnx_hw: Driver main data + * @skb: pkt to re-push + * @txhdr: tx desc of the pkt to re-push + * @sw_retry: Indicates if fw decide to retry this buffer + * (i.e. it has never been transmitted over the air) + * + * Called when a packet needs to be repushed to the firmware. + * First update sw descriptor and then queue it in the retry list. 
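+ *
+ * Concretely (as visible below): the credit consumed by the original push
+ * is returned (txq->credits++), the MORE_DATA flag is cleared so it can be
+ * recomputed at push time, the confirmation area is reset and re-synced
+ * for the device, and the buffer is queued again on its txq; note that the
+ * last argument of rwnx_txq_queue_skb() is true here, whereas the normal
+ * xmit path passes false.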
+ */ +static void rwnx_tx_retry(struct rwnx_hw *rwnx_hw, struct sk_buff *skb, + struct rwnx_txhdr *txhdr, bool sw_retry) +{ + struct rwnx_sw_txhdr *sw_txhdr = txhdr->sw_hdr; + struct tx_cfm_tag *cfm = &txhdr->hw_hdr.cfm; + struct rwnx_txq *txq = sw_txhdr->txq; + int peek_off = offsetof(struct rwnx_hw_txhdr, cfm); + int peek_len = sizeof(((struct rwnx_hw_txhdr *)0)->cfm); + + if (!sw_retry) { + /* update sw desc */ + #if 0 + sw_txhdr->desc.host.sn = cfm->sn; + sw_txhdr->desc.host.pn[0] = cfm->pn[0]; + sw_txhdr->desc.host.pn[1] = cfm->pn[1]; + sw_txhdr->desc.host.pn[2] = cfm->pn[2]; + sw_txhdr->desc.host.pn[3] = cfm->pn[3]; + sw_txhdr->desc.host.timestamp = cfm->timestamp; + #endif + sw_txhdr->desc.host.flags |= TXU_CNTRL_RETRY; + + #ifdef CONFIG_RWNX_AMSDUS_TX + if (sw_txhdr->desc.host.flags & TXU_CNTRL_AMSDU) + rwnx_hw->stats.amsdus[sw_txhdr->amsdu.nb - 1].failed++; + #endif + } + + /* MORE_DATA will be re-set if needed when pkt will be repushed */ + sw_txhdr->desc.host.flags &= ~TXU_CNTRL_MORE_DATA; + + cfm->status.value = 0; + dma_sync_single_for_device(rwnx_hw->dev, sw_txhdr->dma_addr + peek_off, + peek_len, DMA_BIDIRECTIONAL); + + txq->credits++; + if (txq->credits > 0) + rwnx_txq_start(txq, RWNX_TXQ_STOP_FULL); + + /* Queue the buffer */ + rwnx_txq_queue_skb(skb, txq, rwnx_hw, true); +} + + +#ifdef CONFIG_RWNX_AMSDUS_TX +/* return size of subframe (including header) */ +static inline int rwnx_amsdu_subframe_length(struct ethhdr *eth, int eth_len) +{ + /* ethernet header is replaced with amdsu header that have the same size + Only need to check if LLC/SNAP header will be added */ + int len = eth_len; + + if (ntohs(eth->h_proto) >= ETH_P_802_3_MIN) { + len += sizeof(rfc1042_header) + 2; + } + + return len; +} + +static inline bool rwnx_amsdu_is_aggregable(struct sk_buff *skb) +{ + /* need to add some check on buffer to see if it can be aggregated ? */ + return true; +} + + +/** + * rwnx_amsdu_del_subframe_header - remove AMSDU header + * + * amsdu_txhdr: amsdu tx descriptor + * + * Move back the ethernet header at the "beginning" of the data buffer. + * (which has been moved in @rwnx_amsdu_add_subframe_header) + */ +static void rwnx_amsdu_del_subframe_header(struct rwnx_amsdu_txhdr *amsdu_txhdr) +{ + struct sk_buff *skb = amsdu_txhdr->skb; + struct ethhdr *eth; + u8 *pos; + + pos = skb->data; + pos += sizeof(struct rwnx_amsdu_txhdr); + eth = (struct ethhdr*)pos; + pos += amsdu_txhdr->pad + sizeof(struct ethhdr); + + if (ntohs(eth->h_proto) >= ETH_P_802_3_MIN) { + pos += sizeof(rfc1042_header) + 2; + } + + memmove(pos, eth, sizeof(*eth)); + skb_pull(skb, (pos - skb->data)); +} + +/** + * rwnx_amsdu_add_subframe_header - Add AMSDU header and link subframe + * + * @rwnx_hw Driver main data + * @skb Buffer to aggregate + * @sw_txhdr Tx descriptor for the first A-MSDU subframe + * + * return 0 on sucess, -1 otherwise + * + * This functions Add A-MSDU header and LLC/SNAP header in the buffer + * and update sw_txhdr of the first subframe to link this buffer. + * If an error happens, the buffer will be queued as a normal buffer. 
+ * + * + * Before After + * +-------------+ +-------------+ + * | HEADROOM | | HEADROOM | + * | | +-------------+ <- data + * | | | amsdu_txhdr | + * | | | * pad size | + * | | +-------------+ + * | | | ETH hdr | keep original eth hdr + * | | | | to restore it once transmitted + * | | +-------------+ <- packet_addr[x] + * | | | Pad | + * | | +-------------+ + * data -> +-------------+ | AMSDU HDR | + * | ETH hdr | +-------------+ + * | | | LLC/SNAP | + * +-------------+ +-------------+ + * | DATA | | DATA | + * | | | | + * +-------------+ +-------------+ + * + * Called with tx_lock hold + */ +static int rwnx_amsdu_add_subframe_header(struct rwnx_hw *rwnx_hw, + struct sk_buff *skb, + struct rwnx_sw_txhdr *sw_txhdr) +{ + struct rwnx_amsdu *amsdu = &sw_txhdr->amsdu; + struct rwnx_amsdu_txhdr *amsdu_txhdr; + struct ethhdr *amsdu_hdr, *eth = (struct ethhdr *)skb->data; + int headroom_need, map_len, msdu_len; + dma_addr_t dma_addr; + u8 *pos, *map_start; + + msdu_len = skb->len - sizeof(*eth); + headroom_need = sizeof(*amsdu_txhdr) + amsdu->pad + + sizeof(*amsdu_hdr); + if (ntohs(eth->h_proto) >= ETH_P_802_3_MIN) { + headroom_need += sizeof(rfc1042_header) + 2; + msdu_len += sizeof(rfc1042_header) + 2; + } + + /* we should have enough headroom (checked in xmit) */ + if (WARN_ON(skb_headroom(skb) < headroom_need)) { + return -1; + } + + /* allocate headroom */ + pos = skb_push(skb, headroom_need); + amsdu_txhdr = (struct rwnx_amsdu_txhdr *)pos; + pos += sizeof(*amsdu_txhdr); + + /* move eth header */ + memmove(pos, eth, sizeof(*eth)); + eth = (struct ethhdr *)pos; + pos += sizeof(*eth); + + /* Add padding from previous subframe */ + map_start = pos; + memset(pos, 0, amsdu->pad); + pos += amsdu->pad; + + /* Add AMSDU hdr */ + amsdu_hdr = (struct ethhdr *)pos; + memcpy(amsdu_hdr->h_dest, eth->h_dest, ETH_ALEN); + memcpy(amsdu_hdr->h_source, eth->h_source, ETH_ALEN); + amsdu_hdr->h_proto = htons(msdu_len); + pos += sizeof(*amsdu_hdr); + + if (ntohs(eth->h_proto) >= ETH_P_802_3_MIN) { + memcpy(pos, rfc1042_header, sizeof(rfc1042_header)); + pos += sizeof(rfc1042_header); + } + + /* MAP (and sync) memory for DMA */ + map_len = msdu_len + amsdu->pad + sizeof(*amsdu_hdr); + dma_addr = dma_map_single(rwnx_hw->dev, map_start, map_len, + DMA_BIDIRECTIONAL); + if (WARN_ON(dma_mapping_error(rwnx_hw->dev, dma_addr))) { + pos -= sizeof(*eth); + memmove(pos, eth, sizeof(*eth)); + skb_pull(skb, headroom_need); + return -1; + } + + /* update amdsu_txhdr */ + amsdu_txhdr->map_len = map_len; + amsdu_txhdr->dma_addr = dma_addr; + amsdu_txhdr->skb = skb; + amsdu_txhdr->pad = amsdu->pad; + amsdu_txhdr->msdu_len = msdu_len; + + /* update rwnx_sw_txhdr (of the first subframe) */ + BUG_ON(amsdu->nb != sw_txhdr->desc.host.packet_cnt); + sw_txhdr->desc.host.packet_addr[amsdu->nb] = dma_addr; + sw_txhdr->desc.host.packet_len[amsdu->nb] = map_len; + sw_txhdr->desc.host.packet_cnt++; + amsdu->nb++; + + amsdu->pad = AMSDU_PADDING(map_len - amsdu->pad); + list_add_tail(&amsdu_txhdr->list, &amsdu->hdrs); + amsdu->len += map_len; + + rwnx_ipc_sta_buffer(rwnx_hw, sw_txhdr->txq->sta, + sw_txhdr->txq->tid, msdu_len); + + trace_amsdu_subframe(sw_txhdr); + return 0; +} + +/** + * rwnx_amsdu_add_subframe - Add this buffer as an A-MSDU subframe if possible + * + * @rwnx_hw Driver main data + * @skb Buffer to aggregate if possible + * @sta Destination STA + * @txq sta's txq used for this buffer + * + * Tyr to aggregate the buffer in an A-MSDU. 
If it succeed then the + * buffer is added as a new A-MSDU subframe with AMSDU and LLC/SNAP + * headers added (so FW won't have to modify this subframe). + * + * To be added as subframe : + * - sta must allow amsdu + * - buffer must be aggregable (to be defined) + * - at least one other aggregable buffer is pending in the queue + * or an a-msdu (with enough free space) is currently in progress + * + * returns true if buffer has been added as A-MDSP subframe, false otherwise + * + */ +static bool rwnx_amsdu_add_subframe(struct rwnx_hw *rwnx_hw, struct sk_buff *skb, + struct rwnx_sta *sta, struct rwnx_txq *txq) +{ + bool res = false; + struct ethhdr *eth; + + /* immediately return if amsdu are not allowed for this sta */ + if (!txq->amsdu_len || rwnx_hw->mod_params->amsdu_maxnb < 2 || + !rwnx_amsdu_is_aggregable(skb) + ) + return false; + + spin_lock_bh(&rwnx_hw->tx_lock); + if (txq->amsdu) { + /* aggreagation already in progress, add this buffer if enough space + available, otherwise end the current amsdu */ + struct rwnx_sw_txhdr *sw_txhdr = txq->amsdu; + eth = (struct ethhdr *)(skb->data); + + if (((sw_txhdr->amsdu.len + sw_txhdr->amsdu.pad + + rwnx_amsdu_subframe_length(eth, skb->len)) > txq->amsdu_len) || + rwnx_amsdu_add_subframe_header(rwnx_hw, skb, sw_txhdr)) { + txq->amsdu = NULL; + goto end; + } + + if (sw_txhdr->amsdu.nb >= rwnx_hw->mod_params->amsdu_maxnb) { + rwnx_hw->stats.amsdus[sw_txhdr->amsdu.nb - 1].done++; + /* max number of subframes reached */ + txq->amsdu = NULL; + } + } else { + /* Check if a new amsdu can be started with the previous buffer + (if any) and this one */ + struct sk_buff *skb_prev = skb_peek_tail(&txq->sk_list); + struct rwnx_txhdr *txhdr; + struct rwnx_sw_txhdr *sw_txhdr; + int len1, len2; + + if (!skb_prev || !rwnx_amsdu_is_aggregable(skb_prev)) + goto end; + + txhdr = (struct rwnx_txhdr *)skb_prev->data; + sw_txhdr = txhdr->sw_hdr; + if ((sw_txhdr->amsdu.len) || + (sw_txhdr->desc.host.flags & TXU_CNTRL_RETRY)) + /* previous buffer is already a complete amsdu or a retry */ + goto end; + + eth = (struct ethhdr *)(skb_prev->data + sw_txhdr->headroom); + len1 = rwnx_amsdu_subframe_length(eth, (sw_txhdr->frame_len + + sizeof(struct ethhdr))); + + eth = (struct ethhdr *)(skb->data); + len2 = rwnx_amsdu_subframe_length(eth, skb->len); + + if (len1 + AMSDU_PADDING(len1) + len2 > txq->amsdu_len) + /* not enough space to aggregate those two buffers */ + goto end; + + /* Add subframe header. 
+ Note: Fw will take care of adding AMDSU header for the first + subframe while generating 802.11 MAC header */ + INIT_LIST_HEAD(&sw_txhdr->amsdu.hdrs); + sw_txhdr->amsdu.len = len1; + sw_txhdr->amsdu.nb = 1; + sw_txhdr->amsdu.pad = AMSDU_PADDING(len1); + if (rwnx_amsdu_add_subframe_header(rwnx_hw, skb, sw_txhdr)) + goto end; + + sw_txhdr->desc.host.flags |= TXU_CNTRL_AMSDU; + + if (sw_txhdr->amsdu.nb < rwnx_hw->mod_params->amsdu_maxnb) + txq->amsdu = sw_txhdr; + else + rwnx_hw->stats.amsdus[sw_txhdr->amsdu.nb - 1].done++; + } + + res = true; + + end: + spin_unlock_bh(&rwnx_hw->tx_lock); + return res; +} +#endif /* CONFIG_RWNX_AMSDUS_TX */ + +#ifdef CONFIG_BR_SUPPORT +int aic_br_client_tx(struct rwnx_vif *vif, struct sk_buff **pskb) +{ + struct sk_buff *skb = *pskb; + + /* if(check_fwstate(pmlmepriv, WIFI_STATION_STATE|WIFI_ADHOC_STATE) == _TRUE) */ + { + void dhcp_flag_bcast(struct rwnx_vif *vif, struct sk_buff *skb); + int res, is_vlan_tag = 0, i, do_nat25 = 1; + unsigned short vlan_hdr = 0; + void *br_port = NULL; + + /* mac_clone_handle_frame(priv, skb); */ + +#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 35)) + br_port = vif->ndev->br_port; +#else /* (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 35)) */ + rcu_read_lock(); + br_port = rcu_dereference(vif->ndev->rx_handler_data); + rcu_read_unlock(); +#endif /* (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 35)) */ +#ifdef BR_SUPPORT_DEBUG + printk("SA=%pM, br_mac=%pM, type=0x%x, da[0]=%x, scdb=%pM, vif_type=%d\n", skb->data + MACADDRLEN, vif->br_mac, *((unsigned short *)(skb->data + MACADDRLEN * 2)), + skb->data[0], vif->scdb_mac,RWNX_VIF_TYPE(vif)); +#endif + spin_lock_bh(&vif->br_ext_lock); + if (!(skb->data[0] & 1) && + br_port && + memcmp(skb->data + MACADDRLEN, vif->br_mac, MACADDRLEN) && + *((unsigned short *)(skb->data + MACADDRLEN * 2)) != __constant_htons(ETH_P_8021Q) && + *((unsigned short *)(skb->data + MACADDRLEN * 2)) == __constant_htons(ETH_P_IP) && + !memcmp(vif->scdb_mac, skb->data + MACADDRLEN, MACADDRLEN) && vif->scdb_entry) { + memcpy(skb->data + MACADDRLEN, vif->ndev->dev_addr, MACADDRLEN); + vif->scdb_entry->ageing_timer = jiffies; + spin_unlock_bh(&vif->br_ext_lock); + } else + /* if (!priv->pmib->ethBrExtInfo.nat25_disable) */ + { + /* if (priv->dev->br_port && + * !memcmp(skb->data+MACADDRLEN, priv->br_mac, MACADDRLEN)) { */ +#if 1 + if (*((unsigned short *)(skb->data + MACADDRLEN * 2)) == __constant_htons(ETH_P_8021Q)) { + is_vlan_tag = 1; + vlan_hdr = *((unsigned short *)(skb->data + MACADDRLEN * 2 + 2)); + for (i = 0; i < 6; i++) + *((unsigned short *)(skb->data + MACADDRLEN * 2 + 2 - i * 2)) = *((unsigned short *)(skb->data + MACADDRLEN * 2 - 2 - i * 2)); + skb_pull(skb, 4); + } + /* if SA == br_mac && skb== IP => copy SIP to br_ip ?? 
why */ + if (!memcmp(skb->data + MACADDRLEN, vif->br_mac, MACADDRLEN) && + (*((unsigned short *)(skb->data + MACADDRLEN * 2)) == __constant_htons(ETH_P_IP))) + memcpy(vif->br_ip, skb->data + WLAN_ETHHDR_LEN + 12, 4); + + if (*((unsigned short *)(skb->data + MACADDRLEN * 2)) == __constant_htons(ETH_P_IP)) { + if (memcmp(vif->scdb_mac, skb->data + MACADDRLEN, MACADDRLEN)) { + #if 1 + void *scdb_findEntry(struct rwnx_vif *vif, unsigned char *macAddr, unsigned char *ipAddr); + + vif->scdb_entry = (struct nat25_network_db_entry *)scdb_findEntry(vif, + skb->data + MACADDRLEN, skb->data + WLAN_ETHHDR_LEN + 12); + if (vif->scdb_entry != NULL) { + memcpy(vif->scdb_mac, skb->data + MACADDRLEN, MACADDRLEN); + memcpy(vif->scdb_ip, skb->data + WLAN_ETHHDR_LEN + 12, 4); + vif->scdb_entry->ageing_timer = jiffies; + do_nat25 = 0; + } + #endif + } else { + if (vif->scdb_entry) { + vif->scdb_entry->ageing_timer = jiffies; + do_nat25 = 0; + } else { + memset(vif->scdb_mac, 0, MACADDRLEN); + memset(vif->scdb_ip, 0, 4); + } + } + } + spin_unlock_bh(&vif->br_ext_lock); +#endif /* 1 */ + if (do_nat25) { + #if 1 + int nat25_db_handle(struct rwnx_vif *vif, struct sk_buff *skb, int method); + if (nat25_db_handle(vif, skb, NAT25_CHECK) == 0) { + struct sk_buff *newskb; + + if (is_vlan_tag) { + skb_push(skb, 4); + for (i = 0; i < 6; i++) + *((unsigned short *)(skb->data + i * 2)) = *((unsigned short *)(skb->data + 4 + i * 2)); + *((unsigned short *)(skb->data + MACADDRLEN * 2)) = __constant_htons(ETH_P_8021Q); + *((unsigned short *)(skb->data + MACADDRLEN * 2 + 2)) = vlan_hdr; + } + + newskb = skb_copy(skb, in_interrupt() ? GFP_ATOMIC : GFP_KERNEL); + if (newskb == NULL) { + /* priv->ext_stats.tx_drops++; */ + printk("TX DROP: skb_copy fail!\n"); + /* goto stop_proc; */ + return -1; + } + dev_kfree_skb_any(skb); + + *pskb = skb = newskb; + if (is_vlan_tag) { + vlan_hdr = *((unsigned short *)(skb->data + MACADDRLEN * 2 + 2)); + for (i = 0; i < 6; i++) + *((unsigned short *)(skb->data + MACADDRLEN * 2 + 2 - i * 2)) = *((unsigned short *)(skb->data + MACADDRLEN * 2 - 2 - i * 2)); + skb_pull(skb, 4); + } + } + + if (skb_is_nonlinear(skb)) + printk("%s(): skb_is_nonlinear!!\n", __FUNCTION__); + + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)) + res = skb_linearize(skb, GFP_ATOMIC); +#else /* (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)) */ + res = skb_linearize(skb); +#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)) */ + if (res < 0) { + printk("TX DROP: skb_linearize fail!\n"); + /* goto free_and_stop; */ + return -1; + } + + res = nat25_db_handle(vif, skb, NAT25_INSERT); + if (res < 0) { + if (res == -2) { + /* priv->ext_stats.tx_drops++; */ + printk("TX DROP: nat25_db_handle fail!\n"); + /* goto free_and_stop; */ + return -1; + + } + /* we just print warning message and let it go */ + /* DEBUG_WARN("%s()-%d: nat25_db_handle INSERT Warning!\n", __FUNCTION__, __LINE__); */ + /* return -1; */ /* return -1 will cause system crash on 2011/08/30! 
*/ + return 0; + } + #endif + } + + memcpy(skb->data + MACADDRLEN, vif->ndev->dev_addr, MACADDRLEN); + + dhcp_flag_bcast(vif, skb); + + if (is_vlan_tag) { + skb_push(skb, 4); + for (i = 0; i < 6; i++) + *((unsigned short *)(skb->data + i * 2)) = *((unsigned short *)(skb->data + 4 + i * 2)); + *((unsigned short *)(skb->data + MACADDRLEN * 2)) = __constant_htons(ETH_P_8021Q); + *((unsigned short *)(skb->data + MACADDRLEN * 2 + 2)) = vlan_hdr; + } + } +#if 0 + else { + if (*((unsigned short *)(skb->data + MACADDRLEN * 2)) == __constant_htons(ETH_P_8021Q)) + is_vlan_tag = 1; + + if (is_vlan_tag) { + if (ICMPV6_MCAST_MAC(skb->data) && ICMPV6_PROTO1A_VALN(skb->data)) + memcpy(skb->data + MACADDRLEN, GET_MY_HWADDR(padapter), MACADDRLEN); + } else { + if (ICMPV6_MCAST_MAC(skb->data) && ICMPV6_PROTO1A(skb->data)) + memcpy(skb->data + MACADDRLEN, GET_MY_HWADDR(padapter), MACADDRLEN); + } + } +#endif /* 0 */ + + /* check if SA is equal to our MAC */ + if (memcmp(skb->data + MACADDRLEN, vif->ndev->dev_addr, MACADDRLEN)) { + /* priv->ext_stats.tx_drops++; */ + printk("TX DROP: untransformed frame SA:%02X%02X%02X%02X%02X%02X!\n", + skb->data[6], skb->data[7], skb->data[8], skb->data[9], skb->data[10], skb->data[11]); + /* goto free_and_stop; */ + return -1; + } + } + printk("%s:exit\n",__func__); + return 0; +} +#endif /* CONFIG_BR_SUPPORT */ + + +#ifdef CONFIG_FILTER_TCP_ACK +/* return: + * 0, msg buf freed by the real driver + * others, skb need free by the caller,remember not use msg->skb! + */ + +int intf_tx(struct rwnx_hw *priv,struct msg_buf *msg) +{ + struct rwnx_vif *rwnx_vif = msg->rwnx_vif; + struct rwnx_hw *rwnx_hw = rwnx_vif->rwnx_hw; + struct rwnx_txhdr *txhdr; + struct rwnx_sw_txhdr *sw_txhdr; + struct txdesc_api *desc; + struct rwnx_sta *sta; + struct rwnx_txq *txq; + int headroom; + //int max_headroom; + int hdr_pads; + + u16 frame_len; + u16 frame_oft; + u8 tid; + struct sk_buff *skb=msg->skb; + struct ethhdr eth_t; + + move_tcpack_msg(rwnx_hw,msg); + kfree(msg); + + memcpy(ð_t, skb->data, sizeof(struct ethhdr)); + + /* Get the STA id and TID information */ + sta = rwnx_get_tx_priv(rwnx_vif, skb, &tid); + if (!sta) + goto free; + + txq = rwnx_txq_sta_get(sta, tid, rwnx_hw); + if (txq->idx == TXQ_INACTIVE) + goto free; + +#ifdef CONFIG_RWNX_AMSDUS_TX + if (rwnx_amsdu_add_subframe(rwnx_hw, skb, sta, txq)) + return NETDEV_TX_OK; +#endif + +#ifdef CONFIG_BR_SUPPORT + if (1) {//(check_fwstate(&padapter->mlmepriv, WIFI_STATION_STATE | WIFI_ADHOC_STATE) == _TRUE) { + void *br_port = NULL; + + #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 35)) + br_port = rwnx_vif->ndev->br_port; + #else + rcu_read_lock(); + br_port = rcu_dereference(rwnx_vif->ndev->rx_handler_data); + rcu_read_unlock(); + #endif + + if (br_port) { + s32 res = aic_br_client_tx(rwnx_vif, &skb); + if (res == -1) { + goto free; + } + } + } +#endif /* CONFIG_BR_SUPPORT */ + + + /* Retrieve the pointer to the Ethernet data */ + // eth = (struct ethhdr *)skb->data; + + skb_pull(skb, 14); + //hdr_pads = RWNX_SWTXHDR_ALIGN_PADS((long)eth); + hdr_pads = RWNX_SWTXHDR_ALIGN_PADS((long)skb->data); + headroom = sizeof(struct rwnx_txhdr) + hdr_pads; + + skb_push(skb, headroom); + + txhdr = (struct rwnx_txhdr *)skb->data; + sw_txhdr = kmem_cache_alloc(rwnx_hw->sw_txhdr_cache, GFP_ATOMIC); + if (unlikely(sw_txhdr == NULL)) + goto free; + txhdr->sw_hdr = sw_txhdr; + desc = &sw_txhdr->desc; + + frame_len = (u16)skb->len - headroom;// - sizeof(*eth); + + sw_txhdr->txq = txq; + sw_txhdr->frame_len = frame_len; + sw_txhdr->rwnx_sta = sta; + 
sw_txhdr->rwnx_vif = rwnx_vif; + sw_txhdr->skb = skb; + sw_txhdr->headroom = headroom; + sw_txhdr->map_len = skb->len - offsetof(struct rwnx_txhdr, hw_hdr); + +#ifdef CONFIG_RWNX_AMSDUS_TX + sw_txhdr->amsdu.len = 0; + sw_txhdr->amsdu.nb = 0; +#endif + sw_txhdr->raw_frame = 0; + sw_txhdr->fixed_rate = 0; + + // Fill-in the descriptor + memcpy(&desc->host.eth_dest_addr, eth_t.h_dest, ETH_ALEN); + memcpy(&desc->host.eth_src_addr, eth_t.h_source, ETH_ALEN); + desc->host.ethertype = eth_t.h_proto; + desc->host.staid = sta->sta_idx; + desc->host.tid = tid; + if (unlikely(rwnx_vif->wdev.iftype == NL80211_IFTYPE_AP_VLAN)) + desc->host.vif_idx = rwnx_vif->ap_vlan.master->vif_index; + else + desc->host.vif_idx = rwnx_vif->vif_index; + + if (rwnx_vif->use_4addr && (sta->sta_idx < NX_REMOTE_STA_MAX)) + desc->host.flags = TXU_CNTRL_USE_4ADDR; + else + desc->host.flags = 0; + + if ((rwnx_vif->tdls_status == TDLS_LINK_ACTIVE) && + rwnx_vif->sta.tdls_sta && + (memcmp(desc->host.eth_dest_addr.array, rwnx_vif->sta.tdls_sta->mac_addr, ETH_ALEN) == 0)) { + desc->host.flags |= TXU_CNTRL_TDLS; + rwnx_vif->sta.tdls_sta->tdls.last_tid = desc->host.tid; + //rwnx_vif->sta.tdls_sta->tdls.last_sn = desc->host.sn; + } + + if (rwnx_vif->wdev.iftype == NL80211_IFTYPE_MESH_POINT) { + if (rwnx_vif->is_resending) { + desc->host.flags |= TXU_CNTRL_MESH_FWD; + } + } + +#ifdef CONFIG_RWNX_SPLIT_TX_BUF + desc->host.packet_len[0] = frame_len; +#else + desc->host.packet_len = frame_len; +#endif + + txhdr->hw_hdr.cfm.status.value = 0; + + if (unlikely(rwnx_prep_tx(rwnx_hw, txhdr))) { + kmem_cache_free(rwnx_hw->sw_txhdr_cache, sw_txhdr); + skb_pull(skb, headroom); + dev_kfree_skb_any(skb); + return NETDEV_TX_BUSY; + } + + /* Fill-in TX descriptor */ + frame_oft = sizeof(struct rwnx_txhdr) - offsetof(struct rwnx_txhdr, hw_hdr) + + hdr_pads;// + sizeof(*eth); +#if 0 +#ifdef CONFIG_RWNX_SPLIT_TX_BUF + desc->host.packet_addr[0] = sw_txhdr->dma_addr + frame_oft; + desc->host.packet_cnt = 1; +#else + desc->host.packet_addr = sw_txhdr->dma_addr + frame_oft; +#endif +#endif + desc->host.status_desc_addr = sw_txhdr->dma_addr; + + spin_lock_bh(&rwnx_hw->tx_lock); + if (rwnx_txq_queue_skb(skb, txq, rwnx_hw, false)) + rwnx_hwq_process(rwnx_hw, txq->hwq); + spin_unlock_bh(&rwnx_hw->tx_lock); + + return 0;//NETDEV_TX_OK + +free: + dev_kfree_skb_any(skb); + + return 0;//NETDEV_TX_OK +} +#endif + + +/** + * netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb, + * struct net_device *dev); + * Called when a packet needs to be transmitted. + * Must return NETDEV_TX_OK , NETDEV_TX_BUSY. + * (can also return NETDEV_TX_LOCKED if NETIF_F_LLTX) + * + * - Initialize the desciptor for this pkt (stored in skb before data) + * - Push the pkt in the corresponding Txq + * - If possible (i.e. 
credit available and not in PS) the pkt is pushed + * to fw + */ +netdev_tx_t rwnx_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct rwnx_vif *rwnx_vif = netdev_priv(dev); + struct rwnx_hw *rwnx_hw = rwnx_vif->rwnx_hw; + struct rwnx_txhdr *txhdr; + struct rwnx_sw_txhdr *sw_txhdr; + struct txdesc_api *desc; + struct rwnx_sta *sta; + struct rwnx_txq *txq; + int headroom; + int max_headroom; + int hdr_pads; + + u16 frame_len; + u16 frame_oft; + u8 tid; + + struct ethhdr eth_t; +#ifdef CONFIG_FILTER_TCP_ACK + struct msg_buf *msgbuf; +#endif + +#ifdef CONFIG_ONE_TXQ + skb->queue_mapping = rwnx_select_txq(rwnx_vif, skb); +#endif + + + sk_pacing_shift_update(skb->sk, rwnx_hw->tcp_pacing_shift); + max_headroom = sizeof(struct rwnx_txhdr); + + /* check whether the current skb can be used */ + if (skb_shared(skb) || (skb_headroom(skb) < max_headroom) || + (skb_cloned(skb) && (dev->priv_flags & IFF_BRIDGE_PORT))) { + struct sk_buff *newskb = skb_copy_expand(skb, max_headroom, 0, + GFP_ATOMIC); + if (unlikely(newskb == NULL)) + goto free; + + dev_kfree_skb_any(skb); + + skb = newskb; + } + + if(skb->priority < 3) + skb->priority = 0; + +#ifdef CONFIG_FILTER_TCP_ACK + if(cpu_to_le16(skb->len) <= MAX_TCP_ACK){ + msgbuf=intf_tcp_alloc_msg(msgbuf); + msgbuf->rwnx_vif=rwnx_vif; + msgbuf->skb=skb; + if(filter_send_tcp_ack(rwnx_hw,msgbuf,skb->data,cpu_to_le16(skb->len))){ + return NETDEV_TX_OK; + }else{ + move_tcpack_msg(rwnx_hw,msgbuf); + kfree(msgbuf); + } + } +#endif + memcpy(ð_t, skb->data, sizeof(struct ethhdr)); + + /* Get the STA id and TID information */ + sta = rwnx_get_tx_priv(rwnx_vif, skb, &tid); + if (!sta) + goto free; + + txq = rwnx_txq_sta_get(sta, tid, rwnx_hw); + if (txq->idx == TXQ_INACTIVE) + goto free; + +#ifdef CONFIG_RWNX_AMSDUS_TX + if (rwnx_amsdu_add_subframe(rwnx_hw, skb, sta, txq)) + return NETDEV_TX_OK; +#endif + +#ifdef CONFIG_BR_SUPPORT + if (1) {//(check_fwstate(&padapter->mlmepriv, WIFI_STATION_STATE | WIFI_ADHOC_STATE) == _TRUE) { + void *br_port = NULL; + + #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 35)) + br_port = rwnx_vif->ndev->br_port; + #else + rcu_read_lock(); + br_port = rcu_dereference(rwnx_vif->ndev->rx_handler_data); + rcu_read_unlock(); + #endif + + if (br_port) { + s32 res = aic_br_client_tx(rwnx_vif, &skb); + if (res == -1) { + goto free; + } + } + } +#endif /* CONFIG_BR_SUPPORT */ + + /* Retrieve the pointer to the Ethernet data */ + // eth = (struct ethhdr *)skb->data; + + skb_pull(skb, 14); + //hdr_pads = RWNX_SWTXHDR_ALIGN_PADS((long)eth); + hdr_pads = RWNX_SWTXHDR_ALIGN_PADS((long)skb->data); + headroom = sizeof(struct rwnx_txhdr) + hdr_pads; + + skb_push(skb, headroom); + + txhdr = (struct rwnx_txhdr *)skb->data; + sw_txhdr = kmem_cache_alloc(rwnx_hw->sw_txhdr_cache, GFP_ATOMIC); + + if (unlikely(sw_txhdr == NULL)) + goto free; + txhdr->sw_hdr = sw_txhdr; + desc = &sw_txhdr->desc; + + frame_len = (u16)skb->len - headroom;// - sizeof(*eth); + + sw_txhdr->txq = txq; + sw_txhdr->frame_len = frame_len; + sw_txhdr->rwnx_sta = sta; + sw_txhdr->rwnx_vif = rwnx_vif; + sw_txhdr->skb = skb; + sw_txhdr->headroom = headroom; + sw_txhdr->map_len = skb->len - offsetof(struct rwnx_txhdr, hw_hdr); + +#ifdef CONFIG_RWNX_AMSDUS_TX + sw_txhdr->amsdu.len = 0; + sw_txhdr->amsdu.nb = 0; +#endif + sw_txhdr->raw_frame = 0; + sw_txhdr->fixed_rate = 0; + // Fill-in the descriptor + memcpy(&desc->host.eth_dest_addr, eth_t.h_dest, ETH_ALEN); + memcpy(&desc->host.eth_src_addr, eth_t.h_source, ETH_ALEN); + desc->host.ethertype = eth_t.h_proto; + desc->host.staid 
= sta->sta_idx; + desc->host.tid = tid; + if (unlikely(rwnx_vif->wdev.iftype == NL80211_IFTYPE_AP_VLAN)) + desc->host.vif_idx = rwnx_vif->ap_vlan.master->vif_index; + else + desc->host.vif_idx = rwnx_vif->vif_index; + + if (rwnx_vif->use_4addr && (sta->sta_idx < NX_REMOTE_STA_MAX)) + desc->host.flags = TXU_CNTRL_USE_4ADDR; + else + desc->host.flags = 0; + + if ((rwnx_vif->tdls_status == TDLS_LINK_ACTIVE) && + rwnx_vif->sta.tdls_sta && + (memcmp(desc->host.eth_dest_addr.array, rwnx_vif->sta.tdls_sta->mac_addr, ETH_ALEN) == 0)) { + desc->host.flags |= TXU_CNTRL_TDLS; + rwnx_vif->sta.tdls_sta->tdls.last_tid = desc->host.tid; + //rwnx_vif->sta.tdls_sta->tdls.last_sn = desc->host.sn; + } + + if (rwnx_vif->wdev.iftype == NL80211_IFTYPE_MESH_POINT) { + if (rwnx_vif->is_resending) { + desc->host.flags |= TXU_CNTRL_MESH_FWD; + } + } + +#ifdef CONFIG_RWNX_SPLIT_TX_BUF + desc->host.packet_len[0] = frame_len; +#else + desc->host.packet_len = frame_len; +#endif + + txhdr->hw_hdr.cfm.status.value = 0; + + if (unlikely(rwnx_prep_tx(rwnx_hw, txhdr))) { + kmem_cache_free(rwnx_hw->sw_txhdr_cache, sw_txhdr); + skb_pull(skb, headroom); + dev_kfree_skb_any(skb); + return NETDEV_TX_BUSY; + } + + /* Fill-in TX descriptor */ + frame_oft = sizeof(struct rwnx_txhdr) - offsetof(struct rwnx_txhdr, hw_hdr) + + hdr_pads;// + sizeof(*eth); + #if 0 +#ifdef CONFIG_RWNX_SPLIT_TX_BUF + desc->host.packet_addr[0] = sw_txhdr->dma_addr + frame_oft; + desc->host.packet_cnt = 1; +#else + desc->host.packet_addr = sw_txhdr->dma_addr + frame_oft; +#endif +#endif + desc->host.status_desc_addr = sw_txhdr->dma_addr; + + spin_lock_bh(&rwnx_hw->tx_lock); + if (rwnx_txq_queue_skb(skb, txq, rwnx_hw, false)) + rwnx_hwq_process(rwnx_hw, txq->hwq); + spin_unlock_bh(&rwnx_hw->tx_lock); + + return NETDEV_TX_OK; + +free: + dev_kfree_skb_any(skb); + + return NETDEV_TX_OK; +} + +/** + * rwnx_start_mgmt_xmit - Transmit a management frame + * + * @vif: Vif that send the frame + * @sta: Destination of the frame. May be NULL if the destiantion is unknown + * to the AP. + * @params: Mgmt frame parameters + * @offchan: Indicate whether the frame must be send via the offchan TXQ. + * (is is redundant with params->offchan ?) 
+ * @cookie: updated with a unique value to identify the frame with upper layer + * + */ + + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)) +int rwnx_start_mgmt_xmit(struct rwnx_vif *vif, struct rwnx_sta *sta, + struct cfg80211_mgmt_tx_params *params, bool offchan, + u64 *cookie) +#else +int rwnx_start_mgmt_xmit(struct rwnx_vif *vif, struct rwnx_sta *sta, + struct ieee80211_channel *channel, bool offchan, + unsigned int wait, const u8* buf, size_t len, + #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0)) + bool no_cck, + #endif + #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 3, 0)) + bool dont_wait_for_ack, + #endif + u64 *cookie) +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0) */ +{ + struct rwnx_hw *rwnx_hw = vif->rwnx_hw; + struct rwnx_txhdr *txhdr; + struct rwnx_sw_txhdr *sw_txhdr; + struct txdesc_api *desc; + struct sk_buff *skb; + u16 frame_len, headroom, frame_oft; + u8 *data; + int nx_off_chan_txq_idx = NX_OFF_CHAN_TXQ_IDX; + struct rwnx_txq *txq; + bool robust; + #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)) + const u8 *buf = params->buf; + size_t len = params->len; + bool no_cck = params->no_cck; + #endif + headroom = sizeof(struct rwnx_txhdr); + frame_len = len; + + //---------------------------------------------------------------------- + + if((g_rwnx_plat->usbdev->chipid == PRODUCT_ID_AIC8801) || + ((g_rwnx_plat->usbdev->chipid == PRODUCT_ID_AIC8800DC || + g_rwnx_plat->usbdev->chipid == PRODUCT_ID_AIC8800DW) && chip_id < 3)){ + nx_off_chan_txq_idx = NX_OFF_CHAN_TXQ_IDX_FOR_OLD_IC; + } + + /* Set TID and Queues indexes */ + if (sta) { + txq = rwnx_txq_sta_get(sta, 8, rwnx_hw); + } else { + if (offchan) + txq = &rwnx_hw->txq[nx_off_chan_txq_idx]; + else + txq = rwnx_txq_vif_get(vif, NX_UNK_TXQ_TYPE); + } + + /* Ensure that TXQ is active */ + if (txq->idx == TXQ_INACTIVE) { + netdev_dbg(vif->ndev, "TXQ inactive\n"); + return -EBUSY; + } + + /* + * Create a SK Buff object that will contain the provided data + */ + skb = dev_alloc_skb(headroom + frame_len); + + if (!skb) { + return -ENOMEM; + } + + *cookie = (unsigned long)skb; + + /* + * Move skb->data pointer in order to reserve room for rwnx_txhdr + * headroom value will be equal to sizeof(struct rwnx_txhdr) + */ + skb_reserve(skb, headroom); + + /* + * Extend the buffer data area in order to contain the provided packet + * len value (for skb) will be equal to param->len + */ + data = skb_put(skb, frame_len); + /* Copy the provided data */ + memcpy(data, buf, frame_len); + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0)) + robust = ieee80211_is_robust_mgmt_frame(skb); +#else + if (skb->len < 25){ + robust = false; + } + robust = ieee80211_is_robust_mgmt_frame((void *)skb->data); +#endif + + #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)) + /* Update CSA counter if present */ + if (unlikely(params->n_csa_offsets) && + vif->wdev.iftype == NL80211_IFTYPE_AP && + vif->ap.csa) { + int i; + + data = skb->data; + for (i = 0; i < params->n_csa_offsets ; i++) { + data[params->csa_offsets[i]] = vif->ap.csa->count; + } + } + #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0) */ + + /* + * Go back to the beginning of the allocated data area + * skb->data pointer will move backward + */ + skb_push(skb, headroom); + + //---------------------------------------------------------------------- + + /* Fill the TX Header */ + txhdr = (struct rwnx_txhdr *)skb->data; + + txhdr->hw_hdr.cfm.status.value = 0; + + //---------------------------------------------------------------------- + + /* Fill the SW TX Header 
*/ + sw_txhdr = kmem_cache_alloc(rwnx_hw->sw_txhdr_cache, GFP_ATOMIC); + + if (unlikely(sw_txhdr == NULL)) { + dev_kfree_skb(skb); + return -ENOMEM; + } + + + txhdr->sw_hdr = sw_txhdr; + + sw_txhdr->txq = txq; + sw_txhdr->frame_len = frame_len; + sw_txhdr->rwnx_sta = sta; + sw_txhdr->rwnx_vif = vif; + sw_txhdr->skb = skb; + sw_txhdr->headroom = headroom; + sw_txhdr->map_len = skb->len - offsetof(struct rwnx_txhdr, hw_hdr); +#ifdef CONFIG_RWNX_AMSDUS_TX + sw_txhdr->amsdu.len = 0; + sw_txhdr->amsdu.nb = 0; +#endif + sw_txhdr->raw_frame = 0; + sw_txhdr->fixed_rate = 0; + //---------------------------------------------------------------------- + + /* Fill the Descriptor to be provided to the MAC SW */ + desc = &sw_txhdr->desc; + + desc->host.ethertype = 0; + desc->host.staid = (sta) ? sta->sta_idx : 0xFF; + desc->host.vif_idx = vif->vif_index; + desc->host.tid = 0xFF; + desc->host.flags = TXU_CNTRL_MGMT; + if (robust) + desc->host.flags |= TXU_CNTRL_MGMT_ROBUST; + +#ifdef CONFIG_RWNX_SPLIT_TX_BUF + desc->host.packet_len[0] = frame_len; +#else + desc->host.packet_len = frame_len; +#endif + + if (no_cck) { + desc->host.flags |= TXU_CNTRL_MGMT_NO_CCK; + } + + /* Get DMA Address */ + if (unlikely(rwnx_prep_tx(rwnx_hw, txhdr))) { + kmem_cache_free(rwnx_hw->sw_txhdr_cache, sw_txhdr); + dev_kfree_skb(skb); + return -EBUSY; + } + + frame_oft = sizeof(struct rwnx_txhdr) - offsetof(struct rwnx_txhdr, hw_hdr); + #if 0 +#ifdef CONFIG_RWNX_SPLIT_TX_BUF + desc->host.packet_addr[0] = sw_txhdr->dma_addr + frame_oft; + desc->host.packet_cnt = 1; +#else + desc->host.packet_addr = sw_txhdr->dma_addr + frame_oft; +#endif + #endif + desc->host.status_desc_addr = sw_txhdr->dma_addr; + + //---------------------------------------------------------------------- + + spin_lock_bh(&rwnx_hw->tx_lock); + AICWFDBG(LOGDEBUG, "%s sta:%p skb:%p desc->host.staid:%d \r\n", __func__, sta, skb, desc->host.staid); + if (rwnx_txq_queue_skb(skb, txq, rwnx_hw, false)) + rwnx_hwq_process(rwnx_hw, txq->hwq); + spin_unlock_bh(&rwnx_hw->tx_lock); + + return 0; +} + +#ifdef CONFIG_BAND_STEERING +void rwnx_probersp_work(struct work_struct *work) +{ + struct ap_probe_rsp *rsp = container_of(work, struct ap_probe_rsp, rsp_work); + struct rwnx_vif *rwnx_vif = container_of(rsp, struct rwnx_vif, pb_pool[0]); + struct rwnx_hw *rwnx_hw = rwnx_vif->rwnx_hw; + struct sk_buff *skb = NULL; + struct rwnx_bcn *bcn = &rwnx_vif->ap.bcn; + u8_l *buf; + struct ieee80211_mgmt *mgmt; + bool robust; + u16_l headroom, frame_len; + struct rwnx_sta *sta = NULL; + struct rwnx_txq *txq; + struct rwnx_txhdr *txhdr; + struct rwnx_sw_txhdr *sw_txhdr; + struct txdesc_api *desc; + headroom = sizeof(struct rwnx_txhdr); + frame_len = bcn->len; + + if (aicwf_band_steering_block_chk(rwnx_vif, rsp->da)) { + AICWFDBG(LOGSTEER, "usb %s, %d, probe_rsp refuse temp %pM\n", __func__, rwnx_vif->ap.freq, rsp->da); + goto free_use; + } + if (frame_len == 0 || !bcn->head || bcn->head_len == 0 || bcn->head_len > frame_len) { + AICWFDBG(LOGSTEER, "%s bcn head NULL\n", __func__); + goto free_use; + } + + skb = dev_alloc_skb(frame_len + headroom); + if (!skb) { + AICWFDBG(LOGERROR, "%s alloc skb fail\n", __func__); + goto free_use; + } + + skb_reserve(skb, headroom); + buf = skb_put(skb, frame_len); + + memcpy(buf, bcn->head, bcn->head_len); + buf += bcn->head_len; + *buf++ = WLAN_EID_TIM; + *buf++ = 4; + *buf++ = 0; + *buf++ = bcn->dtim; + *buf++ = 0; + *buf++ = 0; + if (bcn->tail) { + memcpy(buf, bcn->tail, bcn->tail_len); + buf += bcn->tail_len; + } + if (bcn->ies) + memcpy(buf, 
bcn->ies, bcn->ies_len); + + mgmt = (struct ieee80211_mgmt *)skb->data; + mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_PROBE_RESP); + memcpy (mgmt->da, rsp->da, ETH_ALEN); + rsp->in_use = false; + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0)) + robust = ieee80211_is_robust_mgmt_frame(skb); +#else + if (skb->len < 25) + robust = false; + robust = ieee80211_is_robust_mgmt_frame((void *)skb->data); +#endif + + sta = rwnx_retrieve_sta(rwnx_hw, rwnx_vif, mgmt->da, mgmt->frame_control, true); + if (sta) { + txq = rwnx_txq_sta_get(sta, 8, rwnx_hw); + } else { + txq = rwnx_txq_vif_get(rwnx_vif, NX_UNK_TXQ_TYPE); + } + if (txq->idx == TXQ_INACTIVE) { + AICWFDBG(LOGERROR, "%s TXQ inactive\n", __func__); + goto fail; + } + + skb_push(skb, headroom); + txhdr = (struct rwnx_txhdr *)skb->data; + txhdr->hw_hdr.cfm.status.value = 0; + sw_txhdr = kmem_cache_alloc(rwnx_hw->sw_txhdr_cache, GFP_ATOMIC); + if (unlikely(sw_txhdr == NULL)) { + AICWFDBG(LOGERROR, "%s cache fail\n", __func__); + goto fail; + } + txhdr->sw_hdr = sw_txhdr; + sw_txhdr->txq = txq; + sw_txhdr->frame_len = frame_len; + sw_txhdr->rwnx_sta = sta; + sw_txhdr->rwnx_vif = rwnx_vif; + sw_txhdr->skb = skb; + sw_txhdr->headroom = headroom; + sw_txhdr->map_len = skb->len - offsetof(struct rwnx_txhdr, hw_hdr); +#ifdef CONFIG_RWNX_AMSDUS_TX + sw_txhdr->amsdu.len = 0; + sw_txhdr->amsdu.nb = 0; +#endif + sw_txhdr->raw_frame = 0; + sw_txhdr->fixed_rate = 0; + + desc = &sw_txhdr->desc; + desc->host.ethertype = 0; + desc->host.staid = (sta) ? sta->sta_idx : 0xFF; + desc->host.vif_idx = rwnx_vif->vif_index; + desc->host.tid = 0xFF; + desc->host.flags = TXU_CNTRL_MGMT; + if (robust) + desc->host.flags |= TXU_CNTRL_MGMT_ROBUST; +#ifdef CONFIG_RWNX_SPLIT_TX_BUF + desc->host.packet_len[0] = frame_len; +#else + desc->host.packet_len = frame_len; +#endif + desc->host.status_desc_addr = sw_txhdr->dma_addr; + + spin_lock_bh(&rwnx_hw->tx_lock); + AICWFDBG(LOGSTEER, "usb p_rsp: %pM", mgmt->da); + // AICWFDBG(LOGDEBUG, "usb %s sta:%p skb:%p desc->host.staid:%d \r\n", __func__, sta, skb, desc->host.staid); + if (rwnx_txq_queue_skb(skb, txq, rwnx_hw, false)) + rwnx_hwq_process(rwnx_hw, txq->hwq); + spin_unlock_bh(&rwnx_hw->tx_lock); + + return; + +fail: + if (sw_txhdr) + kmem_cache_free(rwnx_hw->sw_txhdr_cache, sw_txhdr); + dev_kfree_skb(skb); +free_use: + if (rsp->in_use) + rsp->in_use = false; +} +#endif + +#ifdef CONFIG_RWNX_MON_XMIT +/** + * netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb, + * struct net_device *dev); + * Called when a packet needs to be transmitted. + * Must return NETDEV_TX_OK , NETDEV_TX_BUSY. + * (can also return NETDEV_TX_LOCKED if NETIF_F_LLTX) + * + * - Initialize the desciptor for this pkt (stored in skb before data) + * - Push the pkt in the corresponding Txq + * - If possible (i.e. 
credit available and not in PS) the pkt is pushed + * to fw + */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 11, 0)) +#define IEEE80211_RADIOTAP_MCS_HAVE_STBC 0x20 +#define IEEE80211_RADIOTAP_MCS_STBC_MASK 0x60 +#define IEEE80211_RADIOTAP_MCS_STBC_SHIFT 5 +#endif +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 15, 0)) +#define IEEE80211_RADIOTAP_CODING_LDPC_USER0 0x01 +#endif + +netdev_tx_t rwnx_start_monitor_if_xmit(struct sk_buff *skb, struct net_device *dev) +{ + int rtap_len, ret, idx, tmp_len; + struct ieee80211_radiotap_header *rtap_hdr; // net/ieee80211_radiotap.h + struct ieee80211_radiotap_iterator iterator; // net/cfg80211.h + u8_l *rtap_buf = (u8_l *)skb->data; + u8_l rate; + + struct rwnx_vif *vif = netdev_priv(dev); + struct rwnx_hw *rwnx_hw = vif->rwnx_hw; + struct rwnx_txhdr *txhdr; + struct rwnx_sw_txhdr *sw_txhdr; + struct txdesc_api *desc; + struct rwnx_sta *sta; + struct rwnx_txq *txq; + u16_l frame_len, headroom, frame_oft; + u8_l tid, rate_fmt = FORMATMOD_NON_HT, rate_idx = 0, txsig_bw = PHY_CHNL_BW_20; + u8_l *pframe, *data; + bool robust; + struct sk_buff *skb_mgmt; + bool offchan = false; + int nx_off_chan_txq_idx = NX_OFF_CHAN_TXQ_IDX; + + rtap_hdr = (struct ieee80211_radiotap_header*)(rtap_buf); + rtap_len = ieee80211_get_radiotap_len(rtap_buf);//max_length + frame_len = skb->len; + + AICWFDBG(LOGINFO, "rwnx_start_monitor_if_xmit, skb_len=%d, rtap_len=%d\n", skb->len, rtap_len); +//rwnx_data_dump((char*)__func__, skb->data, skb->len); + if((g_rwnx_plat->usbdev->chipid == PRODUCT_ID_AIC8801) || + ((g_rwnx_plat->usbdev->chipid == PRODUCT_ID_AIC8800DC || + g_rwnx_plat->usbdev->chipid == PRODUCT_ID_AIC8800DW) && chip_id < 3)){ + nx_off_chan_txq_idx = NX_OFF_CHAN_TXQ_IDX_FOR_OLD_IC; + } + + + if (unlikely(rtap_hdr->it_version)){ + AICWFDBG(LOGERROR, "%s itv \r\n", __func__); + goto free_tag; + } + + if (unlikely(skb->len < rtap_len)){ + AICWFDBG(LOGERROR, "%s skb->len < rtap_len \r\n", __func__); + goto free_tag; + } + + if (unlikely(rtap_len < sizeof(struct ieee80211_radiotap_header))){ + AICWFDBG(LOGERROR, "%s rtap_len < sizeof(struct ieee80211_radiotap_header) \r\n", __func__); + goto free_tag; + } + + frame_len -= rtap_len; + pframe = rtap_buf + rtap_len; + + // Parse radiotap for injection items and overwrite attribs as needed + ret = ieee80211_radiotap_iterator_init(&iterator, rtap_hdr, rtap_len, NULL); + while (!ret) { + ret = ieee80211_radiotap_iterator_next(&iterator); + if (ret) { + continue; + } + AICWFDBG(LOGDEBUG, "%s iterator.this_arg_index:%d iterator.this_arg:%x\r\n", __func__, + iterator.this_arg_index, *iterator.this_arg); + switch (iterator.this_arg_index) { + case IEEE80211_RADIOTAP_RATE: + // This is basic 802.11b/g rate; use MCS/VHT for higher rates + rate = *iterator.this_arg; + AICWFDBG(LOGDEBUG, "rate=0x%x\n", rate); + for (idx = 0; idx < HW_RATE_MAX; idx++) { + if ((rate * 5) == tx_legrates_lut_rate[idx]) { + AICWFDBG(LOGDEBUG, "%s datarate:%d \r\n", __func__, tx_legrates_lut_rate[idx]); + break; + } + } + + if (idx < HW_RATE_MAX) { + rate_idx = idx; + AICWFDBG(LOGDEBUG, "rate_idx = %d \r\n", rate_idx); + } else { + AICWFDBG(LOGERROR, "invalid radiotap rate: %d\n", rate); + } + break; + + case IEEE80211_RADIOTAP_TX_FLAGS: { + u16_l txflags = get_unaligned_le16(iterator.this_arg); + AICWFDBG(LOGDEBUG, "txflags=0x%x\n", txflags); + if ((txflags & IEEE80211_RADIOTAP_F_TX_NOACK) == 0) { + AICWFDBG(LOGDEBUG, " TX_NOACK\n"); + } + if (txflags & 0x0010) { // Use preconfigured seq num + // NOTE: this is currently ignored due to qos_en=_FALSE and HW seq 
num override + AICWFDBG(LOGDEBUG, " GetSequence\n"); + } + } + break; + + case IEEE80211_RADIOTAP_MCS: { + u8_l mcs_have = iterator.this_arg[0]; + AICWFDBG(LOGDEBUG, "mcs_have=0x%x\n", mcs_have); + rate_fmt = FORMATMOD_HT_MF; + if (mcs_have & IEEE80211_RADIOTAP_MCS_HAVE_BW) { + u8_l bw = (iterator.this_arg[1] & IEEE80211_RADIOTAP_MCS_BW_MASK); + u8_l ch_offset = 0; + if (bw == IEEE80211_RADIOTAP_MCS_BW_40) { + txsig_bw = PHY_CHNL_BW_40; + } else if (bw == IEEE80211_RADIOTAP_MCS_BW_20L) { + bw = IEEE80211_RADIOTAP_MCS_BW_20; + ch_offset = 1; // CHNL_OFFSET_LOWER; + } else if (bw == IEEE80211_RADIOTAP_MCS_BW_20U) { + bw = IEEE80211_RADIOTAP_MCS_BW_20; + ch_offset = 2; // CHNL_OFFSET_UPPER; + } + AICWFDBG(LOGDEBUG, " bw=%d, ch_offset=%d\n", bw, ch_offset); + } + if (mcs_have & IEEE80211_RADIOTAP_MCS_HAVE_MCS) { + u8_l fixed_rate = iterator.this_arg[2] & 0x7f; + if (fixed_rate > 31) { + fixed_rate = 0; + } + rate_idx = fixed_rate; + AICWFDBG(LOGDEBUG, " fixed_rate=0x%x\n", fixed_rate); + } + if ((mcs_have & IEEE80211_RADIOTAP_MCS_HAVE_GI) && (iterator.this_arg[1] & IEEE80211_RADIOTAP_MCS_SGI)) { + AICWFDBG(LOGDEBUG, " sgi\n"); + } + if ((mcs_have & IEEE80211_RADIOTAP_MCS_HAVE_FEC) && (iterator.this_arg[1] & IEEE80211_RADIOTAP_MCS_FEC_LDPC)) { + AICWFDBG(LOGDEBUG, " ldpc\n"); + } + if (mcs_have & IEEE80211_RADIOTAP_MCS_HAVE_STBC) { + u8 stbc = (iterator.this_arg[1] & IEEE80211_RADIOTAP_MCS_STBC_MASK) >> IEEE80211_RADIOTAP_MCS_STBC_SHIFT; + AICWFDBG(LOGDEBUG, " stbc=0x%x\n", stbc); + } + } + break; + + case IEEE80211_RADIOTAP_VHT: { + unsigned int mcs, nss; + u8 known = iterator.this_arg[0]; + u8 flags = iterator.this_arg[2]; + rate_fmt = FORMATMOD_VHT; + AICWFDBG(LOGDEBUG, "known=0x%x, flags=0x%x\n", known, flags); + // NOTE: this code currently only supports 1SS for radiotap defined rates + if ((known & IEEE80211_RADIOTAP_VHT_KNOWN_STBC) && (flags & IEEE80211_RADIOTAP_VHT_FLAG_STBC)) { + AICWFDBG(LOGDEBUG, " stbc\n"); + } + if ((known & IEEE80211_RADIOTAP_VHT_KNOWN_GI) && (flags & IEEE80211_RADIOTAP_VHT_FLAG_SGI)) { + AICWFDBG(LOGDEBUG, " sgi\n"); + } + if (known & IEEE80211_RADIOTAP_VHT_KNOWN_BANDWIDTH) { + u8_l bw = iterator.this_arg[3] & 0x1F; + AICWFDBG(LOGDEBUG, " bw=0x%x\n",bw); + // NOTE: there are various L and U, but we just use straight 20/40/80 + // since it's not clear how to set CHNL_OFFSET_LOWER/_UPPER with different + // sideband sizes/configurations. TODO. + // Also, any 160 is treated as 80 due to lack of WIDTH_160. 
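+ // Illustrative note (sketch of the handling below): radiotap VHT bandwidth
+ // index 0 maps to PHY_CHNL_BW_20, indices 1..3 are 40 MHz, 4..10 are 80 MHz
+ // and 11..25 are 160 MHz variants; since WIDTH_160 is unavailable, anything
+ // above 20 MHz is signalled as PHY_CHNL_BW_40 in this path.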
+ txsig_bw = PHY_CHNL_BW_40; + if (bw == 0) { + txsig_bw = PHY_CHNL_BW_20; + AICWFDBG(LOGDEBUG, " 20M\n"); + } else if (bw >=1 && bw <= 3) { + AICWFDBG(LOGDEBUG, " 40M\n"); + } else if (bw >=4 && bw <= 10) { + AICWFDBG(LOGDEBUG, " 80M\n"); + } else if (bw >= 11 && bw <= 25) { + AICWFDBG(LOGDEBUG, " 160M\n"); + } + } + // User 0 + nss = iterator.this_arg[4] & 0x0F; // Number of spatial streams + AICWFDBG(LOGINFO, " nss=0x%x\n", nss); + if (nss > 0) { + if (nss > 4) nss = 4; + mcs = (iterator.this_arg[4]>>4) & 0x0F; // MCS rate index + if (mcs > 8) mcs = 9; + rate_idx = mcs; + AICWFDBG(LOGDEBUG, " mcs=0x%x\n", mcs); + if (iterator.this_arg[8] & IEEE80211_RADIOTAP_CODING_LDPC_USER0) { + AICWFDBG(LOGDEBUG, " ldpc\n"); + } + } + } + break; + + case IEEE80211_RADIOTAP_HE: { + u16 data1 = ((u16)iterator.this_arg[1] << 8) | iterator.this_arg[0]; + u16 data2 = ((u16)iterator.this_arg[3] << 8) | iterator.this_arg[2]; + u16 data3 = ((u16)iterator.this_arg[5] << 8) | iterator.this_arg[4]; + u16 data5 = ((u16)iterator.this_arg[9] << 8) | iterator.this_arg[8]; + u8 fmt_he = data1 & IEEE80211_RADIOTAP_HE_DATA1_FORMAT_MASK; + if (fmt_he == IEEE80211_RADIOTAP_HE_DATA1_FORMAT_MU) { + rate_fmt = FORMATMOD_HE_MU; + } else if (fmt_he == IEEE80211_RADIOTAP_HE_DATA1_FORMAT_EXT_SU) { + rate_fmt = FORMATMOD_HE_ER; + } else { + rate_fmt = FORMATMOD_HE_SU; + } + if (data1 & IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN) { + u8 mcs = (data3 & IEEE80211_RADIOTAP_HE_DATA3_DATA_MCS) >> 8; + if (mcs > 11) mcs = 11; + rate_idx = mcs; + } + if (data1 & IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN) { + u8 bw = data5 & IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC; + txsig_bw = (bw == IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_20MHZ) ? PHY_CHNL_BW_20 : PHY_CHNL_BW_40; + } + if (data2 & IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN) { + u8 gi = (data5 & IEEE80211_RADIOTAP_HE_DATA5_GI) >> 4; + AICWFDBG(LOGDEBUG, " gi: %d\n", gi); + } + } + break; + + default: + AICWFDBG(LOGERROR, "unparsed arg: 0x%x\n",iterator.this_arg_index); + break; + } + } + + #if 0 + // dump buffer + tmp_len = 128; + if (skb->len < 128) { + tmp_len = skb->len; + } + for (idx = 0; idx < tmp_len; idx+=16) { + printk("[%04X] %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X\n", idx, + rtap_buf[idx+0],rtap_buf[idx+1],rtap_buf[idx+2],rtap_buf[idx+3], + rtap_buf[idx+4],rtap_buf[idx+5],rtap_buf[idx+6],rtap_buf[idx+7], + rtap_buf[idx+8],rtap_buf[idx+9],rtap_buf[idx+10],rtap_buf[idx+11], + rtap_buf[idx+12],rtap_buf[idx+13],rtap_buf[idx+14],rtap_buf[idx+15]); + } + #endif + + /* Get the STA id and TID information */ + sta = rwnx_get_tx_priv(vif, skb, &tid); + //if (!sta) { + // printk("sta=null, tid=0x%x\n", tid); + //} + /* Set TID and Queues indexes */ + if (sta) { + txq = rwnx_txq_sta_get(sta, 8, rwnx_hw); + } else { + if (offchan) + txq = &rwnx_hw->txq[nx_off_chan_txq_idx]; + else + txq = rwnx_txq_vif_get(vif, NX_UNK_TXQ_TYPE); + } + if (txq->idx == TXQ_INACTIVE) { + AICWFDBG(LOGERROR, "TXQ_INACTIVE\n"); + goto free_tag; + } + // prepare to xmit + headroom = sizeof(struct rwnx_txhdr); + skb_mgmt = dev_alloc_skb(headroom + frame_len); + if (!skb_mgmt) { + AICWFDBG(LOGERROR, "skb_mgmt alloc fail\n"); + goto free_tag; + } + skb_reserve(skb_mgmt, headroom); + data = skb_put(skb_mgmt, frame_len); + /* Copy the provided data */ + memcpy(data, pframe, frame_len); + robust = ieee80211_is_robust_mgmt_frame( +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 15, 0)) + (void*)skb_mgmt +#else + skb_mgmt +#endif + ); + skb_push(skb_mgmt, headroom); + /* Fill 
the TX Header */ + txhdr = (struct rwnx_txhdr *)skb_mgmt->data; + txhdr->hw_hdr.cfm.status.value = 0; + /* Fill the SW TX Header */ + sw_txhdr = kmem_cache_alloc(rwnx_hw->sw_txhdr_cache, GFP_ATOMIC); + if (unlikely(sw_txhdr == NULL)) { + dev_kfree_skb(skb_mgmt); + AICWFDBG(LOGERROR, "sw_txhdr alloc fail\n"); + goto free_tag; + } + txhdr->sw_hdr = sw_txhdr; + sw_txhdr->txq = txq; + sw_txhdr->frame_len = frame_len; + sw_txhdr->rwnx_sta = sta; + sw_txhdr->rwnx_vif = vif; + sw_txhdr->skb = skb_mgmt; + sw_txhdr->headroom = headroom; + sw_txhdr->map_len = skb_mgmt->len - offsetof(struct rwnx_txhdr, hw_hdr); + sw_txhdr->raw_frame = 1; + sw_txhdr->fixed_rate = 1; + sw_txhdr->rate_config = ((rate_fmt << FORMAT_MOD_TX_RCX_OFT) & FORMAT_MOD_TX_RCX_MASK) | + ((txsig_bw << BW_TX_RCX_OFT) & BW_TX_RCX_MASK) | + ((rate_idx << MCS_INDEX_TX_RCX_OFT) & MCS_INDEX_TX_RCX_MASK); // from radiotap + /* Fill the Descriptor to be provided to the MAC SW */ + desc = &sw_txhdr->desc; + desc->host.staid = (sta) ? sta->sta_idx : 0xFF; + desc->host.vif_idx = vif->vif_index; + desc->host.tid = 0xFF; + desc->host.flags = TXU_CNTRL_MGMT; + if (robust) { + desc->host.flags |= TXU_CNTRL_MGMT_ROBUST; + } + frame_oft = sizeof(struct rwnx_txhdr) - offsetof(struct rwnx_txhdr, hw_hdr); + #if 0 + #ifdef CONFIG_RWNX_SPLIT_TX_BUF + desc->host.packet_addr[0] = sw_txhdr->dma_addr + frame_oft; + desc->host.packet_len[0] = frame_len; + desc->host.packet_cnt = 1; + #else + desc->host.packet_addr = sw_txhdr->dma_addr + frame_oft; + desc->host.packet_len = frame_len; + #endif + #else + desc->host.packet_len = frame_len; + #endif + + desc->host.status_desc_addr = sw_txhdr->dma_addr; + + spin_lock_bh(&rwnx_hw->tx_lock); + AICWFDBG(LOGTRACE, "%s send data\r\n", __func__); + if (rwnx_txq_queue_skb(skb_mgmt, txq, rwnx_hw, false)) + rwnx_hwq_process(rwnx_hw, txq->hwq); + spin_unlock_bh(&rwnx_hw->tx_lock); + +free_tag: + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; +} +#endif + +/** + * rwnx_txdatacfm - FW callback for TX confirmation + * + * called with tx_lock hold + */ +int rwnx_txdatacfm(void *pthis, void *host_id) +{ + struct rwnx_hw *rwnx_hw = (struct rwnx_hw *)pthis; + struct sk_buff *skb = host_id; + struct rwnx_txhdr *txhdr; + union rwnx_hw_txstatus rwnx_txst; + struct rwnx_sw_txhdr *sw_txhdr; + struct rwnx_hwq *hwq; + struct rwnx_txq *txq; + u16 headroom; + //int peek_off = offsetof(struct rwnx_hw_txhdr, cfm); + //int peek_len = sizeof(((struct rwnx_hw_txhdr *)0)->cfm); + + txhdr = (struct rwnx_txhdr *)skb->data; + sw_txhdr = txhdr->sw_hdr; + + /* Read status in the TX control header */ + rwnx_txst = txhdr->hw_hdr.cfm.status; + + /* Check status in the header. 
If status is null, it means that the buffer + * was not transmitted and we have to return immediately */ + if (rwnx_txst.value == 0) { + //return -1; + rwnx_txst.tx_done = 1; + } + +#ifdef AICWF_USB_SUPPORT + if (rwnx_hw->usbdev->state == USB_DOWN_ST) { + headroom = sw_txhdr->headroom; + kmem_cache_free(rwnx_hw->sw_txhdr_cache, sw_txhdr); + skb_pull(skb, headroom); + consume_skb(skb); + return 0; + } +#endif +#ifdef AICWF_SDIO_SUPPORT + if(rwnx_hw->sdiodev->bus_if->state == BUS_DOWN_ST) { + headroom = sw_txhdr->headroom; + kmem_cache_free(rwnx_hw->sw_txhdr_cache, sw_txhdr); + skb_pull(skb, headroom); + consume_skb(skb); + return 0; + } +#endif + + txq = sw_txhdr->txq; + /* don't use txq->hwq as it may have changed between push and confirm */ + hwq = &rwnx_hw->hwq[sw_txhdr->hw_queue]; + rwnx_txq_confirm_any(rwnx_hw, txq, hwq, sw_txhdr); + + /* Update txq and HW queue credits */ + if (sw_txhdr->desc.host.flags & TXU_CNTRL_MGMT) { + trace_printk("done=%d retry_required=%d sw_retry_required=%d acknowledged=%d\n", + rwnx_txst.tx_done, rwnx_txst.retry_required, + rwnx_txst.sw_retry_required, rwnx_txst.acknowledged); +#ifdef CREATE_TRACE_POINTS + trace_mgmt_cfm(sw_txhdr->rwnx_vif->vif_index, + (sw_txhdr->rwnx_sta) ? sw_txhdr->rwnx_sta->sta_idx : 0xFF, + rwnx_txst.acknowledged); +#endif + /* Confirm transmission to CFG80211 */ + cfg80211_mgmt_tx_status(&sw_txhdr->rwnx_vif->wdev, + (unsigned long)skb, + (skb->data + sw_txhdr->headroom), + sw_txhdr->frame_len, + rwnx_txst.acknowledged, + GFP_ATOMIC); + } else if ((txq->idx != TXQ_INACTIVE) && + (rwnx_txst.retry_required || rwnx_txst.sw_retry_required)) { + bool sw_retry = (rwnx_txst.sw_retry_required) ? true : false; + + /* Reset the status */ + txhdr->hw_hdr.cfm.status.value = 0; + + /* The confirmed packet was part of an AMPDU and not acked + * correctly, so reinject it in the TX path to be retried */ + rwnx_tx_retry(rwnx_hw, skb, txhdr, sw_retry); + return 0; + } +#ifdef CREATE_TRACE_POINTS + trace_skb_confirm(skb, txq, hwq, &txhdr->hw_hdr.cfm); +#endif + /* STA may have disconnect (and txq stopped) when buffers were stored + in fw. 
In this case do nothing when they're returned */ + if (txq->idx != TXQ_INACTIVE) { + #if 0 + if (txhdr->hw_hdr.cfm.credits) { + txq->credits += txhdr->hw_hdr.cfm.credits; + if (txq->credits <= 0) + rwnx_txq_stop(txq, RWNX_TXQ_STOP_FULL); + else if (txq->credits > 0) + rwnx_txq_start(txq, RWNX_TXQ_STOP_FULL); + } + #endif + + /* continue service period */ + if (unlikely(txq->push_limit && !rwnx_txq_is_full(txq))) { + rwnx_txq_add_to_hw_list(txq); + } + } + + if (txhdr->hw_hdr.cfm.ampdu_size && + txhdr->hw_hdr.cfm.ampdu_size < IEEE80211_MAX_AMPDU_BUF) + rwnx_hw->stats.ampdus_tx[txhdr->hw_hdr.cfm.ampdu_size - 1]++; + +#ifdef CONFIG_RWNX_AMSDUS_TX + txq->amsdu_len = txhdr->hw_hdr.cfm.amsdu_size; +#endif + + /* Update statistics */ + sw_txhdr->rwnx_vif->net_stats.tx_packets++; + sw_txhdr->rwnx_vif->net_stats.tx_bytes += sw_txhdr->frame_len; + + /* Release SKBs */ +#ifdef CONFIG_RWNX_AMSDUS_TX + if (sw_txhdr->desc.host.flags & TXU_CNTRL_AMSDU) { + struct rwnx_amsdu_txhdr *amsdu_txhdr; + list_for_each_entry(amsdu_txhdr, &sw_txhdr->amsdu.hdrs, list) { + rwnx_amsdu_del_subframe_header(amsdu_txhdr); + consume_skb(amsdu_txhdr->skb); + } + } +#endif /* CONFIG_RWNX_AMSDUS_TX */ + + headroom = sw_txhdr->headroom; + kmem_cache_free(rwnx_hw->sw_txhdr_cache, sw_txhdr); + skb_pull(skb, headroom); + consume_skb(skb); + + return 0; +} + +/** + * rwnx_txq_credit_update - Update credit for one txq + * + * @rwnx_hw: Driver main data + * @sta_idx: STA idx + * @tid: TID + * @update: offset to apply in txq credits + * + * Called when fw send ME_TX_CREDITS_UPDATE_IND message. + * Apply @update to txq credits, and stop/start the txq if needed + */ +void rwnx_txq_credit_update(struct rwnx_hw *rwnx_hw, int sta_idx, u8 tid, + s8 update) +{ + struct rwnx_sta *sta = &rwnx_hw->sta_table[sta_idx]; + struct rwnx_txq *txq; + + txq = rwnx_txq_sta_get(sta, tid, rwnx_hw); + + spin_lock_bh(&rwnx_hw->tx_lock); + + if (txq->idx != TXQ_INACTIVE) { + //txq->credits += update; +#ifdef CREATE_TRACE_POINTS + trace_credit_update(txq, update); +#endif + if (txq->credits <= 0) + rwnx_txq_stop(txq, RWNX_TXQ_STOP_FULL); + else + rwnx_txq_start(txq, RWNX_TXQ_STOP_FULL); + } + + spin_unlock_bh(&rwnx_hw->tx_lock); +} diff --git a/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_tx.h b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_tx.h new file mode 100644 index 0000000..eb64c77 --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_tx.h @@ -0,0 +1,202 @@ +/** + ****************************************************************************** + * + * @file rwnx_tx.h + * + * Copyright (C) RivieraWaves 2012-2019 + * + ****************************************************************************** + */ +#ifndef _RWNX_TX_H_ +#define _RWNX_TX_H_ + +#include +#include +#include +#include "lmac_types.h" +#include "ipc_shared.h" +#include "rwnx_txq.h" +#include "hal_desc.h" + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0) +#define IEEE80211_NUM_TIDS 16 +#endif + +#define RWNX_HWQ_BK 0 +#define RWNX_HWQ_BE 1 +#define RWNX_HWQ_VI 2 +#define RWNX_HWQ_VO 3 +#define RWNX_HWQ_BCMC 4 +#define RWNX_HWQ_NB NX_TXQ_CNT +#define RWNX_HWQ_ALL_ACS (RWNX_HWQ_BK | RWNX_HWQ_BE | RWNX_HWQ_VI | RWNX_HWQ_VO) +#define RWNX_HWQ_ALL_ACS_BIT ( BIT(RWNX_HWQ_BK) | BIT(RWNX_HWQ_BE) | \ + BIT(RWNX_HWQ_VI) | BIT(RWNX_HWQ_VO) ) + +#define RWNX_TX_LIFETIME_MS 1000 +#define RWNX_TX_MAX_RATES NX_TX_MAX_RATES + +#define RWNX_SWTXHDR_ALIGN_SZ 4 +#define RWNX_SWTXHDR_ALIGN_MSK (RWNX_SWTXHDR_ALIGN_SZ - 1) +#define RWNX_SWTXHDR_ALIGN_PADS(x) \ + ((RWNX_SWTXHDR_ALIGN_SZ - 
((x) & RWNX_SWTXHDR_ALIGN_MSK)) \ + & RWNX_SWTXHDR_ALIGN_MSK) +#if RWNX_SWTXHDR_ALIGN_SZ & RWNX_SWTXHDR_ALIGN_MSK +#error bad RWNX_SWTXHDR_ALIGN_SZ +#endif + +#define AMSDU_PADDING(x) ((4 - ((x) & 0x3)) & 0x3) + +#define TXU_CNTRL_RETRY BIT(0) +#define TXU_CNTRL_MORE_DATA BIT(2) +#define TXU_CNTRL_MGMT BIT(3) +#define TXU_CNTRL_MGMT_NO_CCK BIT(4) +#define TXU_CNTRL_AMSDU BIT(6) +#define TXU_CNTRL_MGMT_ROBUST BIT(7) +#define TXU_CNTRL_USE_4ADDR BIT(8) +#define TXU_CNTRL_EOSP BIT(9) +#define TXU_CNTRL_MESH_FWD BIT(10) +#define TXU_CNTRL_TDLS BIT(11) + +extern const int rwnx_tid2hwq[IEEE80211_NUM_TIDS]; + +/** + * struct rwnx_amsdu_txhdr - Structure added in skb headroom (instead of + * rwnx_txhdr) for amsdu subframe buffer (except for the first subframe + * that has a normal rwnx_txhdr) + * + * @list List of other amsdu subframes (rwnx_sw_txhdr.amsdu.hdrs) + * @map_len Length to be downloaded for this subframe + * @dma_addr Buffer address from the embedded point of view + * @skb skb + * @pad padding added before this subframe + * (only used when the amsdu must be dismantled) + * @msdu_len Size, in bytes, of the MSDU (without padding nor amsdu header) + */ +struct rwnx_amsdu_txhdr { + struct list_head list; + size_t map_len; + dma_addr_t dma_addr; + struct sk_buff *skb; + u16 pad; + u16 msdu_len; +}; + +/** + * struct rwnx_amsdu - Structure to manage creation of an A-MSDU, updated + * only in the first subframe of an A-MSDU + * + * @hdrs List of subframes (rwnx_amsdu_txhdr) + * @len Current size for this A-MSDU (doesn't take padding into account) + * 0 means that no amsdu is in progress + * @nb Number of subframes in the amsdu + * @pad Padding to add before adding a new subframe + */ +struct rwnx_amsdu { + struct list_head hdrs; + u16 len; + u8 nb; + u8 pad; +}; + +/** + * struct rwnx_sw_txhdr - Software part of tx header + * + * @rwnx_sta sta to which this buffer is addressed + * @rwnx_vif vif that sends the buffer + * @txq pointer to TXQ used to send the buffer + * @hw_queue Index of the HWQ used to push the buffer. + * May be different than txq->hwq->id on confirmation. + * @frame_len Size of the frame (does not include the MAC header) + * (Only used to update stat, can't we use skb->len instead ?) + * @headroom Headroom added in skb to add rwnx_txhdr + * (Only used to remove it before freeing skb, is it needed ?)
+ * @amsdu Description of amsdu whose first subframe is this buffer + * (amsdu.nb = 0 means this buffer is not part of amsdu) + * @skb skb received from transmission + * @map_len Length mapped for DMA (only rwnx_hw_txhdr and data are mapped) + * @dma_addr DMA address after mapping + * @desc Buffer description that will be copied in shared mem for FW + */ +struct rwnx_sw_txhdr { + struct rwnx_sta *rwnx_sta; + struct rwnx_vif *rwnx_vif; + struct rwnx_txq *txq; + u8 hw_queue; + u16 frame_len; + u16 headroom; +#ifdef CONFIG_RWNX_AMSDUS_TX + struct rwnx_amsdu amsdu; +#endif + u32 need_cfm; + struct sk_buff *skb; + + size_t map_len; + dma_addr_t dma_addr; + struct txdesc_api desc; + u8 raw_frame; + u8 fixed_rate; + u16 rate_config; +}; + +/** + * struct rwnx_txhdr - Stucture to control transimission of packet + * (Added in skb headroom) + * + * @sw_hdr: Information from driver + * @cache_guard: + * @hw_hdr: Information for/from hardware + */ +struct rwnx_txhdr { + struct rwnx_sw_txhdr *sw_hdr; + char cache_guard[L1_CACHE_BYTES]; + struct rwnx_hw_txhdr hw_hdr; +}; + +u16 rwnx_select_txq(struct rwnx_vif *rwnx_vif, struct sk_buff *skb); +netdev_tx_t rwnx_start_xmit(struct sk_buff *skb, struct net_device *dev); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)) +int rwnx_start_mgmt_xmit(struct rwnx_vif *vif, struct rwnx_sta *sta, + struct cfg80211_mgmt_tx_params *params, bool offchan, + u64 *cookie); +#else +int rwnx_start_mgmt_xmit(struct rwnx_vif *vif, struct rwnx_sta *sta, + struct ieee80211_channel *channel, bool offchan, + unsigned int wait, const u8* buf, size_t len, + #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0)) + bool no_cck, + #endif + #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 3, 0)) + bool dont_wait_for_ack, + #endif + u64 *cookie); +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0) */ +#ifdef CONFIG_RWNX_MON_XMIT +int rwnx_start_monitor_if_xmit(struct sk_buff *skb, struct net_device *dev); +#endif +int rwnx_txdatacfm(void *pthis, void *host_id); + +struct rwnx_hw; +struct rwnx_sta; +void rwnx_set_traffic_status(struct rwnx_hw *rwnx_hw, + struct rwnx_sta *sta, + bool available, + u8 ps_id); +void rwnx_ps_bh_enable(struct rwnx_hw *rwnx_hw, struct rwnx_sta *sta, + bool enable); +void rwnx_ps_bh_traffic_req(struct rwnx_hw *rwnx_hw, struct rwnx_sta *sta, + u16 pkt_req, u8 ps_id); + +void rwnx_switch_vif_sta_txq(struct rwnx_sta *sta, struct rwnx_vif *old_vif, + struct rwnx_vif *new_vif); + +int rwnx_dbgfs_print_sta(char *buf, size_t size, struct rwnx_sta *sta, + struct rwnx_hw *rwnx_hw); +void rwnx_txq_credit_update(struct rwnx_hw *rwnx_hw, int sta_idx, u8 tid, + s8 update); +void rwnx_tx_push(struct rwnx_hw *rwnx_hw, struct rwnx_txhdr *txhdr, int flags); +#ifdef CONFIG_BAND_STEERING +void rwnx_probersp_work(struct work_struct *work); +#endif + + +#endif /* _RWNX_TX_H_ */ diff --git a/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_txq.c b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_txq.c new file mode 100644 index 0000000..5b71597 --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_txq.c @@ -0,0 +1,1366 @@ +/** + ****************************************************************************** + * + * @file rwnx_txq.c + * + * Copyright (C) RivieraWaves 2016-2019 + * + ****************************************************************************** + */ + +#include "rwnx_defs.h" +#include "rwnx_tx.h" +#include "ipc_host.h" +#include "rwnx_events.h" +#include "rwnx_gki.h" + +/****************************************************************************** + 
* Utils functions + *****************************************************************************/ +#ifdef CONFIG_RWNX_FULLMAC +const int nx_tid_prio[NX_NB_TID_PER_STA] = {7, 6, 5, 4, 3, 0, 2, 1}; + +static inline int rwnx_txq_sta_idx(struct rwnx_sta *sta, u8 tid) +{ + if (is_multicast_sta(sta->sta_idx)){ + if((g_rwnx_plat->usbdev->chipid == PRODUCT_ID_AIC8801) || + ((g_rwnx_plat->usbdev->chipid == PRODUCT_ID_AIC8800DC || + g_rwnx_plat->usbdev->chipid == PRODUCT_ID_AIC8800DW) && chip_id < 3)){ + return NX_FIRST_VIF_TXQ_IDX_FOR_OLD_IC + sta->vif_idx; + }else{ + return NX_FIRST_VIF_TXQ_IDX + sta->vif_idx; + } + }else{ + return (sta->sta_idx * NX_NB_TXQ_PER_STA) + tid; + } +} + +static inline int rwnx_txq_vif_idx(struct rwnx_vif *vif, u8 type) +{ + if((g_rwnx_plat->usbdev->chipid == PRODUCT_ID_AIC8801) || + ((g_rwnx_plat->usbdev->chipid == PRODUCT_ID_AIC8800DC || + g_rwnx_plat->usbdev->chipid == PRODUCT_ID_AIC8800DW) && chip_id < 3)){ + return NX_FIRST_VIF_TXQ_IDX_FOR_OLD_IC + master_vif_idx(vif) + (type * NX_VIRT_DEV_MAX); + }else{ + return NX_FIRST_VIF_TXQ_IDX + master_vif_idx(vif) + (type * NX_VIRT_DEV_MAX); + } +} + +struct rwnx_txq *rwnx_txq_sta_get(struct rwnx_sta *sta, u8 tid, + struct rwnx_hw * rwnx_hw) +{ + if (tid >= NX_NB_TXQ_PER_STA) + tid = 0; + + return &rwnx_hw->txq[rwnx_txq_sta_idx(sta, tid)]; +} + +struct rwnx_txq *rwnx_txq_vif_get(struct rwnx_vif *vif, u8 type) +{ + if (type > NX_UNK_TXQ_TYPE) + type = NX_BCMC_TXQ_TYPE; + + return &vif->rwnx_hw->txq[rwnx_txq_vif_idx(vif, type)]; +} + +static inline struct rwnx_sta *rwnx_txq_2_sta(struct rwnx_txq *txq) +{ + return txq->sta; +} + +#endif /* CONFIG_RWNX_FULLMAC */ + + +/****************************************************************************** + * Init/Deinit functions + *****************************************************************************/ +/** + * rwnx_txq_init - Initialize a TX queue + * + * @txq: TX queue to be initialized + * @idx: TX queue index + * @status: TX queue initial status + * @hwq: Associated HW queue + * @ndev: Net device this queue belongs to + * (may be null for non netdev txq) + * + * Each queue is initialized with the credit of @NX_TXQ_INITIAL_CREDITS. 
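+ * On return the queue is idle: its skb list is empty, no retry packet is
+ * pending and it is not yet linked into any HW queue schedule list.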
+ */ +static void rwnx_txq_init(struct rwnx_txq *txq, int idx, u8 status, + struct rwnx_hwq *hwq, int tid, +#ifdef CONFIG_RWNX_FULLMAC + struct rwnx_sta *sta, struct net_device *ndev +#endif + ) +{ + int i; + int nx_first_unk_txq_idx = NX_FIRST_UNK_TXQ_IDX; + int nx_bcmc_txq_ndev_idx = NX_BCMC_TXQ_NDEV_IDX; + int nx_first_vif_txq_idx = NX_FIRST_VIF_TXQ_IDX; + + if((g_rwnx_plat->usbdev->chipid == PRODUCT_ID_AIC8801) || + ((g_rwnx_plat->usbdev->chipid == PRODUCT_ID_AIC8800DC || + g_rwnx_plat->usbdev->chipid == PRODUCT_ID_AIC8800DW) && chip_id < 3)){ + nx_first_unk_txq_idx = NX_FIRST_UNK_TXQ_IDX_FOR_OLD_IC; + nx_bcmc_txq_ndev_idx = NX_BCMC_TXQ_NDEV_IDX_FOR_OLD_IC; + nx_first_vif_txq_idx = NX_FIRST_VIF_TXQ_IDX_FOR_OLD_IC; + } + + + txq->idx = idx; + txq->status = status; + txq->credits = NX_TXQ_INITIAL_CREDITS; + txq->pkt_sent = 0; + skb_queue_head_init(&txq->sk_list); + txq->last_retry_skb = NULL; + txq->nb_retry = 0; + txq->hwq = hwq; + txq->sta = sta; + for (i = 0; i < CONFIG_USER_MAX ; i++) + txq->pkt_pushed[i] = 0; + txq->push_limit = 0; + txq->tid = tid; +#ifdef CONFIG_MAC80211_TXQ + txq->nb_ready_mac80211 = 0; +#endif +#ifdef CONFIG_RWNX_FULLMAC + txq->ps_id = LEGACY_PS_ID; + if (idx < nx_first_vif_txq_idx) { + int sta_idx = sta->sta_idx; + int tid = idx - (sta_idx * NX_NB_TXQ_PER_STA); + if (tid < NX_NB_TID_PER_STA) + txq->ndev_idx = NX_STA_NDEV_IDX(tid, sta_idx); + else + txq->ndev_idx = NDEV_NO_TXQ; + } else if (idx < nx_first_unk_txq_idx) { + txq->ndev_idx = nx_bcmc_txq_ndev_idx; + } else { + txq->ndev_idx = NDEV_NO_TXQ; + } + txq->ndev = ndev; +#ifdef CONFIG_RWNX_AMSDUS_TX + txq->amsdu = NULL; + txq->amsdu_len = 0; +#endif /* CONFIG_RWNX_AMSDUS_TX */ +#endif /* CONFIG_RWNX_FULLMAC */ +} + +/** + * rwnx_txq_flush - Flush all buffers queued for a TXQ + * + * @rwnx_hw: main driver data + * @txq: txq to flush + */ +void rwnx_txq_flush(struct rwnx_hw *rwnx_hw, struct rwnx_txq *txq) +{ + struct sk_buff *skb; + + + while((skb = skb_dequeue(&txq->sk_list)) != NULL) { + struct rwnx_sw_txhdr *sw_txhdr = ((struct rwnx_txhdr *)skb->data)->sw_hdr; + +#ifdef CONFIG_RWNX_AMSDUS_TX + if (sw_txhdr->desc.host.packet_cnt > 1) { + struct rwnx_amsdu_txhdr *amsdu_txhdr; + list_for_each_entry(amsdu_txhdr, &sw_txhdr->amsdu.hdrs, list) { + //dma_unmap_single(rwnx_hw->dev, amsdu_txhdr->dma_addr, + // amsdu_txhdr->map_len, DMA_TO_DEVICE); + dev_kfree_skb_any(amsdu_txhdr->skb); + } + } +#endif + kmem_cache_free(rwnx_hw->sw_txhdr_cache, sw_txhdr); + //dma_unmap_single(rwnx_hw->dev, sw_txhdr->dma_addr, sw_txhdr->map_len, + // DMA_TO_DEVICE); + +#ifdef CONFIG_RWNX_FULLMAC + dev_kfree_skb_any(skb); +#endif /* CONFIG_RWNX_FULLMAC */ + } +} + +/** + * rwnx_txq_deinit - De-initialize a TX queue + * + * @rwnx_hw: Driver main data + * @txq: TX queue to be de-initialized + * Any buffer stuck in a queue will be freed. 
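+ * Note: unlike most helpers in this file, this function takes tx_lock itself
+ * before unlinking the queue, so it must not be called with tx_lock held.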
+ */ +static void rwnx_txq_deinit(struct rwnx_hw *rwnx_hw, struct rwnx_txq *txq) +{ + if (txq->idx == TXQ_INACTIVE) + return; + + spin_lock_bh(&rwnx_hw->tx_lock); + rwnx_txq_del_from_hw_list(txq); + txq->idx = TXQ_INACTIVE; + spin_unlock_bh(&rwnx_hw->tx_lock); + + rwnx_txq_flush(rwnx_hw, txq); +} + +/** + * rwnx_txq_vif_init - Initialize all TXQ linked to a vif + * + * @rwnx_hw: main driver data + * @rwnx_vif: Pointer on VIF + * @status: Intial txq status + * + * Softmac : 1 VIF TXQ per HWQ + * + * Fullmac : 1 VIF TXQ for BC/MC + * 1 VIF TXQ for MGMT to unknown STA + */ +void rwnx_txq_vif_init(struct rwnx_hw *rwnx_hw, struct rwnx_vif *rwnx_vif, + u8 status) +{ + struct rwnx_txq *txq; + int idx; + +#ifdef CONFIG_RWNX_FULLMAC + txq = rwnx_txq_vif_get(rwnx_vif, NX_BCMC_TXQ_TYPE); + idx = rwnx_txq_vif_idx(rwnx_vif, NX_BCMC_TXQ_TYPE); + rwnx_txq_init(txq, idx, status, &rwnx_hw->hwq[RWNX_HWQ_BE], 0, + &rwnx_hw->sta_table[rwnx_vif->ap.bcmc_index], rwnx_vif->ndev); + + txq = rwnx_txq_vif_get(rwnx_vif, NX_UNK_TXQ_TYPE); + idx = rwnx_txq_vif_idx(rwnx_vif, NX_UNK_TXQ_TYPE); + rwnx_txq_init(txq, idx, status, &rwnx_hw->hwq[RWNX_HWQ_VO], TID_MGT, + NULL, rwnx_vif->ndev); + +#endif /* CONFIG_RWNX_FULLMAC */ +} + +/** + * rwnx_txq_vif_deinit - Deinitialize all TXQ linked to a vif + * + * @rwnx_hw: main driver data + * @rwnx_vif: Pointer on VIF + */ +void rwnx_txq_vif_deinit(struct rwnx_hw * rwnx_hw, struct rwnx_vif *rwnx_vif) +{ + struct rwnx_txq *txq; + +#ifdef CONFIG_RWNX_FULLMAC + txq = rwnx_txq_vif_get(rwnx_vif, NX_BCMC_TXQ_TYPE); + rwnx_txq_deinit(rwnx_hw, txq); + + txq = rwnx_txq_vif_get(rwnx_vif, NX_UNK_TXQ_TYPE); + rwnx_txq_deinit(rwnx_hw, txq); + +#endif /* CONFIG_RWNX_FULLMAC */ +} + + +/** + * rwnx_txq_sta_init - Initialize TX queues for a STA + * + * @rwnx_hw: Main driver data + * @rwnx_sta: STA for which tx queues need to be initialized + * @status: Intial txq status + * + * This function initialize all the TXQ associated to a STA. + * Softmac : 1 TXQ per TID + * + * Fullmac : 1 TXQ per TID (limited to 8) + * 1 TXQ for MGMT + */ +void rwnx_txq_sta_init(struct rwnx_hw *rwnx_hw, struct rwnx_sta *rwnx_sta, + u8 status) +{ + struct rwnx_txq *txq; + int tid, idx; + +#ifdef CONFIG_RWNX_FULLMAC + struct rwnx_vif *rwnx_vif = rwnx_hw->vif_table[rwnx_sta->vif_idx]; + idx = rwnx_txq_sta_idx(rwnx_sta, 0); + + foreach_sta_txq(rwnx_sta, txq, tid, rwnx_hw) { + rwnx_txq_init(txq, idx, status, &rwnx_hw->hwq[rwnx_tid2hwq[tid]], tid, + rwnx_sta, rwnx_vif->ndev); + txq->ps_id = rwnx_sta->uapsd_tids & (1 << tid) ? 
UAPSD_ID : LEGACY_PS_ID; + idx++; + } + +#endif /* CONFIG_RWNX_FULLMAC*/ +} + +/** + * rwnx_txq_sta_deinit - Deinitialize TX queues for a STA + * + * @rwnx_hw: Main driver data + * @rwnx_sta: STA for which tx queues need to be deinitialized + */ +void rwnx_txq_sta_deinit(struct rwnx_hw *rwnx_hw, struct rwnx_sta *rwnx_sta) +{ + struct rwnx_txq *txq; + int tid; + + foreach_sta_txq(rwnx_sta, txq, tid, rwnx_hw) { + rwnx_txq_deinit(rwnx_hw, txq); + } +} + +#ifdef CONFIG_RWNX_FULLMAC +/** + * rwnx_txq_unk_vif_init - Initialize TXQ for unknown STA linked to a vif + * + * @rwnx_vif: Pointer on VIF + */ +void rwnx_txq_unk_vif_init(struct rwnx_vif *rwnx_vif) +{ + struct rwnx_hw *rwnx_hw = rwnx_vif->rwnx_hw; + struct rwnx_txq *txq; + int idx; + + txq = rwnx_txq_vif_get(rwnx_vif, NX_UNK_TXQ_TYPE); + idx = rwnx_txq_vif_idx(rwnx_vif, NX_UNK_TXQ_TYPE); + rwnx_txq_init(txq, idx, 0, &rwnx_hw->hwq[RWNX_HWQ_VO], TID_MGT, NULL, rwnx_vif->ndev); +} + +/** + * rwnx_txq_tdls_vif_deinit - Deinitialize TXQ for unknown STA linked to a vif + * + * @rwnx_vif: Pointer on VIF + */ +void rwnx_txq_unk_vif_deinit(struct rwnx_vif *rwnx_vif) +{ + struct rwnx_txq *txq; + + txq = rwnx_txq_vif_get(rwnx_vif, NX_UNK_TXQ_TYPE); + rwnx_txq_deinit(rwnx_vif->rwnx_hw, txq); +} + +/** + * rwnx_init_unk_txq - Initialize TX queue for the transmission on a offchannel + * + * @vif: Interface for which the queue has to be initialized + * + * NOTE: Offchannel txq is only active for the duration of the ROC + */ +void rwnx_txq_offchan_init(struct rwnx_vif *rwnx_vif) +{ + struct rwnx_hw *rwnx_hw = rwnx_vif->rwnx_hw; + struct rwnx_txq *txq; + int nx_off_chan_txq_idx = NX_OFF_CHAN_TXQ_IDX; + + if((g_rwnx_plat->usbdev->chipid == PRODUCT_ID_AIC8801) || + ((g_rwnx_plat->usbdev->chipid == PRODUCT_ID_AIC8800DC || + g_rwnx_plat->usbdev->chipid == PRODUCT_ID_AIC8800DW) && chip_id < 3)){ + nx_off_chan_txq_idx = NX_OFF_CHAN_TXQ_IDX_FOR_OLD_IC; + } + + + txq = &rwnx_hw->txq[nx_off_chan_txq_idx]; + rwnx_txq_init(txq, nx_off_chan_txq_idx, RWNX_TXQ_STOP_CHAN, + &rwnx_hw->hwq[RWNX_HWQ_VO], TID_MGT, NULL, rwnx_vif->ndev); +} + +/** + * rwnx_deinit_offchan_txq - Deinitialize TX queue for offchannel + * + * @vif: Interface that manages the STA + * + * This function deintialize txq for one STA. + * Any buffer stuck in a queue will be freed. 
+ */ +void rwnx_txq_offchan_deinit(struct rwnx_vif *rwnx_vif) +{ + struct rwnx_txq *txq; + int nx_off_chan_txq_idx = NX_OFF_CHAN_TXQ_IDX; + + if((g_rwnx_plat->usbdev->chipid == PRODUCT_ID_AIC8801) || + ((g_rwnx_plat->usbdev->chipid == PRODUCT_ID_AIC8800DC || + g_rwnx_plat->usbdev->chipid == PRODUCT_ID_AIC8800DW) && chip_id < 3)){ + nx_off_chan_txq_idx = NX_OFF_CHAN_TXQ_IDX_FOR_OLD_IC; + } + + txq = &rwnx_vif->rwnx_hw->txq[nx_off_chan_txq_idx]; + rwnx_txq_deinit(rwnx_vif->rwnx_hw, txq); +} + + +/** + * rwnx_txq_tdls_vif_init - Initialize TXQ vif for TDLS + * + * @rwnx_vif: Pointer on VIF + */ +void rwnx_txq_tdls_vif_init(struct rwnx_vif *rwnx_vif) +{ + struct rwnx_hw *rwnx_hw = rwnx_vif->rwnx_hw; + + if (!(rwnx_hw->wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS)) + return; + + rwnx_txq_unk_vif_init(rwnx_vif); +} + +/** + * rwnx_txq_tdls_vif_deinit - Deinitialize TXQ vif for TDLS + * + * @rwnx_vif: Pointer on VIF + */ +void rwnx_txq_tdls_vif_deinit(struct rwnx_vif *rwnx_vif) +{ + struct rwnx_hw *rwnx_hw = rwnx_vif->rwnx_hw; + + if (!(rwnx_hw->wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS)) + return; + + rwnx_txq_unk_vif_deinit(rwnx_vif); +} +#endif + +/****************************************************************************** + * Start/Stop functions + *****************************************************************************/ +/** + * rwnx_txq_add_to_hw_list - Add TX queue to a HW queue schedule list. + * + * @txq: TX queue to add + * + * Add the TX queue if not already present in the HW queue list. + * To be called with tx_lock hold + */ +void rwnx_txq_add_to_hw_list(struct rwnx_txq *txq) +{ + if (!(txq->status & RWNX_TXQ_IN_HWQ_LIST)) { +#ifdef CREATE_TRACE_POINTS + trace_txq_add_to_hw(txq); +#endif + txq->status |= RWNX_TXQ_IN_HWQ_LIST; + list_add_tail(&txq->sched_list, &txq->hwq->list); + txq->hwq->need_processing = true; + } +} + +/** + * rwnx_txq_del_from_hw_list - Delete TX queue from a HW queue schedule list. + * + * @txq: TX queue to delete + * + * Remove the TX queue from the HW queue list if present. + * To be called with tx_lock hold + */ +void rwnx_txq_del_from_hw_list(struct rwnx_txq *txq) +{ + if (txq->status & RWNX_TXQ_IN_HWQ_LIST) { +#ifdef CREATE_TRACE_POINTS + trace_txq_del_from_hw(txq); +#endif + txq->status &= ~RWNX_TXQ_IN_HWQ_LIST; + list_del(&txq->sched_list); + } +} + +/** + * rwnx_txq_skb_ready - Check if skb are available for the txq + * + * @txq: Pointer on txq + * @return True if there are buffer ready to be pushed on this txq, + * false otherwise + */ +static inline bool rwnx_txq_skb_ready(struct rwnx_txq *txq) +{ +#ifdef CONFIG_MAC80211_TXQ + if (txq->nb_ready_mac80211 != NOT_MAC80211_TXQ) + return ((txq->nb_ready_mac80211 > 0) || !skb_queue_empty(&txq->sk_list)); + else +#endif + return !skb_queue_empty(&txq->sk_list); +} + +/** + * rwnx_txq_start - Try to Start one TX queue + * + * @txq: TX queue to start + * @reason: reason why the TX queue is started (among RWNX_TXQ_STOP_xxx) + * + * Re-start the TX queue for one reason. + * If after this the txq is no longer stopped and some buffers are ready, + * the TX queue is also added to HW queue list. 
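+ * Note that @reason only clears its own bit in txq->status; if the queue is
+ * still stopped for another reason it stays out of the HW queue list until
+ * every stop reason has been cleared.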
+ * To be called with tx_lock hold + */ +void rwnx_txq_start(struct rwnx_txq *txq, u16 reason) +{ + BUG_ON(txq==NULL); + if (txq->idx != TXQ_INACTIVE && (txq->status & reason)) + { +#ifdef CREATE_TRACE_POINTS + trace_txq_start(txq, reason); +#endif + txq->status &= ~reason; + if (!rwnx_txq_is_stopped(txq) && rwnx_txq_skb_ready(txq)) + rwnx_txq_add_to_hw_list(txq); + } +} + +/** + * rwnx_txq_stop - Stop one TX queue + * + * @txq: TX queue to stop + * @reason: reason why the TX queue is stopped (among RWNX_TXQ_STOP_xxx) + * + * Stop the TX queue. It will remove the TX queue from HW queue list + * To be called with tx_lock hold + */ +void rwnx_txq_stop(struct rwnx_txq *txq, u16 reason) +{ + BUG_ON(txq==NULL); + if (txq->idx != TXQ_INACTIVE) + { +#ifdef CREATE_TRACE_POINTS + trace_txq_stop(txq, reason); +#endif + txq->status |= reason; + rwnx_txq_del_from_hw_list(txq); + } +} + + +/** + * rwnx_txq_sta_start - Start all the TX queue linked to a STA + * + * @sta: STA whose TX queues must be re-started + * @reason: Reason why the TX queue are restarted (among RWNX_TXQ_STOP_xxx) + * @rwnx_hw: Driver main data + * + * This function will re-start all the TX queues of the STA for the reason + * specified. It can be : + * - RWNX_TXQ_STOP_STA_PS: the STA is no longer in power save mode + * - RWNX_TXQ_STOP_VIF_PS: the VIF is in power save mode (p2p absence) + * - RWNX_TXQ_STOP_CHAN: the STA's VIF is now on the current active channel + * + * Any TX queue with buffer ready and not Stopped for other reasons, will be + * added to the HW queue list + * To be called with tx_lock hold + */ +void rwnx_txq_sta_start(struct rwnx_sta *rwnx_sta, u16 reason +#ifdef CONFIG_RWNX_FULLMAC + , struct rwnx_hw *rwnx_hw +#endif + ) +{ + struct rwnx_txq *txq; + int tid; +#ifdef CREATE_TRACE_POINTS + trace_txq_sta_start(rwnx_sta->sta_idx); +#endif + foreach_sta_txq(rwnx_sta, txq, tid, rwnx_hw) { + rwnx_txq_start(txq, reason); + } +} + + +/** + * rwnx_stop_sta_txq - Stop all the TX queue linked to a STA + * + * @sta: STA whose TX queues must be stopped + * @reason: Reason why the TX queue are stopped (among RWNX_TX_STOP_xxx) + * @rwnx_hw: Driver main data + * + * This function will stop all the TX queues of the STA for the reason + * specified. It can be : + * - RWNX_TXQ_STOP_STA_PS: the STA is in power save mode + * - RWNX_TXQ_STOP_VIF_PS: the VIF is in power save mode (p2p absence) + * - RWNX_TXQ_STOP_CHAN: the STA's VIF is not on the current active channel + * + * Any TX queue present in a HW queue list will be removed from this list. 
+ * To be called with tx_lock hold + */ +void rwnx_txq_sta_stop(struct rwnx_sta *rwnx_sta, u16 reason +#ifdef CONFIG_RWNX_FULLMAC + , struct rwnx_hw *rwnx_hw +#endif + ) +{ + struct rwnx_txq *txq; + int tid; + + if (!rwnx_sta) + return; +#ifdef CREATE_TRACE_POINTS + trace_txq_sta_stop(rwnx_sta->sta_idx); +#endif + foreach_sta_txq(rwnx_sta, txq, tid, rwnx_hw) { + rwnx_txq_stop(txq, reason); + } +} + +#ifdef CONFIG_RWNX_FULLMAC +void rwnx_txq_tdls_sta_start(struct rwnx_vif *rwnx_vif, u16 reason, + struct rwnx_hw *rwnx_hw) +{ +#ifdef CREATE_TRACE_POINTS + trace_txq_vif_start(rwnx_vif->vif_index); +#endif + spin_lock_bh(&rwnx_hw->tx_lock); + + if (rwnx_vif->sta.tdls_sta) + rwnx_txq_sta_start(rwnx_vif->sta.tdls_sta, reason, rwnx_hw); + + spin_unlock_bh(&rwnx_hw->tx_lock); +} +#endif + +#ifdef CONFIG_RWNX_FULLMAC +void rwnx_txq_tdls_sta_stop(struct rwnx_vif *rwnx_vif, u16 reason, + struct rwnx_hw *rwnx_hw) +{ +#ifdef CREATE_TRACE_POINTS + trace_txq_vif_stop(rwnx_vif->vif_index); +#endif + spin_lock_bh(&rwnx_hw->tx_lock); + + if (rwnx_vif->sta.tdls_sta) + rwnx_txq_sta_stop(rwnx_vif->sta.tdls_sta, reason, rwnx_hw); + + spin_unlock_bh(&rwnx_hw->tx_lock); +} +#endif + +#ifdef CONFIG_RWNX_FULLMAC +static inline +void rwnx_txq_vif_for_each_sta(struct rwnx_hw *rwnx_hw, struct rwnx_vif *rwnx_vif, + void (*f)(struct rwnx_sta *, u16, struct rwnx_hw *), + u16 reason) +{ + + switch (RWNX_VIF_TYPE(rwnx_vif)) { + case NL80211_IFTYPE_STATION: + case NL80211_IFTYPE_P2P_CLIENT: + { + if (rwnx_vif->tdls_status == TDLS_LINK_ACTIVE) + f(rwnx_vif->sta.tdls_sta, reason, rwnx_hw); + if (!WARN_ON(rwnx_vif->sta.ap == NULL)) + f(rwnx_vif->sta.ap, reason, rwnx_hw); + break; + } + case NL80211_IFTYPE_AP_VLAN: + rwnx_vif = rwnx_vif->ap_vlan.master; + case NL80211_IFTYPE_AP: + case NL80211_IFTYPE_MESH_POINT: + case NL80211_IFTYPE_P2P_GO: + { + struct rwnx_sta *sta; + list_for_each_entry(sta, &rwnx_vif->ap.sta_list, list) { + f(sta, reason, rwnx_hw); + } + break; + } + default: + BUG(); + break; + } +} + +#endif + +/** + * rwnx_txq_vif_start - START TX queues of all STA associated to the vif + * and vif's TXQ + * + * @vif: Interface to start + * @reason: Start reason (RWNX_TXQ_STOP_CHAN or RWNX_TXQ_STOP_VIF_PS) + * @rwnx_hw: Driver main data + * + * Iterate over all the STA associated to the vif and re-start them for the + * reason @reason + * Take tx_lock + */ +void rwnx_txq_vif_start(struct rwnx_vif *rwnx_vif, u16 reason, + struct rwnx_hw *rwnx_hw) +{ + struct rwnx_txq *txq; +#ifdef CREATE_TRACE_POINTS + trace_txq_vif_start(rwnx_vif->vif_index); +#endif + spin_lock_bh(&rwnx_hw->tx_lock); + +#ifdef CONFIG_RWNX_FULLMAC + //Reject if monitor interface + if (rwnx_vif->wdev.iftype == NL80211_IFTYPE_MONITOR) + goto end; + + if (rwnx_vif->roc_tdls && rwnx_vif->sta.tdls_sta && rwnx_vif->sta.tdls_sta->tdls.chsw_en) { + rwnx_txq_sta_start(rwnx_vif->sta.tdls_sta, reason, rwnx_hw); + } + if (!rwnx_vif->roc_tdls) { + rwnx_txq_vif_for_each_sta(rwnx_hw, rwnx_vif, rwnx_txq_sta_start, reason); + } + + txq = rwnx_txq_vif_get(rwnx_vif, NX_BCMC_TXQ_TYPE); + rwnx_txq_start(txq, reason); + txq = rwnx_txq_vif_get(rwnx_vif, NX_UNK_TXQ_TYPE); + rwnx_txq_start(txq, reason); + +end: +#endif /* CONFIG_RWNX_FULLMAC */ + + spin_unlock_bh(&rwnx_hw->tx_lock); +} + + +/** + * rwnx_txq_vif_stop - STOP TX queues of all STA associated to the vif + * + * @vif: Interface to stop + * @arg: Stop reason (RWNX_TXQ_STOP_CHAN or RWNX_TXQ_STOP_VIF_PS) + * @rwnx_hw: Driver main data + * + * Iterate over all the STA associated to the vif and stop them for the + * reason 
RWNX_TXQ_STOP_CHAN or RWNX_TXQ_STOP_VIF_PS + * Take tx_lock + */ +void rwnx_txq_vif_stop(struct rwnx_vif *rwnx_vif, u16 reason, + struct rwnx_hw *rwnx_hw) +{ + struct rwnx_txq *txq; + + //RWNX_DBG(RWNX_FN_ENTRY_STR); +#ifdef CREATE_TRACE_POINTS + trace_txq_vif_stop(rwnx_vif->vif_index); +#endif + spin_lock_bh(&rwnx_hw->tx_lock); + +#ifdef CONFIG_RWNX_FULLMAC + //Reject if monitor interface + if (rwnx_vif->wdev.iftype == NL80211_IFTYPE_MONITOR) + goto end; + + rwnx_txq_vif_for_each_sta(rwnx_hw, rwnx_vif, rwnx_txq_sta_stop, reason); + + txq = rwnx_txq_vif_get(rwnx_vif, NX_BCMC_TXQ_TYPE); + rwnx_txq_stop(txq, reason); + txq = rwnx_txq_vif_get(rwnx_vif, NX_UNK_TXQ_TYPE); + rwnx_txq_stop(txq, reason); + +end: +#endif /* CONFIG_RWNX_FULLMAC*/ + + spin_unlock_bh(&rwnx_hw->tx_lock); +} + +#ifdef CONFIG_RWNX_FULLMAC + +/** + * rwnx_start_offchan_txq - START TX queue for offchannel frame + * + * @rwnx_hw: Driver main data + */ +void rwnx_txq_offchan_start(struct rwnx_hw *rwnx_hw) +{ + struct rwnx_txq *txq; + int nx_off_chan_txq_idx = NX_OFF_CHAN_TXQ_IDX; + + if((g_rwnx_plat->usbdev->chipid == PRODUCT_ID_AIC8801) || + ((g_rwnx_plat->usbdev->chipid == PRODUCT_ID_AIC8800DC || + g_rwnx_plat->usbdev->chipid == PRODUCT_ID_AIC8800DW) && chip_id < 3)){ + nx_off_chan_txq_idx = NX_OFF_CHAN_TXQ_IDX_FOR_OLD_IC; + } + + + txq = &rwnx_hw->txq[nx_off_chan_txq_idx]; + spin_lock_bh(&rwnx_hw->tx_lock); + rwnx_txq_start(txq, RWNX_TXQ_STOP_CHAN); + spin_unlock_bh(&rwnx_hw->tx_lock); +} + +/** + * rwnx_switch_vif_sta_txq - Associate TXQ linked to a STA to a new vif + * + * @sta: STA whose txq must be switched + * @old_vif: Vif currently associated to the STA (may no longer be active) + * @new_vif: vif which should be associated to the STA for now on + * + * This function will switch the vif (i.e. the netdev) associated to all STA's + * TXQ. This is used when AP_VLAN interface are created. + * If one STA is associated to an AP_vlan vif, it will be moved from the master + * AP vif to the AP_vlan vif. + * If an AP_vlan vif is removed, then STA will be moved back to mastert AP vif. + * + */ +void rwnx_txq_sta_switch_vif(struct rwnx_sta *sta, struct rwnx_vif *old_vif, + struct rwnx_vif *new_vif) +{ + struct rwnx_hw *rwnx_hw = new_vif->rwnx_hw; + struct rwnx_txq *txq; + int i; + + /* start TXQ on the new interface, and update ndev field in txq */ + if (!netif_carrier_ok(new_vif->ndev)) + netif_carrier_on(new_vif->ndev); + txq = rwnx_txq_sta_get(sta, 0, rwnx_hw); + for (i = 0; i < NX_NB_TID_PER_STA; i++, txq++) { + txq->ndev = new_vif->ndev; + netif_wake_subqueue(txq->ndev, txq->ndev_idx); + } +} +#endif /* CONFIG_RWNX_FULLMAC */ + +/****************************************************************************** + * TXQ queue/schedule functions + *****************************************************************************/ +/** + * rwnx_txq_queue_skb - Queue a buffer in a TX queue + * + * @skb: Buffer to queue + * @txq: TX Queue in which the buffer must be added + * @rwnx_hw: Driver main data + * @retry: Should it be queued in the retry list + * + * @return: Retrun 1 if txq has been added to hwq list, 0 otherwise + * + * Add a buffer in the buffer list of the TX queue + * and add this TX queue in the HW queue list if the txq is not stopped. + * If this is a retry packet it is added after the last retry packet or at the + * beginning if there is no retry packet queued. + * + * If the STA is in PS mode and this is the first packet queued for this txq + * update TIM. 
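+ * For illustration, the management and monitor xmit paths in this driver
+ * queue a frame roughly as follows (sketch, error handling omitted):
+ *
+ *   spin_lock_bh(&rwnx_hw->tx_lock);
+ *   if (rwnx_txq_queue_skb(skb, txq, rwnx_hw, false))
+ *       rwnx_hwq_process(rwnx_hw, txq->hwq);
+ *   spin_unlock_bh(&rwnx_hw->tx_lock);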
+ * + * To be called with tx_lock hold + */ +int rwnx_txq_queue_skb(struct sk_buff *skb, struct rwnx_txq *txq, + struct rwnx_hw *rwnx_hw, bool retry) +{ +#ifndef CONFIG_ONE_TXQ + unsigned long flags; +#endif + +#ifdef CONFIG_RWNX_FULLMAC + if (unlikely(txq->sta && txq->sta->ps.active)) { + txq->sta->ps.pkt_ready[txq->ps_id]++; +#ifdef CREATE_TRACE_POINT + trace_ps_queue(txq->sta); +#endif + if (txq->sta->ps.pkt_ready[txq->ps_id] == 1) { + rwnx_set_traffic_status(rwnx_hw, txq->sta, true, txq->ps_id); + } + } +#endif + + if (!retry) { + /* add buffer in the sk_list */ + skb_queue_tail(&txq->sk_list, skb); + } else { + if (txq->last_retry_skb) + rwnx_skb_append(txq->last_retry_skb, skb, &txq->sk_list); + else + skb_queue_head(&txq->sk_list, skb); + + txq->last_retry_skb = skb; + txq->nb_retry++; + } +#ifdef CREATE_TRACE_POINTS + trace_txq_queue_skb(skb, txq, retry); +#endif + /* Flowctrl corresponding netdev queue if needed */ +#ifdef CONFIG_RWNX_FULLMAC +#ifndef CONFIG_ONE_TXQ + /* If too many buffer are queued for this TXQ stop netdev queue */ + spin_lock_irqsave(&rwnx_hw->usbdev->tx_flow_lock, flags); + if ((txq->ndev_idx != NDEV_NO_TXQ) && !rwnx_hw->usbdev->tbusy && ((skb_queue_len(&txq->sk_list) > RWNX_NDEV_FLOW_CTRL_STOP))) { + txq->status |= RWNX_TXQ_NDEV_FLOW_CTRL; + netif_stop_subqueue(txq->ndev, txq->ndev_idx); +#ifdef CREATE_TRACE_POINTS + trace_txq_flowctrl_stop(txq); +#endif + } + spin_unlock_irqrestore(&rwnx_hw->usbdev->tx_flow_lock, flags); + +#endif /* CONFIG_ONE_TXQ */ +#else /* ! CONFIG_RWNX_FULLMAC */ + + if (!retry && ++txq->hwq->len == txq->hwq->len_stop) { +#ifdef CREATE_TRACE_POINTS + trace_hwq_flowctrl_stop(txq->hwq->id); +#endif + ieee80211_stop_queue(rwnx_hw->hw, txq->hwq->id); + rwnx_hw->stats.queues_stops++; + } + +#endif /* CONFIG_RWNX_FULLMAC */ + + /* add it in the hwq list if not stopped and not yet present */ + if (!rwnx_txq_is_stopped(txq)) { + rwnx_txq_add_to_hw_list(txq); + return 1; + } + + return 0; +} + +/** + * rwnx_txq_confirm_any - Process buffer confirmed by fw + * + * @rwnx_hw: Driver main data + * @txq: TX Queue + * @hwq: HW Queue + * @sw_txhdr: software descriptor of the confirmed packet + * + * Process a buffer returned by the fw. 
It doesn't check buffer status + * and only does systematic counter update: + * - hw credit + * - buffer pushed to fw + * + * To be called with tx_lock hold + */ +void rwnx_txq_confirm_any(struct rwnx_hw *rwnx_hw, struct rwnx_txq *txq, + struct rwnx_hwq *hwq, struct rwnx_sw_txhdr *sw_txhdr) +{ + int user = 0; + +#ifdef CONFIG_RWNX_MUMIMO_TX + int group_id; + + user = RWNX_MUMIMO_INFO_POS_ID(sw_txhdr->desc.host.mumimo_info); + group_id = RWNX_MUMIMO_INFO_GROUP_ID(sw_txhdr->desc.host.mumimo_info); + + if ((txq->idx != TXQ_INACTIVE) && + (txq->pkt_pushed[user] == 1) && + (txq->status & RWNX_TXQ_STOP_MU_POS)) + rwnx_txq_start(txq, RWNX_TXQ_STOP_MU_POS); + +#endif /* CONFIG_RWNX_MUMIMO_TX */ + + if (txq->pkt_pushed[user]) + txq->pkt_pushed[user]--; + + hwq->need_processing = true; + rwnx_hw->stats.cfm_balance[hwq->id]--; +} + +/****************************************************************************** + * HWQ processing + *****************************************************************************/ +static inline +bool rwnx_txq_take_mu_lock(struct rwnx_hw *rwnx_hw) +{ + bool res = false; +#ifdef CONFIG_RWNX_MUMIMO_TX + if (rwnx_hw->mod_params->mutx) + res = (down_trylock(&rwnx_hw->mu.lock) == 0); +#endif /* CONFIG_RWNX_MUMIMO_TX */ + return res; +} + +static inline +void rwnx_txq_release_mu_lock(struct rwnx_hw *rwnx_hw) +{ +#ifdef CONFIG_RWNX_MUMIMO_TX + up(&rwnx_hw->mu.lock); +#endif /* CONFIG_RWNX_MUMIMO_TX */ +} + +static inline +void rwnx_txq_set_mu_info(struct rwnx_hw *rwnx_hw, struct rwnx_txq *txq, + int group_id, int pos) +{ +#ifdef CONFIG_RWNX_MUMIMO_TX + trace_txq_select_mu_group(txq, group_id, pos); + if (group_id) { + txq->mumimo_info = group_id | (pos << 6); + rwnx_mu_set_active_group(rwnx_hw, group_id); + } else + txq->mumimo_info = 0; +#endif /* CONFIG_RWNX_MUMIMO_TX */ +} + +static inline +s8 rwnx_txq_get_credits(struct rwnx_txq *txq) +{ + s8 cred = txq->credits; + /* if destination is in PS mode, push_limit indicates the maximum + number of packet that can be pushed on this txq. */ + if (txq->push_limit && (cred > txq->push_limit)) { + cred = txq->push_limit; + } + return cred; +} + +/** + * skb_queue_extract - Extract buffer from skb list + * + * @list: List of skb to extract from + * @head: List of skb to append to + * @nb_elt: Number of skb to extract + * + * extract the first @nb_elt of @list and append them to @head + * It is assume that: + * - @list contains more that @nb_elt + * - There is no need to take @list nor @head lock to modify them + */ +static inline void skb_queue_extract(struct sk_buff_head *list, + struct sk_buff_head *head, int nb_elt) +{ + int i; + struct sk_buff *first, *last, *ptr; + + first = ptr = list->next; + for (i = 0; i < nb_elt; i++) { + ptr = ptr->next; + } + last = ptr->prev; + + /* unlink nb_elt in list */ + list->qlen -= nb_elt; + list->next = ptr; + ptr->prev = (struct sk_buff *)list; + + /* append nb_elt at end of head */ + head->qlen += nb_elt; + last->next = (struct sk_buff *)head; + head->prev->next = first; + first->prev = head->prev; + head->prev = last; +} + + +#ifdef CONFIG_MAC80211_TXQ +/** + * rwnx_txq_mac80211_dequeue - Dequeue buffer from mac80211 txq and + * add them to push list + * + * @rwnx_hw: Main driver data + * @sk_list: List of buffer to push (initialized without lock) + * @txq: TXQ to dequeue buffers from + * @max: Max number of buffer to dequeue + * + * Dequeue buffer from mac80211 txq, prepare them for transmission and chain them + * to the list of buffer to push. 
+ * + * @return true if no more buffer are queued in mac80211 txq and false otherwise. + */ +static bool rwnx_txq_mac80211_dequeue(struct rwnx_hw *rwnx_hw, + struct sk_buff_head *sk_list, + struct rwnx_txq *txq, int max) +{ + struct ieee80211_txq *mac_txq; + struct sk_buff *skb; + unsigned long mac_txq_len; + + if (txq->nb_ready_mac80211 == NOT_MAC80211_TXQ) + return true; + + mac_txq = container_of((void *)txq, struct ieee80211_txq, drv_priv); + + for (; max > 0; max--) { + skb = rwnx_tx_dequeue_prep(rwnx_hw, mac_txq); + if (skb == NULL) + return true; + + __skb_queue_tail(sk_list, skb); + } + + /* re-read mac80211 txq current length. + It is mainly for debug purpose to trace dropped packet. There is no + problems to have nb_ready_mac80211 != actual mac80211 txq length */ + ieee80211_txq_get_depth(mac_txq, &mac_txq_len, NULL); + if (txq->nb_ready_mac80211 > mac_txq_len) + trace_txq_drop(txq, txq->nb_ready_mac80211 - mac_txq_len); + txq->nb_ready_mac80211 = mac_txq_len; + + return (txq->nb_ready_mac80211 == 0); +} +#endif + +/** + * rwnx_txq_get_skb_to_push - Get list of buffer to push for one txq + * + * @rwnx_hw: main driver data + * @hwq: HWQ on wich buffers will be pushed + * @txq: TXQ to get buffers from + * @user: user postion to use + * @sk_list_push: list to update + * + * + * This function will returned a list of buffer to push for one txq. + * It will take into account the number of credit of the HWQ for this user + * position and TXQ (and push_limit). + * This allow to get a list that can be pushed without having to test for + * hwq/txq status after each push + * + * If a MU group has been selected for this txq, it will also update the + * counter for the group + * + * @return true if txq no longer have buffer ready after the ones returned. + * false otherwise + */ +static +bool rwnx_txq_get_skb_to_push(struct rwnx_hw *rwnx_hw, struct rwnx_hwq *hwq, + struct rwnx_txq *txq, int user, + struct sk_buff_head *sk_list_push) +{ + int nb_ready = skb_queue_len(&txq->sk_list); + int credits = rwnx_txq_get_credits(txq); + bool res = false; + + __skb_queue_head_init(sk_list_push); + + if (credits >= nb_ready) { + skb_queue_splice_init(&txq->sk_list, sk_list_push); +#ifdef CONFIG_MAC80211_TXQ + res = rwnx_txq_mac80211_dequeue(rwnx_hw, sk_list_push, txq, credits - nb_ready); + credits = skb_queue_len(sk_list_push); +#else + res = true; + credits = nb_ready; +#endif + } else { + skb_queue_extract(&txq->sk_list, sk_list_push, credits); + + /* When processing PS service period (i.e. push_limit != 0), no longer + process this txq if the buffers extracted will complete the SP for + this txq */ + if (txq->push_limit && (credits == txq->push_limit)) + res = true; + } + + rwnx_mu_set_active_sta(rwnx_hw, rwnx_txq_2_sta(txq), credits); + + return res; +} + +/** + * rwnx_txq_select_user - Select User queue for a txq + * + * @rwnx_hw: main driver data + * @mu_lock: true is MU lock is taken + * @txq: TXQ to select MU group for + * @hwq: HWQ for the TXQ + * @user: Updated with user position selected + * + * @return false if it is no possible to process this txq. + * true otherwise + * + * This function selects the MU group to use for a TXQ. + * The selection is done as follow: + * + * - return immediately for STA that don't belongs to any group and select + * group 0 / user 0 + * + * - If MU tx is disabled (by user mutx_on, or because mu group are being + * updated !mu_lock), select group 0 / user 0 + * + * - Use the best group selected by @rwnx_mu_group_sta_select. 
+ * + * Each time a group is selected (except for the first case where sta + * doesn't belongs to a MU group), the function checks that no buffer is + * pending for this txq on another user position. If this is the case stop + * the txq (RWNX_TXQ_STOP_MU_POS) and return false. + * + */ +static +bool rwnx_txq_select_user(struct rwnx_hw *rwnx_hw, bool mu_lock, + struct rwnx_txq *txq, struct rwnx_hwq *hwq, int *user) +{ + int pos = 0; +#ifdef CONFIG_RWNX_MUMIMO_TX + int id, group_id = 0; + struct rwnx_sta *sta = rwnx_txq_2_sta(txq); + + /* for sta that belong to no group return immediately */ + if (!sta || !sta->group_info.cnt) + goto end; + + /* If MU is disabled, need to check user */ + if (!rwnx_hw->mod_params->mutx_on || !mu_lock) + goto check_user; + + /* Use the "best" group selected */ + group_id = sta->group_info.group; + + if (group_id > 0) + pos = rwnx_mu_group_sta_get_pos(rwnx_hw, sta, group_id); + + check_user: + /* check that we can push on this user position */ +#if CONFIG_USER_MAX == 2 + id = (pos + 1) & 0x1; + if (txq->pkt_pushed[id]) { + rwnx_txq_stop(txq, RWNX_TXQ_STOP_MU_POS); + return false; + } + +#else + for (id = 0 ; id < CONFIG_USER_MAX ; id++) { + if (id != pos && txq->pkt_pushed[id]) { + rwnx_txq_stop(txq, RWNX_TXQ_STOP_MU_POS); + return false; + } + } +#endif + + end: + rwnx_txq_set_mu_info(rwnx_hw, txq, group_id, pos); +#endif /* CONFIG_RWNX_MUMIMO_TX */ + + *user = pos; + return true; +} + + +/** + * rwnx_hwq_process - Process one HW queue list + * + * @rwnx_hw: Driver main data + * @hw_queue: HW queue index to process + * + * The function will iterate over all the TX queues linked in this HW queue + * list. For each TX queue, push as many buffers as possible in the HW queue. + * (NB: TX queue have at least 1 buffer, otherwise it wouldn't be in the list) + * - If TX queue no longer have buffer, remove it from the list and check next + * TX queue + * - If TX queue no longer have credits or has a push_limit (PS mode) and it + * is reached , remove it from the list and check next TX queue + * - If HW queue is full, update list head to start with the next TX queue on + * next call if current TX queue already pushed "too many" pkt in a row, and + * return + * + * To be called when HW queue list is modified: + * - when a buffer is pushed on a TX queue + * - when new credits are received + * - when a STA returns from Power Save mode or receives traffic request. 
+ * - when Channel context change + * + * To be called with tx_lock hold + */ +#define ALL_HWQ_MASK ((1 << CONFIG_USER_MAX) - 1) + +void rwnx_hwq_process(struct rwnx_hw *rwnx_hw, struct rwnx_hwq *hwq) +{ + struct rwnx_txq *txq, *next; + int user, credit_map = 0; + bool mu_enable; +#ifndef CONFIG_ONE_TXQ + unsigned long flags; +#endif +#ifdef CREATE_TRACE_POINTS + trace_process_hw_queue(hwq); +#endif + hwq->need_processing = false; + + mu_enable = rwnx_txq_take_mu_lock(rwnx_hw); + if (!mu_enable) + credit_map = ALL_HWQ_MASK - 1; + + list_for_each_entry_safe(txq, next, &hwq->list, sched_list) { + struct rwnx_txhdr *txhdr = NULL; + struct sk_buff_head sk_list_push; + struct sk_buff *skb; + bool txq_empty; +#ifdef CREATE_TRACE_POINTS + trace_process_txq(txq); +#endif + /* sanity check for debug */ + BUG_ON(!(txq->status & RWNX_TXQ_IN_HWQ_LIST)); + if(txq->idx == TXQ_INACTIVE){ + AICWFDBG(LOGERROR, "%s txq->idx == TXQ_INACTIVE \r\n", __func__); + rwnx_txq_del_from_hw_list(txq); + rwnx_txq_flush(rwnx_hw, txq); + continue; + } + BUG_ON(txq->idx == TXQ_INACTIVE); + BUG_ON(txq->credits <= 0); + BUG_ON(!rwnx_txq_skb_ready(txq)); + + if (!rwnx_txq_select_user(rwnx_hw, mu_enable, txq, hwq, &user)) + continue; + + txq_empty = rwnx_txq_get_skb_to_push(rwnx_hw, hwq, txq, user, + &sk_list_push); + + while ((skb = __skb_dequeue(&sk_list_push)) != NULL) { + txhdr = (struct rwnx_txhdr *)skb->data; + rwnx_tx_push(rwnx_hw, txhdr, 0); + } + + if (txq_empty) { + rwnx_txq_del_from_hw_list(txq); + txq->pkt_sent = 0; + } else if (rwnx_txq_is_scheduled(txq)) { + /* txq not empty, + - To avoid starving need to process other txq in the list + - For better aggregation, need to send "as many consecutive + pkt as possible" for he same txq + ==> Add counter to trigger txq switch + */ + if (txq->pkt_sent > hwq->size) { + txq->pkt_sent = 0; + list_rotate_left(&hwq->list); + } + } + +#ifdef CONFIG_RWNX_FULLMAC + /* Unable to complete PS traffic request because of hwq credit */ + if (txq->push_limit && txq->sta) { + if (txq->ps_id == LEGACY_PS_ID) { + /* for legacy PS abort SP and wait next ps-poll */ + txq->sta->ps.sp_cnt[txq->ps_id] -= txq->push_limit; + txq->push_limit = 0; + } + /* for u-apsd need to complete the SP to send EOSP frame */ + } +#ifndef CONFIG_ONE_TXQ + /* restart netdev queue if number of queued buffer is below threshold */ + spin_lock_irqsave(&rwnx_hw->usbdev->tx_flow_lock, flags); + if (unlikely(txq->status & RWNX_TXQ_NDEV_FLOW_CTRL) && + skb_queue_len(&txq->sk_list) < RWNX_NDEV_FLOW_CTRL_RESTART) { + txq->status &= ~RWNX_TXQ_NDEV_FLOW_CTRL; + if(!rwnx_hw->usbdev->tbusy) + netif_wake_subqueue(txq->ndev, txq->ndev_idx); +#ifdef CREATE_TRACE_POINTS + trace_txq_flowctrl_restart(txq); +#endif + } + spin_unlock_irqrestore(&rwnx_hw->usbdev->tx_flow_lock, flags); +#endif /* CONFIG_ONE_TXQ */ +#endif /* CONFIG_RWNX_FULLMAC */ + } + + + if (mu_enable) + rwnx_txq_release_mu_lock(rwnx_hw); +} + +/** + * rwnx_hwq_process_all - Process all HW queue list + * + * @rwnx_hw: Driver main data + * + * Loop over all HWQ, and process them if needed + * To be called with tx_lock hold + */ +void rwnx_hwq_process_all(struct rwnx_hw *rwnx_hw) +{ + int id; + + rwnx_mu_group_sta_select(rwnx_hw); + + for (id = ARRAY_SIZE(rwnx_hw->hwq) - 1; id >= 0 ; id--) { + if (rwnx_hw->hwq[id].need_processing) { + rwnx_hwq_process(rwnx_hw, &rwnx_hw->hwq[id]); + } + } +} + +/** + * rwnx_hwq_init - Initialize all hwq structures + * + * @rwnx_hw: Driver main data + * + */ +void rwnx_hwq_init(struct rwnx_hw *rwnx_hw) +{ + int i; + + for (i = 0; i < 
ARRAY_SIZE(rwnx_hw->hwq); i++) { + struct rwnx_hwq *hwq = &rwnx_hw->hwq[i]; + + hwq->id = i; + hwq->size = nx_txdesc_cnt[i]; + INIT_LIST_HEAD(&hwq->list); + + } +} diff --git a/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_txq.h b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_txq.h new file mode 100755 index 0000000..c019c6a --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_txq.h @@ -0,0 +1,397 @@ +/** + **************************************************************************************** + * + * @file rwnx_txq.h + * + * Copyright (C) RivieraWaves 2012-2019 + * + **************************************************************************************** + */ +#ifndef _RWNX_TXQ_H_ +#define _RWNX_TXQ_H_ + +#include +#include +#include + +#include + +#ifdef CONFIG_RWNX_FULLMAC +/** + * Fullmac TXQ configuration: + * - STA: 1 TXQ per TID (limited to 8) + * 1 TXQ for bufferable MGT frames + * - VIF: 1 TXQ for Multi/Broadcast + + * 1 TXQ for MGT for unknown STAs or non-bufferable MGT frames + * - 1 TXQ for offchannel transmissions + * + * + * Txq mapping looks like + * for NX_REMOTE_STA_MAX=10 and NX_VIRT_DEV_MAX=4 + * + * | TXQ | NDEV_ID | VIF | STA | TID | HWQ | + * |-----+---------+-----+-------+------+-----|- + * | 0 | 0 | | 0 | 0 | 1 | 9 TXQ per STA + * | 1 | 1 | | 0 | 1 | 0 | (8 data + 1 mgmt) + * | 2 | 2 | | 0 | 2 | 0 | + * | 3 | 3 | | 0 | 3 | 1 | + * | 4 | 4 | | 0 | 4 | 2 | + * | 5 | 5 | | 0 | 5 | 2 | + * | 6 | 6 | | 0 | 6 | 3 | + * | 7 | 7 | | 0 | 7 | 3 | + * | 8 | N/A | | 0 | MGMT | 3 | + * |-----+---------+-----+-------+------+-----|- + * | ... | | | | | | Same for all STAs + * |-----+---------+-----+-------+------+-----|- + * | 90 | 80 | 0 | BC/MC | 0 | 1/4 | 1 TXQ for BC/MC per VIF + * | ... | | | | | | + * | 93 | 80 | 3 | BC/MC | 0 | 1/4 | + * |-----+---------+-----+-------+------+-----|- + * | 94 | N/A | 0 | N/A | MGMT | 3 | 1 TXQ for unknown STA per VIF + * | ... | | | | | | + * | 97 | N/A | 3 | N/A | MGMT | 3 | + * |-----+---------+-----+-------+------+-----|- + * | 98 | N/A | | N/A | MGMT | 3 | 1 TXQ for offchannel frame + */ +#define NX_NB_TID_PER_STA 8 +#define NX_NB_TXQ_PER_STA (NX_NB_TID_PER_STA + 1) +#define NX_NB_TXQ_PER_VIF 2 +#define NX_NB_TXQ ((NX_NB_TXQ_PER_STA * NX_REMOTE_STA_MAX) + \ + (NX_NB_TXQ_PER_VIF * NX_VIRT_DEV_MAX) + 1) + +#define NX_FIRST_VIF_TXQ_IDX (NX_REMOTE_STA_MAX * NX_NB_TXQ_PER_STA) +#define NX_FIRST_BCMC_TXQ_IDX NX_FIRST_VIF_TXQ_IDX +#define NX_FIRST_UNK_TXQ_IDX (NX_FIRST_BCMC_TXQ_IDX + NX_VIRT_DEV_MAX) + +#define NX_FIRST_VIF_TXQ_IDX_FOR_OLD_IC (NX_REMOTE_STA_MAX_FOR_OLD_IC * NX_NB_TXQ_PER_STA) +#define NX_FIRST_BCMC_TXQ_IDX_FOR_OLD_IC NX_FIRST_VIF_TXQ_IDX_FOR_OLD_IC +#define NX_FIRST_UNK_TXQ_IDX_FOR_OLD_IC (NX_FIRST_BCMC_TXQ_IDX_FOR_OLD_IC + NX_VIRT_DEV_MAX) + + +#define NX_OFF_CHAN_TXQ_IDX (NX_FIRST_VIF_TXQ_IDX + \ + (NX_VIRT_DEV_MAX * NX_NB_TXQ_PER_VIF)) + +#define NX_OFF_CHAN_TXQ_IDX_FOR_OLD_IC (NX_FIRST_VIF_TXQ_IDX_FOR_OLD_IC + \ + (NX_VIRT_DEV_MAX * NX_NB_TXQ_PER_VIF)) + + +#define NX_BCMC_TXQ_TYPE 0 +#define NX_UNK_TXQ_TYPE 1 + +/** + * Each data TXQ is a netdev queue. TXQ to send MGT are not data TXQ as + * they did not recieved buffer from netdev interface. + * Need to allocate the maximum case. 
+ * AP : all STAs + 1 BC/MC + */ +#define NX_NB_NDEV_TXQ ((NX_NB_TID_PER_STA * NX_REMOTE_STA_MAX) + 1 ) +#define NX_NB_NDEV_TXQ_FOR_OLD_IC ((NX_NB_TID_PER_STA * NX_REMOTE_STA_MAX_FOR_OLD_IC) + 1) + +#define NX_BCMC_TXQ_NDEV_IDX (NX_NB_TID_PER_STA * NX_REMOTE_STA_MAX) +#define NX_BCMC_TXQ_NDEV_IDX_FOR_OLD_IC (NX_NB_TID_PER_STA * NX_REMOTE_STA_MAX_FOR_OLD_IC) +#define NX_STA_NDEV_IDX(tid, sta_idx) ((tid) + (sta_idx) * NX_NB_TID_PER_STA) +#define NDEV_NO_TXQ 0xffff +#if (NX_NB_NDEV_TXQ >= NDEV_NO_TXQ) +#error("Need to increase struct rwnx_txq->ndev_idx size") +#endif + +/* stop netdev queue when number of queued buffers if greater than this */ +#define RWNX_NDEV_FLOW_CTRL_STOP 64 +/* restart netdev queue when number of queued buffers is lower than this */ +#define RWNX_NDEV_FLOW_CTRL_RESTART 64 + +#endif /* CONFIG_RWNX_FULLMAC */ + +#define TXQ_INACTIVE 0xffff +#if (NX_NB_TXQ >= TXQ_INACTIVE) +#error("Need to increase struct rwnx_txq->idx size") +#endif + +#define NX_TXQ_INITIAL_CREDITS 64 + +/** + * TXQ tid sorted by decreasing priority + */ +extern const int nx_tid_prio[NX_NB_TID_PER_STA]; + +/** + * struct rwnx_hwq - Structure used to save information relative to + * an AC TX queue (aka HW queue) + * @list: List of TXQ, that have buffers ready for this HWQ + * @credits: available credit for the queue (i.e. nb of buffers that + * can be pushed to FW ) + * @id Id of the HWQ among RWNX_HWQ_.... + * @size size of the queue + * @need_processing Indicate if hwq should be processed + * @len number of packet ready to be pushed to fw for this HW queue + * @len_stop threshold to stop mac80211(i.e. netdev) queues. Stop queue when + * driver has more than @len_stop packets ready. + * @len_start threshold to wake mac8011 queues. Wake queue when driver has + * less than @len_start packets ready. + */ +struct rwnx_hwq { + struct list_head list; + u8 size; + u8 id; + bool need_processing; +}; + +/** + * enum rwnx_push_flags - Flags of pushed buffer + * + * @RWNX_PUSH_RETRY Pushing a buffer for retry + * @RWNX_PUSH_IMMEDIATE Pushing a buffer without queuing it first + */ +enum rwnx_push_flags { + RWNX_PUSH_RETRY = BIT(0), + RWNX_PUSH_IMMEDIATE = BIT(1), +}; + +/** + * enum rwnx_txq_flags - TXQ status flag + * + * @RWNX_TXQ_IN_HWQ_LIST: The queue is scheduled for transmission + * @RWNX_TXQ_STOP_FULL: No more credits for the queue + * @RWNX_TXQ_STOP_CSA: CSA is in progress + * @RWNX_TXQ_STOP_STA_PS: Destiniation sta is currently in power save mode + * @RWNX_TXQ_STOP_VIF_PS: Vif owning this queue is currently in power save mode + * @RWNX_TXQ_STOP_CHAN: Channel of this queue is not the current active channel + * @RWNX_TXQ_STOP_MU_POS: TXQ is stopped waiting for all the buffers pushed to + * fw to be confirmed + * @RWNX_TXQ_STOP: All possible reason to have a txq stopped + * @RWNX_TXQ_NDEV_FLOW_CTRL: associated netdev queue is currently stopped. + * Note: when a TXQ is flowctrl it is NOT stopped + */ +enum rwnx_txq_flags { + RWNX_TXQ_IN_HWQ_LIST = BIT(0), + RWNX_TXQ_STOP_FULL = BIT(1), + RWNX_TXQ_STOP_CSA = BIT(2), + RWNX_TXQ_STOP_STA_PS = BIT(3), + RWNX_TXQ_STOP_VIF_PS = BIT(4), + RWNX_TXQ_STOP_CHAN = BIT(5), + RWNX_TXQ_STOP_MU_POS = BIT(6), + RWNX_TXQ_STOP = (RWNX_TXQ_STOP_FULL | RWNX_TXQ_STOP_CSA | + RWNX_TXQ_STOP_STA_PS | RWNX_TXQ_STOP_VIF_PS | + RWNX_TXQ_STOP_CHAN) , + RWNX_TXQ_NDEV_FLOW_CTRL = BIT(7), +}; + + +/** + * struct rwnx_txq - Structure used to save information relative to + * a RA/TID TX queue + * + * @idx: Unique txq idx. Set to TXQ_INACTIVE if txq is not used. 
+ * @status: bitfield of @rwnx_txq_flags. + * @credits: available credit for the queue (i.e. nb of buffers that + * can be pushed to FW). + * @pkt_sent: number of consecutive pkt sent without leaving HW queue list + * @pkt_pushed: number of pkt currently pending for transmission confirmation + * @sched_list: list node for HW queue schedule list (rwnx_hwq.list) + * @sk_list: list of buffers to push to fw + * @last_retry_skb: pointer on the last skb in @sk_list that is a retry. + * (retry skb are stored at the beginning of the list) + * NULL if no retry skb is queued in @sk_list + * @nb_retry: Number of retry packet queued. + * @hwq: Pointer on the associated HW queue. + * @push_limit: number of packet to push before removing the txq from hwq list. + * (we always have push_limit < skb_queue_len(sk_list)) + * @tid: TID + * + * FULLMAC specific + * @ps_id: Index to use for Power save mode (LEGACY or UAPSD) + * @ndev_idx: txq idx from netdev point of view (0xFF for non netdev queue) + * @ndev: pointer to ndev of the corresponding vif + * @amsdu: pointer to rwnx_sw_txhdr of the first subframe of the A-MSDU. + * NULL if no A-MSDU frame is in construction + * @amsdu_len: Maximum size allowed for an A-MSDU. 0 means A-MSDU not allowed + */ +struct rwnx_txq { + u16 idx; + u8 status; + s8 credits; + u8 pkt_sent; + u8 pkt_pushed[CONFIG_USER_MAX]; + struct list_head sched_list; + struct sk_buff_head sk_list; + struct sk_buff *last_retry_skb; + struct rwnx_hwq *hwq; + int nb_retry; + u8 push_limit; + u8 tid; +#ifdef CONFIG_MAC80211_TXQ + unsigned long nb_ready_mac80211; +#endif +#ifdef CONFIG_RWNX_FULLMAC + struct rwnx_sta *sta; + u8 ps_id; + u16 ndev_idx; + struct net_device *ndev; +#ifdef CONFIG_RWNX_AMSDUS_TX + struct rwnx_sw_txhdr *amsdu; + u16 amsdu_len; +#endif /* CONFIG_RWNX_AMSDUS_TX */ +#endif /* CONFIG_RWNX_FULLMAC */ +#ifdef CONFIG_RWNX_MUMIMO_TX + u8 mumimo_info; +#endif +}; + +struct rwnx_sta; +struct rwnx_vif; +struct rwnx_hw; +struct rwnx_sw_txhdr; + +#ifdef CONFIG_RWNX_MUMIMO_TX +#define RWNX_TXQ_GROUP_ID(txq) ((txq)->mumimo_info & 0x3f) +#define RWNX_TXQ_POS_ID(txq) (((txq)->mumimo_info >> 6) & 0x3) +#else +#define RWNX_TXQ_GROUP_ID(txq) 0 +#define RWNX_TXQ_POS_ID(txq) 0 +#endif /* CONFIG_RWNX_MUMIMO_TX */ + +static inline bool rwnx_txq_is_stopped(struct rwnx_txq *txq) +{ + return (txq->status & RWNX_TXQ_STOP); +} + +static inline bool rwnx_txq_is_full(struct rwnx_txq *txq) +{ + return (txq->status & RWNX_TXQ_STOP_FULL); +} + +static inline bool rwnx_txq_is_scheduled(struct rwnx_txq *txq) +{ + return (txq->status & RWNX_TXQ_IN_HWQ_LIST); +} + +/** + * foreach_sta_txq - Macro to iterate over all TXQ of a STA in increasing + * TID order + * + * @sta: pointer to rwnx_sta + * @txq: pointer to rwnx_txq updated with the next TXQ at each iteration + * @tid: int updated with the TXQ tid at each iteration + * @rwnx_hw: main driver data + */ +#ifdef CONFIG_MAC80211_TXQ +#define foreach_sta_txq(sta, txq, tid, rwnx_hw) \ + for (tid = 0, txq = rwnx_txq_sta_get(sta, 0); \ + tid < NX_NB_TXQ_PER_STA; \ + tid++, txq = rwnx_txq_sta_get(sta, tid)) + +#elif defined(CONFIG_RWNX_FULLMAC) /* CONFIG_RWNX_FULLMAC */ +#define foreach_sta_txq(sta, txq, tid, rwnx_hw) \ + for (tid = 0, txq = rwnx_txq_sta_get(sta, 0, rwnx_hw); \ + tid < (is_multicast_sta(sta->sta_idx) ? 
1 : NX_NB_TXQ_PER_STA); \ + tid++, txq++) + +#endif + +/** + * foreach_sta_txq_prio - Macro to iterate over all TXQ of a STA in + * decreasing priority order + * + * @sta: pointer to rwnx_sta + * @txq: pointer to rwnx_txq updated with the next TXQ at each iteration + * @tid: int updated with the TXQ tid at each iteration + * @i: int updated with ieration count + * @rwnx_hw: main driver data + * + * Note: For fullmac txq for mgmt frame is skipped + */ +#ifdef CONFIG_RWNX_FULLMAC +#define foreach_sta_txq_prio(sta, txq, tid, i, rwnx_hw) \ + for (i = 0, tid = nx_tid_prio[0], txq = rwnx_txq_sta_get(sta, tid, rwnx_hw); \ + i < NX_NB_TID_PER_STA; \ + i++, tid = nx_tid_prio[i], txq = rwnx_txq_sta_get(sta, tid, rwnx_hw)) +#endif + +/** + * foreach_vif_txq - Macro to iterate over all TXQ of a VIF (in AC order) + * + * @vif: pointer to rwnx_vif + * @txq: pointer to rwnx_txq updated with the next TXQ at each iteration + * @ac: int updated with the TXQ ac at each iteration + */ +#ifdef CONFIG_MAC80211_TXQ +#define foreach_vif_txq(vif, txq, ac) \ + for (ac = RWNX_HWQ_BK, txq = rwnx_txq_vif_get(vif, ac); \ + ac < NX_NB_TXQ_PER_VIF; \ + ac++, txq = rwnx_txq_vif_get(vif, ac)) + +#else +#define foreach_vif_txq(vif, txq, ac) \ + for (ac = RWNX_HWQ_BK, txq = &vif->txqs[0]; \ + ac < NX_NB_TXQ_PER_VIF; \ + ac++, txq++) +#endif + +#ifdef CONFIG_RWNX_FULLMAC +struct rwnx_txq *rwnx_txq_sta_get(struct rwnx_sta *sta, u8 tid, + struct rwnx_hw * rwnx_hw); +struct rwnx_txq *rwnx_txq_vif_get(struct rwnx_vif *vif, u8 type); +#endif /* CONFIG_RWNX_FULLMAC */ + +/** + * rwnx_txq_vif_get_status - return status bits related to the vif + * + * @rwnx_vif: Pointer to vif structure + */ +static inline u8 rwnx_txq_vif_get_status(struct rwnx_vif *rwnx_vif) +{ + struct rwnx_txq *txq = rwnx_txq_vif_get(rwnx_vif, 0); + return (txq->status & (RWNX_TXQ_STOP_CHAN | RWNX_TXQ_STOP_VIF_PS)); +} + +void rwnx_txq_vif_init(struct rwnx_hw * rwnx_hw, struct rwnx_vif *vif, + u8 status); +void rwnx_txq_vif_deinit(struct rwnx_hw * rwnx_hw, struct rwnx_vif *vif); +void rwnx_txq_sta_init(struct rwnx_hw * rwnx_hw, struct rwnx_sta *rwnx_sta, + u8 status); +void rwnx_txq_sta_deinit(struct rwnx_hw * rwnx_hw, struct rwnx_sta *rwnx_sta); +#ifdef CONFIG_RWNX_FULLMAC +void rwnx_txq_unk_vif_init(struct rwnx_vif *rwnx_vif); +void rwnx_txq_unk_vif_deinit(struct rwnx_vif *vif); +void rwnx_txq_offchan_init(struct rwnx_vif *rwnx_vif); +void rwnx_txq_offchan_deinit(struct rwnx_vif *rwnx_vif); +void rwnx_txq_tdls_vif_init(struct rwnx_vif *rwnx_vif); +void rwnx_txq_tdls_vif_deinit(struct rwnx_vif *vif); +void rwnx_txq_tdls_sta_start(struct rwnx_vif *rwnx_vif, u16 reason, + struct rwnx_hw *rwnx_hw); +void rwnx_txq_tdls_sta_stop(struct rwnx_vif *rwnx_vif, u16 reason, + struct rwnx_hw *rwnx_hw); +#endif + + +void rwnx_txq_add_to_hw_list(struct rwnx_txq *txq); +void rwnx_txq_del_from_hw_list(struct rwnx_txq *txq); +void rwnx_txq_stop(struct rwnx_txq *txq, u16 reason); +void rwnx_txq_start(struct rwnx_txq *txq, u16 reason); +void rwnx_txq_vif_start(struct rwnx_vif *vif, u16 reason, + struct rwnx_hw *rwnx_hw); +void rwnx_txq_vif_stop(struct rwnx_vif *vif, u16 reason, + struct rwnx_hw *rwnx_hw); + +#ifdef CONFIG_RWNX_FULLMAC +void rwnx_txq_sta_start(struct rwnx_sta *sta, u16 reason, + struct rwnx_hw *rwnx_hw); +void rwnx_txq_sta_stop(struct rwnx_sta *sta, u16 reason, + struct rwnx_hw *rwnx_hw); +void rwnx_txq_offchan_start(struct rwnx_hw *rwnx_hw); +void rwnx_txq_sta_switch_vif(struct rwnx_sta *sta, struct rwnx_vif *old_vif, + struct rwnx_vif *new_vif); + +#endif /* 
CONFIG_RWNX_FULLMAC */ + +int rwnx_txq_queue_skb(struct sk_buff *skb, struct rwnx_txq *txq, + struct rwnx_hw *rwnx_hw, bool retry); +void rwnx_txq_confirm_any(struct rwnx_hw *rwnx_hw, struct rwnx_txq *txq, + struct rwnx_hwq *hwq, struct rwnx_sw_txhdr *sw_txhdr); + + +void rwnx_hwq_init(struct rwnx_hw *rwnx_hw); +void rwnx_hwq_process(struct rwnx_hw *rwnx_hw, struct rwnx_hwq *hwq); +void rwnx_hwq_process_all(struct rwnx_hw *rwnx_hw); + +#endif /* _RWNX_TXQ_H_ */ diff --git a/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_utils.c b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_utils.c new file mode 100644 index 0000000..c78e58e --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_utils.c @@ -0,0 +1,39 @@ +/** + * rwnx_utils.c + * + * IPC utility function definitions + * + * Copyright (C) RivieraWaves 2012-2019 + */ +#include "rwnx_utils.h" +#include "rwnx_defs.h" +#include "rwnx_rx.h" +#include "rwnx_tx.h" +#include "rwnx_msg_rx.h" +#include "rwnx_debugfs.h" +#include "rwnx_prof.h" +#include "ipc_host.h" + + +extern int get_testmode(void); +extern void get_fw_path(char* fw_path); +extern int testmode; +extern char aic_fw_path[200]; + +int rwnx_init_aic(struct rwnx_hw *rwnx_hw) +{ + RWNX_DBG(RWNX_FN_ENTRY_STR); +#ifdef AICWF_SDIO_SUPPORT + aicwf_sdio_host_init(&(rwnx_hw->sdio_env), NULL, NULL, rwnx_hw); +#else + aicwf_usb_host_init(&(rwnx_hw->usb_env), NULL, NULL, rwnx_hw); +#endif + rwnx_cmd_mgr_init(rwnx_hw->cmd_mgr); + + testmode = get_testmode(); + memset(aic_fw_path, 0, 200); + get_fw_path(aic_fw_path); + + return 0; +} + diff --git a/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_utils.h b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_utils.h new file mode 100644 index 0000000..402fb39 --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_utils.h @@ -0,0 +1,136 @@ +/** + * rwnx_ipc_utils.h + * + * IPC utility function declarations + * + * Copyright (C) RivieraWaves 2012-2019 + */ +#ifndef _RWNX_IPC_UTILS_H_ +#define _RWNX_IPC_UTILS_H_ + +#include +#include +#include + +#include "lmac_msg.h" +#include "aicwf_debug.h" + + +#if 0 +#ifdef CONFIG_RWNX_DBG +/* #define RWNX_DBG(format, arg...) pr_warn(format, ## arg) */ +#define RWNX_DBG printk +#else +#define RWNX_DBG(a...) do {} while (0) +#endif +#endif + + + +enum rwnx_dev_flag { + RWNX_DEV_RESTARTING, + RWNX_DEV_STACK_RESTARTING, + RWNX_DEV_STARTED, +}; + +struct rwnx_hw; +struct rwnx_sta; + +/** + * struct rwnx_ipc_elem - Generic IPC buffer of fixed size + * + * @addr: Host address of the buffer. + * @dma_addr: DMA address of the buffer. + */ +struct rwnx_ipc_elem { + void *addr; + dma_addr_t dma_addr; +}; + +/** + * struct rwnx_ipc_elem_pool - Generic pool of IPC buffers of fixed size + * + * @nb: Number of buffers currenlty allocated in the pool + * @buf: Array of buffers (size of array is @nb) + * @pool: DMA pool in which buffers have been allocated + */ +struct rwnx_ipc_elem_pool { + int nb; + struct rwnx_ipc_elem *buf; + struct dma_pool *pool; +}; + +/** + * struct rwnx_ipc_elem - Generic IPC buffer of variable size + * + * @addr: Host address of the buffer. + * @dma_addr: DMA address of the buffer. 
+ * @size: Size, in bytes, of the buffer + */ +struct rwnx_ipc_elem_var { + void *addr; + dma_addr_t dma_addr; + size_t size; +}; + +/** + * struct rwnx_ipc_dbgdump_elem - IPC buffer for debug dump + * + * @mutex: Mutex to protect access to debug dump + * @buf: IPC buffer + */ +struct rwnx_ipc_dbgdump_elem { + struct mutex mutex; + struct rwnx_ipc_elem_var buf; +}; + +static const u32 rwnx_rxbuff_pattern = 0xCAFEFADE; + +/* + * Maximum Length of Radiotap header vendor specific data(in bytes) + */ +#define RADIOTAP_HDR_VEND_MAX_LEN 16 + +/* + * Maximum Radiotap Header Length without vendor specific data (in bytes) + */ +#define RADIOTAP_HDR_MAX_LEN 80 + +/* + * Unsupported HT Frame data length (in bytes) + */ +#define UNSUP_RX_VEC_DATA_LEN 2 + +/** + * struct rwnx_ipc_skb_elem - IPC buffer for SKB element + * + * @skb: Pointer to the skb buffer allocated + * @dma_addr: DMA address of the data buffer fo skb + * + */ +struct rwnx_ipc_skb_elem { + struct sk_buff *skb; + dma_addr_t dma_addr; +}; + +#ifdef CONFIG_RWNX_FULLMAC + +/* Maximum number of rx buffer the fw may use at the same time */ +#define RWNX_RXBUFF_MAX (64 * NX_REMOTE_STA_MAX) + +/** + * struct rwnx_ipc_rxbuf_elems - IPC buffers for RX + * + * @skb: Array of buffer push to FW. + * @idx: Index of the last pushed skb.(Use to find the next free entry quicker) + * + * Note: contrary to softmac version, dma_addr are stored inside skb->cb. + * (cf &struct rwnx_skb_cb) + */ +struct rwnx_ipc_rxbuf_elems { + struct sk_buff *skb[RWNX_RXBUFF_MAX]; + int idx; +}; + +#endif /* CONFIG_RWNX_FULLMAC */ +#endif /* _RWNX_IPC_UTILS_H_ */ diff --git a/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_v7.c b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_v7.c new file mode 100644 index 0000000..c8bcfe1 --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_v7.c @@ -0,0 +1,192 @@ +/** + ****************************************************************************** + * + * @file rwnx_v7.c - Support for v7 platform + * + * Copyright (C) RivieraWaves 2012-2019 + * + ****************************************************************************** + */ + +#include "rwnx_v7.h" +#include "rwnx_defs.h" +#include "rwnx_irqs.h" +#include "reg_access.h" +#include "hal_desc.h" + +struct rwnx_v7 +{ + u8 *pci_bar0_vaddr; + u8 *pci_bar1_vaddr; +}; + +static int rwnx_v7_platform_enable(struct rwnx_hw *rwnx_hw) +{ + int ret; + + /* sched_setscheduler on ONESHOT threaded irq handler for BCNs ? 
*/ + ret = request_irq(rwnx_hw->plat->pci_dev->irq, rwnx_irq_hdlr, 0, + "rwnx", rwnx_hw); + return ret; +} + +static int rwnx_v7_platform_disable(struct rwnx_hw *rwnx_hw) +{ + free_irq(rwnx_hw->plat->pci_dev->irq, rwnx_hw); + return 0; +} + +static void rwnx_v7_platform_deinit(struct rwnx_plat *rwnx_plat) +{ + #ifdef CONFIG_PCI + struct rwnx_v7 *rwnx_v7 = (struct rwnx_v7 *)rwnx_plat->priv; + + pci_disable_device(rwnx_plat->pci_dev); + iounmap(rwnx_v7->pci_bar0_vaddr); + iounmap(rwnx_v7->pci_bar1_vaddr); + pci_release_regions(rwnx_plat->pci_dev); + pci_clear_master(rwnx_plat->pci_dev); + pci_disable_msi(rwnx_plat->pci_dev); + #endif + kfree(rwnx_plat); +} + +static u8* rwnx_v7_get_address(struct rwnx_plat *rwnx_plat, int addr_name, + unsigned int offset) +{ + struct rwnx_v7 *rwnx_v7 = (struct rwnx_v7 *)rwnx_plat->priv; + + if (WARN(addr_name >= RWNX_ADDR_MAX, "Invalid address %d", addr_name)) + return NULL; + + if (addr_name == RWNX_ADDR_CPU) + return rwnx_v7->pci_bar0_vaddr + offset; + else + return rwnx_v7->pci_bar1_vaddr + offset; +} + +static void rwnx_v7_ack_irq(struct rwnx_plat *rwnx_plat) +{ + +} + +static const u32 rwnx_v7_config_reg[] = { + NXMAC_DEBUG_PORT_SEL_ADDR, + SYSCTRL_DIAG_CONF_ADDR, + SYSCTRL_PHYDIAG_CONF_ADDR, + SYSCTRL_RIUDIAG_CONF_ADDR, + RF_V7_DIAGPORT_CONF1_ADDR, +}; + +static const u32 rwnx_v7_he_config_reg[] = { + SYSCTRL_DIAG_CONF0, + SYSCTRL_DIAG_CONF1, + SYSCTRL_DIAG_CONF2, + SYSCTRL_DIAG_CONF3, +}; + +static int rwnx_v7_get_config_reg(struct rwnx_plat *rwnx_plat, const u32 **list) +{ + u32 fpga_sign; + + if (!list) + return 0; + + fpga_sign = RWNX_REG_READ(rwnx_plat, RWNX_ADDR_SYSTEM, SYSCTRL_SIGNATURE_ADDR); + if (__FPGA_TYPE(fpga_sign) == 0xc0ca) { + *list = rwnx_v7_he_config_reg; + return ARRAY_SIZE(rwnx_v7_he_config_reg); + } else { + *list = rwnx_v7_config_reg; + return ARRAY_SIZE(rwnx_v7_config_reg); + } +} + + +/** + * rwnx_v7_platform_init - Initialize the DINI platform + * + * @pci_dev PCI device + * @rwnx_plat Pointer on struct rwnx_stat * to be populated + * + * @return 0 on success, < 0 otherwise + * + * Allocate and initialize a rwnx_plat structure for the dini platform. 
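+ * Resources acquired here (PCI device, regions, MSI, BAR mappings) are
+ * released either on the local error path or later by rwnx_v7_platform_deinit().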
+ */ +int rwnx_v7_platform_init(struct pci_dev *pci_dev, struct rwnx_plat **rwnx_plat) +{ + struct rwnx_v7 *rwnx_v7; + u16 pci_cmd; + int ret = 0; + + *rwnx_plat = kzalloc(sizeof(struct rwnx_plat) + sizeof(struct rwnx_v7), + GFP_KERNEL); + if (!*rwnx_plat) + return -ENOMEM; + + rwnx_v7 = (struct rwnx_v7 *)(*rwnx_plat)->priv; + + /* Hotplug fixups */ + pci_read_config_word(pci_dev, PCI_COMMAND, &pci_cmd); + pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR; + pci_write_config_word(pci_dev, PCI_COMMAND, pci_cmd); + pci_write_config_byte(pci_dev, PCI_CACHE_LINE_SIZE, L1_CACHE_BYTES >> 2); + + if ((ret = pci_enable_device(pci_dev))) { + dev_err(&(pci_dev->dev), "pci_enable_device failed\n"); + goto out_enable; + } + + pci_set_master(pci_dev); + + if ((ret = pci_request_regions(pci_dev, KBUILD_MODNAME))) { + dev_err(&(pci_dev->dev), "pci_request_regions failed\n"); + goto out_request; + } + + #ifdef CONFIG_PCI + if (pci_enable_msi(pci_dev)) + { + dev_err(&(pci_dev->dev), "pci_enable_msi failed\n"); + goto out_msi; + + } + #endif + + if (!(rwnx_v7->pci_bar0_vaddr = (u8 *)pci_ioremap_bar(pci_dev, 0))) { + dev_err(&(pci_dev->dev), "pci_ioremap_bar(%d) failed\n", 0); + ret = -ENOMEM; + goto out_bar0; + } + if (!(rwnx_v7->pci_bar1_vaddr = (u8 *)pci_ioremap_bar(pci_dev, 1))) { + dev_err(&(pci_dev->dev), "pci_ioremap_bar(%d) failed\n", 1); + ret = -ENOMEM; + goto out_bar1; + } + + (*rwnx_plat)->enable = rwnx_v7_platform_enable; + (*rwnx_plat)->disable = rwnx_v7_platform_disable; + (*rwnx_plat)->deinit = rwnx_v7_platform_deinit; + (*rwnx_plat)->get_address = rwnx_v7_get_address; + (*rwnx_plat)->ack_irq = rwnx_v7_ack_irq; + (*rwnx_plat)->get_config_reg = rwnx_v7_get_config_reg; + + return 0; + + out_bar1: + iounmap(rwnx_v7->pci_bar0_vaddr); + out_bar0: +#ifdef CONFIG_PCI + pci_disable_msi(pci_dev); + out_msi: +#endif + pci_release_regions(pci_dev); + out_request: +#ifdef CONFIG_PCI + pci_clear_master(pci_dev); +#endif + pci_disable_device(pci_dev); + out_enable: + kfree(*rwnx_plat); + return ret; +} diff --git a/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_v7.h b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_v7.h new file mode 100644 index 0000000..8bb99df --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_v7.h @@ -0,0 +1,20 @@ +/** + ****************************************************************************** + * + * @file rwnx_v7.h + * + * Copyright (C) RivieraWaves 2012-2019 + * + ****************************************************************************** + */ + +#ifndef _RWNX_V7_H_ +#define _RWNX_V7_H_ + +#include +#include "rwnx_platform.h" + +int rwnx_v7_platform_init(struct pci_dev *pci_dev, + struct rwnx_plat **rwnx_plat); + +#endif /* _RWNX_V7_H_ */ diff --git a/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_version.h b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_version.h new file mode 100644 index 0000000..8eec30b --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_version.h @@ -0,0 +1,11 @@ +#ifndef _RWNX_VERSION_H_ +#define _RWNX_VERSION_H_ + +#include "rwnx_version_gen.h" + +static inline void rwnx_print_version(void) +{ + AICWFDBG(LOGINFO, RWNX_VERS_BANNER"\n"); +} + +#endif /* _RWNX_VERSION_H_ */ diff --git a/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_version_gen.h b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_version_gen.h new file mode 100755 index 0000000..c3f0252 --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_version_gen.h @@ -0,0 +1,5 @@ +#define RWNX_VERS_REV "1a4b0054d2M (master)" 
+#define RWNX_VERS_MOD "6.4.3.0" +#define RWNX_VERS_BANNER "rwnx v6.4.3.0 - 1a4b0054d2M (master)" +#define RELEASE_DATE "2025_0605_3d4cc869" + diff --git a/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_wakelock.c b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_wakelock.c new file mode 100644 index 0000000..90e7ad8 --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_wakelock.c @@ -0,0 +1,92 @@ +#include +#include +#include +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) +#include +#else +#include +#endif +#include "rwnx_defs.h" +#include "rwnx_wakelock.h" + +struct wakeup_source *rwnx_wakeup_init(const char *name) +{ + struct wakeup_source *ws; + ws = wakeup_source_create(name); + wakeup_source_add(ws); + return ws; +} + +void rwnx_wakeup_deinit(struct wakeup_source *ws) +{ + if (ws && ws->active) + __pm_relax(ws); + wakeup_source_remove(ws); + wakeup_source_destroy(ws); +} + +struct wakeup_source *rwnx_wakeup_register(struct device *dev, const char *name) +{ + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0) + return wakeup_source_register(dev, name); +#else + +#if defined(CONFIG_PLATFORM_ROCKCHIP2) || defined(CONFIG_PLATFORM_ROCKCHIP) +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0) + return wakeup_source_register(dev, name); +#else + return wakeup_source_register(name); +#endif + +#else + return wakeup_source_register(name); +#endif//#if defined(CONFIG_PLATFORM_ROCKCHIP2) || defined(CONFIG_PLATFORM_ROCKCHIP) + +#endif//LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0) +} + +void rwnx_wakeup_unregister(struct wakeup_source *ws) +{ + if (ws && ws->active) + __pm_relax(ws); + wakeup_source_unregister(ws); +} + +void rwnx_wakeup_lock(struct wakeup_source *ws) +{ + AICWFDBG(LOGDEBUG, "%s enter \r\n", __func__); + __pm_stay_awake(ws); +} + +void rwnx_wakeup_unlock(struct wakeup_source *ws) +{ + AICWFDBG(LOGDEBUG, "%s enter \r\n", __func__); + __pm_relax(ws); +} + +void rwnx_wakeup_lock_timeout(struct wakeup_source *ws, unsigned int msec) +{ + __pm_wakeup_event(ws, msec); +} + +void aicwf_wakeup_lock_init(struct rwnx_hw *rwnx_hw) +{ + rwnx_hw->ws_tx = rwnx_wakeup_init("rwnx_tx_wakelock"); + rwnx_hw->ws_rx = rwnx_wakeup_init("rwnx_rx_wakelock"); + rwnx_hw->ws_irqrx = rwnx_wakeup_init("rwnx_irqrx_wakelock"); + rwnx_hw->ws_pwrctrl = rwnx_wakeup_init("rwnx_pwrcrl_wakelock"); +} + +void aicwf_wakeup_lock_deinit(struct rwnx_hw *rwnx_hw) +{ + rwnx_wakeup_deinit(rwnx_hw->ws_tx); + rwnx_wakeup_deinit(rwnx_hw->ws_rx); + rwnx_wakeup_deinit(rwnx_hw->ws_irqrx); + rwnx_wakeup_deinit(rwnx_hw->ws_pwrctrl); + rwnx_hw->ws_tx = NULL; + rwnx_hw->ws_rx = NULL; + rwnx_hw->ws_irqrx = NULL; + rwnx_hw->ws_pwrctrl = NULL; +} + diff --git a/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_wakelock.h b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_wakelock.h new file mode 100644 index 0000000..9c9655a --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/rwnx_wakelock.h @@ -0,0 +1,21 @@ +#ifndef __RWNX_WAKELOCK_H +#define __RWNX_WAKELOCK_H + +#include +#include +#include + +struct wakeup_source *rwnx_wakeup_init(const char *name); +void rwnx_wakeup_deinit(struct wakeup_source *ws); + +struct wakeup_source *rwnx_wakeup_register(struct device *dev, const char *name); +void rwnx_wakeup_unregister(struct wakeup_source *ws); + +void rwnx_wakeup_lock(struct wakeup_source *ws); +void rwnx_wakeup_unlock(struct wakeup_source *ws); +void rwnx_wakeup_lock_timeout(struct wakeup_source *ws, unsigned int msec); + +void aicwf_wakeup_lock_init(struct rwnx_hw *rwnx_hw); +void 
aicwf_wakeup_lock_deinit(struct rwnx_hw *rwnx_hw); + +#endif diff --git a/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/sdio_host.c b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/sdio_host.c new file mode 100644 index 0000000..59225b3 --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/sdio_host.c @@ -0,0 +1,142 @@ +/** + * sdio_host.c + * + * SDIO host function declarations + * + * Copyright (C) AICSemi 2018-2020 + */ + + +#include "sdio_host.h" +//#include "ipc_compat.h" +#include "rwnx_tx.h" +#include "rwnx_platform.h" + +/** + **************************************************************************************** + */ +void aicwf_sdio_host_init(struct sdio_host_env_tag *env, + void *cb, + void *shared_env_ptr, + void *pthis) +{ + // Reset the environments + + // Reset the Host environment + memset(env, 0, sizeof(struct sdio_host_env_tag)); + // Save the pointer to the register base + env->pthis = pthis; +} + +/** + **************************************************************************************** + */ +volatile struct txdesc_host *aicwf_sdio_host_txdesc_get(struct sdio_host_env_tag *env, const int queue_idx) +{ + // struct ipc_shared_env_tag *shared_env_ptr = env->shared; + volatile struct txdesc_host *txdesc_free; + uint32_t used_idx = env->txdesc_used_idx[queue_idx]; + uint32_t free_idx = env->txdesc_free_idx[queue_idx]; + + // ASSERT_ERR(queue_idx < SDIO_TXQUEUE_CNT); + // ASSERT_ERR((free_idx - used_idx) <= SDIO_TXDESC_CNT); + + // Check if a free descriptor is available + if (free_idx != (used_idx + SDIO_TXDESC_CNT)) + { + // Get the pointer to the first free descriptor + // txdesc_free = shared_env_ptr->txdesc[queue_idx] + (free_idx % IPC_TXDESC_CNT); + } + else + { + txdesc_free = NULL; + } + + return txdesc_free; +} + +/** + **************************************************************************************** + */ +void aicwf_sdio_host_txdesc_push(struct sdio_host_env_tag *env, const int queue_idx, const uint64_t host_id) +{ + //printk("push, %d, %d, 0x%llx \r\n", queue_idx, env->txdesc_free_idx[queue_idx], host_id); + // Save the host id in the environment + env->tx_host_id[queue_idx][env->txdesc_free_idx[queue_idx] % SDIO_TXDESC_CNT] = host_id; + + // Increment the index + env->txdesc_free_idx[queue_idx]++; + if(env->txdesc_free_idx[queue_idx]==0x40000000) + env->txdesc_free_idx[queue_idx] = 0; +} + +/** + **************************************************************************************** + */ +void aicwf_sdio_host_tx_cfm_handler(struct sdio_host_env_tag *env, u32 *data) +{ + u32 queue_idx = 0;// data[0]; + //struct rwnx_hw *rwnx_hw = (struct rwnx_hw *)env->pthis; + struct sk_buff *skb = NULL; + struct rwnx_txhdr *txhdr; + + // TX confirmation descriptors have been received + // REG_SW_SET_PROFILING(env->pthis, SW_PROF_IRQ_E2A_TXCFM); + //while (1) + { + // Get the used index and increase it. 
We do the increase before knowing if the + // current buffer is confirmed because the callback function may call the + // ipc_host_txdesc_get() in case flow control was enabled and the index has to be + // already at the good value to ensure that the test of FIFO full is correct + //uint32_t used_idx = env->txdesc_used_idx[queue_idx]++; + uint32_t used_idx = data[1]; + uint32_t host_id = env->tx_host_id[queue_idx][used_idx % SDIO_TXDESC_CNT]; + + // Reset the host id in the array + env->tx_host_id[queue_idx][used_idx % SDIO_TXDESC_CNT] = 0; + + // call the external function to indicate that a TX packet is freed + if (host_id == 0) + { + // No more confirmations, so put back the used index at its initial value + env->txdesc_used_idx[queue_idx] = used_idx; + printk("ERROR:No more confirmations\r\n"); + //break; + } + // set the cfm status + skb = (struct sk_buff *)(uint32_t)host_id; + txhdr = (struct rwnx_txhdr *)skb->data; + txhdr->hw_hdr.cfm.status = (union rwnx_hw_txstatus)data[0]; + printk("sdio_host_tx_cfm_handler:used_idx=%d, 0x%p, status=%x\r\n",used_idx, env->pthis, txhdr->hw_hdr.cfm.status.value); + //if (env->cb.send_data_cfm(env->pthis, host_id) != 0) + if (rwnx_txdatacfm(env->pthis, (void *)host_id) != 0) + { + // No more confirmations, so put back the used index at its initial value + env->txdesc_used_idx[queue_idx] = used_idx; + env->tx_host_id[queue_idx][used_idx % SDIO_TXDESC_CNT] = host_id; + // and exit the loop + printk("ERROR:rwnx_txdatacfm,\r\n"); + // break; + } + + } +} + +int aicwf_rwnx_sdio_platform_init(struct aic_sdio_dev *sdiodev) +{ + struct rwnx_plat *rwnx_plat = NULL; + void *drvdata; + int ret = -ENODEV; + + rwnx_plat = kzalloc(sizeof(struct rwnx_plat), GFP_KERNEL); + + if (!rwnx_plat) { + return -ENOMEM; + } + + rwnx_plat->sdiodev = sdiodev; + ret = rwnx_platform_init(rwnx_plat, &drvdata); + + return ret; +} + diff --git a/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/sdio_host.h b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/sdio_host.h new file mode 100644 index 0000000..e20f147 --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/sdio_host.h @@ -0,0 +1,42 @@ +/** + * sdio_host.h + * + * SDIO host function declarations + * + * Copyright (C) AICSemi 2018-2020 + */ + + +#ifndef _SDIO_HOST_H_ +#define _SDIO_HOST_H_ + +#include "lmac_types.h" +#include "aicwf_sdio.h" + +#define SDIO_TXQUEUE_CNT NX_TXQ_CNT +#define SDIO_TXDESC_CNT NX_TXDESC_CNT + + +/// Definition of the IPC Host environment structure. 
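+/// The free/used indices below are free-running counters (reset once they
+/// reach 0x40000000); slots are addressed modulo SDIO_TXDESC_CNT, so the
+/// queue is full when txdesc_free_idx == txdesc_used_idx + SDIO_TXDESC_CNT
+/// and aicwf_sdio_host_txdesc_get() then returns NULL.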
+struct sdio_host_env_tag +{ + // Index used that points to the first free TX desc + uint32_t txdesc_free_idx[SDIO_TXQUEUE_CNT]; + // Index used that points to the first used TX desc + uint32_t txdesc_used_idx[SDIO_TXQUEUE_CNT]; + // Array storing the currently pushed host ids, per IPC queue + uint64_t tx_host_id[SDIO_TXQUEUE_CNT][SDIO_TXDESC_CNT]; + + /// Pointer to the attached object (used in callbacks and register accesses) + void *pthis; +}; + +extern void aicwf_sdio_host_init(struct sdio_host_env_tag *env, + void *cb, void *shared_env_ptr, void *pthis); + +extern void aicwf_sdio_host_txdesc_push(struct sdio_host_env_tag *env, const int queue_idx, const uint64_t host_id); + +extern void aicwf_sdio_host_tx_cfm_handler(struct sdio_host_env_tag *env, u32 *data); +extern int aicwf_rwnx_sdio_platform_init(struct aic_sdio_dev *sdiodev); + +#endif diff --git a/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/usb_host.c b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/usb_host.c new file mode 100644 index 0000000..fca8dc4 --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/usb_host.c @@ -0,0 +1,154 @@ +/** + * usb_host.c + * + * USB host function declarations + * + * Copyright (C) AICSemi 2018-2020 + */ + + +#include "usb_host.h" +//#include "ipc_compat.h" +#include "rwnx_tx.h" +#include "rwnx_platform.h" +#include "aicwf_debug.h" + +/** + **************************************************************************************** + */ +void aicwf_usb_host_init(struct usb_host_env_tag *env, + void *cb, + void *shared_env_ptr, + void *pthis) +{ + // Reset the environments + + // Reset the Host environment + memset(env, 0, sizeof(struct usb_host_env_tag)); + // Save the pointer to the register base + env->pthis = pthis; +} + +/** + **************************************************************************************** + */ +volatile struct txdesc_host *aicwf_usb_host_txdesc_get(struct usb_host_env_tag *env, const int queue_idx) +{ + // struct ipc_shared_env_tag *shared_env_ptr = env->shared; + volatile struct txdesc_host *txdesc_free = NULL; + uint32_t used_idx = env->txdesc_used_idx[queue_idx]; + uint32_t free_idx = env->txdesc_free_idx[queue_idx]; + + // ASSERT_ERR(queue_idx < SDIO_TXQUEUE_CNT); + // ASSERT_ERR((free_idx - used_idx) <= USB_TXDESC_CNT); + + // Check if a free descriptor is available + if (free_idx != (used_idx + USB_TXDESC_CNT)) + { + // Get the pointer to the first free descriptor + // txdesc_free = shared_env_ptr->txdesc[queue_idx] + (free_idx % IPC_TXDESC_CNT); + } + else + { + //txdesc_free = NULL; + } + + return txdesc_free; +} + +/** + **************************************************************************************** + */ +void aicwf_usb_host_txdesc_push(struct usb_host_env_tag *env, const int queue_idx, const uint64_t host_id) +{ + //printk("push, %d, %d, 0x%llx \r\n", queue_idx, env->txdesc_free_idx[queue_idx], host_id); + // Save the host id in the environment + env->tx_host_id[queue_idx][env->txdesc_free_idx[queue_idx] % USB_TXDESC_CNT] = host_id; + + // Increment the index + env->txdesc_free_idx[queue_idx]++; + if(env->txdesc_free_idx[queue_idx]==0x40000000) + env->txdesc_free_idx[queue_idx] = 0; +} + +/** + **************************************************************************************** + */ +void aicwf_usb_host_tx_cfm_handler(struct usb_host_env_tag *env, u32 *data) +{ + u32 queue_idx = 0;//data[0]; + //struct rwnx_hw *rwnx_hw = (struct rwnx_hw *)env->pthis; + struct sk_buff *skb = NULL; + struct rwnx_txhdr *txhdr; + AICWFDBG(LOGTRACE, "%s Enter 
\n", __func__); + //printk("sdio_host_tx_cfm_handler, %d, 0x%08x\r\n", queue_idx, data[1]); + // TX confirmation descriptors have been received + // REG_SW_SET_PROFILING(env->pthis, SW_PROF_IRQ_E2A_TXCFM); + //while (1) + { + // Get the used index and increase it. We do the increase before knowing if the + // current buffer is confirmed because the callback function may call the + // ipc_host_txdesc_get() in case flow control was enabled and the index has to be + // already at the good value to ensure that the test of FIFO full is correct + //uint32_t used_idx = env->txdesc_used_idx[queue_idx]++; + uint32_t used_idx = data[1]; + //uint64_t host_id = env->tx_host_id[queue_idx][used_idx % USB_TXDESC_CNT]; + unsigned long host_id = env->tx_host_id[queue_idx][used_idx % USB_TXDESC_CNT]; + + // Reset the host id in the array + env->tx_host_id[queue_idx][used_idx % USB_TXDESC_CNT] = 0; + + // call the external function to indicate that a TX packet is freed + if (host_id == 0) + { + // No more confirmations, so put back the used index at its initial value + env->txdesc_used_idx[queue_idx] = used_idx; + AICWFDBG(LOGERROR, "ERROR:No more confirmations\r\n"); + return; + //break; + } + AICWFDBG(LOGINFO, "usb_host_tx_cfm_handler:used_idx=%d, hostid=%p, 0x%p, status=%x\r\n",used_idx, (void *)host_id, env->pthis, data[0]); + + // set the cfm status + skb = (struct sk_buff *)host_id; + txhdr = (struct rwnx_txhdr *)skb->data; + txhdr->hw_hdr.cfm.status = (union rwnx_hw_txstatus)data[0]; + //txhdr->hw_hdr.status = data[1]; + //if (env->cb.send_data_cfm(env->pthis, host_id) != 0) + if (rwnx_txdatacfm(env->pthis, (void *)host_id) != 0) + { + // No more confirmations, so put back the used index at its initial value + env->txdesc_used_idx[queue_idx] = used_idx; + env->tx_host_id[queue_idx][used_idx % USB_TXDESC_CNT] = host_id; + // and exit the loop + AICWFDBG(LOGERROR, "ERROR:rwnx_txdatacfm,\r\n"); + // break; + } + + } +} + +int aicwf_rwnx_usb_platform_init(struct aic_usb_dev *usbdev) +{ + struct rwnx_plat *rwnx_plat = NULL; + void *drvdata; + int ret = -ENODEV; + + rwnx_plat = kzalloc(sizeof(struct rwnx_plat), GFP_KERNEL); + + if (!rwnx_plat) + return -ENOMEM; + +// rwnx_plat->pci_dev = pci_dev; + rwnx_plat->usbdev = usbdev; + + ret = rwnx_platform_init(rwnx_plat, &drvdata); +#if 0 + pci_set_drvdata(pci_dev, drvdata); + + if (ret) + rwnx_plat->deinit(rwnx_plat); +#endif + return ret; +} + diff --git a/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/usb_host.h b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/usb_host.h new file mode 100644 index 0000000..70b7eb2 --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic8800_fdrv/usb_host.h @@ -0,0 +1,43 @@ +/** + * usb_host.h + * + * USB host function declarations + * + * Copyright (C) AICSemi 2018-2020 + */ + + +#ifndef _USB_HOST_H_ +#define _USB_HOST_H_ + +#include "lmac_types.h" +#include "aicwf_usb.h" + +#define USB_TXQUEUE_CNT NX_TXQ_CNT +#define USB_TXDESC_CNT NX_TXDESC_CNT + + +/// Definition of the IPC Host environment structure. 
+struct usb_host_env_tag +{ + // Index used that points to the first free TX desc + uint32_t txdesc_free_idx[USB_TXQUEUE_CNT]; + // Index used that points to the first used TX desc + uint32_t txdesc_used_idx[USB_TXQUEUE_CNT]; + // Array storing the currently pushed host ids, per IPC queue + //uint64_t tx_host_id[USB_TXQUEUE_CNT][USB_TXDESC_CNT]; + unsigned long tx_host_id[USB_TXQUEUE_CNT][USB_TXDESC_CNT]; + + /// Pointer to the attached object (used in callbacks and register accesses) + void *pthis; +}; + +extern void aicwf_usb_host_init(struct usb_host_env_tag *env, + void *cb, void *shared_env_ptr, void *pthis); + +extern void aicwf_usb_host_txdesc_push(struct usb_host_env_tag *env, const int queue_idx, const uint64_t host_id); + +extern void aicwf_usb_host_tx_cfm_handler(struct usb_host_env_tag *env, u32 *data); +extern int aicwf_rwnx_usb_platform_init(struct aic_usb_dev *usbdev); + +#endif diff --git a/usr/src/AIC8800/drivers/aic8800/aic_load_fw/Kconfig b/usr/src/AIC8800/drivers/aic8800/aic_load_fw/Kconfig new file mode 100644 index 0000000..4c2e90a --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic_load_fw/Kconfig @@ -0,0 +1,5 @@ +config AIC_LOADFW_SUPPORT + tristate "AIC8800 Load Firmware Support" + help + This is support for aic load firmware driver. + diff --git a/usr/src/AIC8800/drivers/aic8800/aic_load_fw/Makefile b/usr/src/AIC8800/drivers/aic8800/aic_load_fw/Makefile new file mode 100644 index 0000000..58d7c93 --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic_load_fw/Makefile @@ -0,0 +1,124 @@ +CONFIG_USB_SUPPORT =y +CONFIG_RFTEST =y +CONFIG_USB_BT =y +CONFIG_USB_MSG_EP = y +CONFIG_USB_NO_TRANS_DMA_MAP = n +CONFIG_M2D_OTA_AUTO_SUPPORT = n +CONFIG_LINK_DET_5G = y +CONFIG_FOR_IPCAM = n +CONFIG_USB_SUSPEND_REBOOT_TIME = n +CONFIG_SUPPORT_USB_SUSP = n +CONFIG_RADAR_OR_IR_DETECT =n +CONFIG_LOAD_BT_CONF = n +CONFIG_FLASH_CALRES = n + +# Need to set fw path in BOARD_KERNEL_CMDLINE +CONFIG_USE_FW_REQUEST ?= n +CONFIG_PREALLOC_RX_SKB ?= n +CONFIG_PREALLOC_TXQ ?= y +CONFIG_BAND_STEERING = n +CONFIG_PRBREQ_REPORT = n + +# Platform support list +CONFIG_PLATFORM_ROCKCHIP ?= n +CONFIG_PLATFORM_ALLWINNER ?= n +CONFIG_PLATFORM_AMLOGIC ?= n +CONFIG_PLATFORM_HI ?= n +CONFIG_PLATFORM_UBUNTU ?= y + +CONFIG_AIC_LOADFW_SUPPORT = m +MODULE_NAME = aic_load_fw +# CONFIG_AIC_FW_PATH = "/vendor/etc/firmware" +# export CONFIG_AIC_FW_PATH + +ifeq ($(CONFIG_USB_SUPPORT), y) +ccflags-y += -DAICWF_USB_SUPPORT +endif +ccflags-$(CONFIG_RFTEST) += -DCONFIG_RFTEST +ccflags-$(CONFIG_USB_BT) += -DCONFIG_USB_BT +ccflags-$(CONFIG_USB_MSG_EP) += -DCONFIG_USB_MSG_EP +ccflags-$(CONFIG_USB_NO_TRANS_DMA_MAP) += -DCONFIG_USB_NO_TRANS_DMA_MAP +ccflags-$(CONFIG_M2D_OTA_AUTO_SUPPORT) += -DCONFIG_M2D_OTA_AUTO_SUPPORT +ccflags-$(CONFIG_LINK_DET_5G) += -DCONFIG_LINK_DET_5G +ccflags-$(CONFIG_FOR_IPCAM) += -DCONFIG_FOR_IPCAM +ccflags-$(CONFIG_USE_FW_REQUEST) += -DCONFIG_USE_FW_REQUEST +ccflags-$(CONFIG_PREALLOC_RX_SKB) += -DCONFIG_PREALLOC_RX_SKB +ccflags-$(CONFIG_PREALLOC_TXQ) += -DCONFIG_PREALLOC_TXQ +ccflags-$(CONFIG_USB_SUSPEND_REBOOT_TIME) += -DCONFIG_USB_SUSPEND_REBOOT_TIME +ccflags-$(CONFIG_SUPPORT_USB_SUSP) += -DCONFIG_SUPPORT_USB_SUSP +ccflags-$(CONFIG_RADAR_OR_IR_DETECT) += -DCONFIG_RADAR_OR_IR_DETECT +ccflags-$(CONFIG_LOAD_BT_CONF) += -DCONFIG_LOAD_BT_CONF +ccflags-$(CONFIG_FLASH_CALRES) += -DCONFIG_FLASH_CALRES +ccflags-$(CONFIG_BAND_STEERING) += -DCONFIG_BAND_STEERING +ccflags-$(CONFIG_PRBREQ_REPORT) += -DCONFIG_PRBREQ_REPORT + +obj-$(CONFIG_AIC_LOADFW_SUPPORT) := $(MODULE_NAME).o +$(MODULE_NAME)-y := 
aic_bluetooth_main.o \ + aicbluetooth.o \ + aicwf_usb.o \ + aic_txrxif.o \ + aicbluetooth_cmds.o \ + aic_compat_8800d80.o \ + aic_compat_8800d80x2.o \ + md5.o + +$(MODULE_NAME)-$(CONFIG_PREALLOC_RX_SKB) += aicwf_rx_prealloc.o +$(MODULE_NAME)-$(CONFIG_PREALLOC_TXQ) += aicwf_txq_prealloc.o + + + +ifeq ($(CONFIG_PLATFORM_ROCKCHIP), y) +ccflags-$(CONFIG_PLATFORM_ROCKCHIP) += -DCONFIG_PLATFORM_ROCKCHIP +#KDIR := /home/yaya/E/Rockchip/3229/Android7/RK3229_ANDROID7.1_v1.01_20170914/rk3229_Android7.1_v1.01_xml0914/kernel +#ARCH ?= arm +#CROSS_COMPILE ?= /home/yaya/E/Rockchip/3229/Android7/RK3229_ANDROID7.1_v1.01_20170914/rk3229_Android7.1_v1.01_xml0914/prebuilts/gcc/linux-x86/arm/arm-eabi-4.6/bin/arm-eabi- +KDIR := /home/yaya/E/Rockchip/3229/Android9/rk3229_android9.0_box/kernel +ARCH ?= arm +CROSS_COMPILE ?= /home/yaya/E/Rockchip/3229/Android9/rk3229_android9.0_box/prebuilts/gcc/linux-x86/arm/gcc-linaro-6.3.1-2017.05-x86_64_arm-linux-gnueabihf/bin/arm-linux-gnueabihf- +#KDIR := /home/yaya/E/Rockchip/3399/rk3399-android-10/kernel +#ARCH ?= arm64 +#CROSS_COMPILE ?= /home/yaya/E/Rockchip/3399/rk3399-android-10/prebuilts/gcc/linux-x86/aarch64/gcc-linaro-6.3.1-2017.05-x86_64_aarch64-linux-gnu/bin/aarch64-linux-gnu- +endif + +ifeq ($(CONFIG_PLATFORM_ALLWINNER), y) +ccflags-$(CONFIG_PLATFORM_ALLWINNER) += -DCONFIG_PLATFORM_ALLWINNER +KDIR := /home/yaya/E/Allwinner/R818/R818/AndroidQ/lichee/kernel/linux-4.9 +ARCH ?= arm64 +CROSS_COMPILE ?= /home/yaya/E/Allwinner/R818/R818/AndroidQ/lichee/out/gcc-linaro-5.3.1-2016.05-x86_64_aarch64-linux-gnu/bin/aarch64-linux-gnu- +endif + +ifeq ($(CONFIG_PLATFORM_AMLOGIC), y) +ccflags-$(CONFIG_PLATFORM_AMLOGIC) += -DCONFIG_PLATFORM_AMLOGIC +ARCH := arm +CROSS_COMPILE := /home/yaya/D/Workspace/CyberQuantum/JinHaoYue/amls905x3/SDK/20191101-0tt-asop/android9.0/prebuilts/gcc/linux-x86/arm/arm-linux-androideabi-4.9/bin/arm-linux-androidkernel- +KDIR := /home/yaya/D/Workspace/CyberQuantum/JinHaoYue/amls905x3/SDK/20191101-0tt-asop/android9.0/out/target/product/u202/obj/KERNEL_OBJ/ + +endif + +ifeq ($(CONFIG_PLATFORM_UBUNTU), y) +ccflags-$(CONFIG_PLATFORM_UBUNTU) += -DCONFIG_PLATFORM_UBUNTU +KDIR := /lib/modules/$(shell uname -r)/build +PWD := $(shell pwd) +KVER := $(shell uname -r) +MODDESTDIR := /lib/modules/$(KVER)/kernel/drivers/net/wireless/aic8800 +SUBARCH = $(shell uname -m | sed -e s/i.86/i386/ -e s/armv.l/arm/ -e s/aarch64/arm64/) +ARCH ?= $(SUBARCH) +CROSS_COMPILE := +endif + + +all: modules +modules: + make -C $(KDIR) M=$(PWD) ARCH=$(ARCH) CROSS_COMPILE=$(CROSS_COMPILE) modules + +install: + mkdir -p $(MODDESTDIR) + install -p -m 644 $(MODULE_NAME).ko $(MODDESTDIR)/ + /sbin/depmod -a ${KVER} + +uninstall: + rm -rfv $(MODDESTDIR)/$(MODULE_NAME).ko + /sbin/depmod -a ${KVER} + +clean: + rm -rf *.o *.ko *.o.* *.mod.* modules.* Module.* .a* .o* .*.o.* *.mod .tmp* .cache.mk .modules.order.cmd .Module.symvers.cmd diff --git a/usr/src/AIC8800/drivers/aic8800/aic_load_fw/aic_bluetooth_main.c b/usr/src/AIC8800/drivers/aic8800/aic_load_fw/aic_bluetooth_main.c new file mode 100755 index 0000000..1b337c2 --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic_load_fw/aic_bluetooth_main.c @@ -0,0 +1,83 @@ +#include +#include +#include + +#include "aicwf_usb.h" +#include "rwnx_version_gen.h" +#include "aicwf_rx_prealloc.h" +#include "aicwf_debug.h" +#include "aicwf_txq_prealloc.h" + + +#define DRV_CONFIG_FW_NAME "fw.bin" +#define DRV_DESCRIPTION "AIC BLUETOOTH" +#define DRV_COPYRIGHT "Copyright(c) 2015-2020 AICSemi" +#define DRV_AUTHOR "AICSemi" +#define DRV_VERS_MOD "1.0" + +int 
testmode = FW_NORMAL_MODE; +int adap_test = 0; +char paringid[100]; +int n_para = 1; +int ble_scan_wakeup_reboot_time = 1000; +int aicwf_dbg_level = LOGERROR|LOGINFO|LOGDEBUG|LOGTRACE; +int flash_erase_len = 0x400000; +uint32_t ad_data_filter_mask = 0; +uint32_t gpio_num = 2;//default select gpiob2 for fw_wakeup_host +uint32_t gpio_dft_lvl = 0;//0:defalut pull down, 1:default pull up + +module_param(aicwf_dbg_level, int, 0660); +module_param(ble_scan_wakeup_reboot_time, int, 0660); +module_param(testmode, int, 0660); +module_param(adap_test, int, 0660); +module_param_string(paringid, paringid, 100, 0660); +module_param(flash_erase_len, int, 0660); + + +static void aicsmac_driver_register(void) +{ + aicwf_usb_register(); +} + +static int __init aic_bluetooth_mod_init(void) +{ + printk("%s \n", __func__); + printk("RELEASE DATE:%s \r\n", RELEASE_DATE); +#ifdef CONFIG_PREALLOC_RX_SKB + aicwf_prealloc_init(); +#endif + + aicsmac_driver_register(); + return 0; +} + +static void __exit aic_bluetooth_mod_exit(void) +{ + printk("%s\n", __func__); + aicwf_usb_exit(); + +#ifdef CONFIG_PREALLOC_RX_SKB + aicwf_prealloc_exit(); +#endif + +#ifdef CONFIG_PREALLOC_TXQ + aicwf_prealloc_txq_free(); +#endif +} + + +module_init(aic_bluetooth_mod_init); +module_exit(aic_bluetooth_mod_exit); + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 13, 0) +MODULE_IMPORT_NS("VFS_internal_I_am_really_a_filesystem_and_am_NOT_a_driver"); +#elif LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0) +MODULE_IMPORT_NS(VFS_internal_I_am_really_a_filesystem_and_am_NOT_a_driver); +#endif + +MODULE_FIRMWARE(DRV_CONFIG_FW_NAME); +MODULE_DESCRIPTION(DRV_DESCRIPTION); +MODULE_VERSION(DRV_VERS_MOD); +MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR); +MODULE_LICENSE("GPL"); + diff --git a/usr/src/AIC8800/drivers/aic8800/aic_load_fw/aic_compat_8800d80.c b/usr/src/AIC8800/drivers/aic8800/aic_load_fw/aic_compat_8800d80.c new file mode 100644 index 0000000..453b935 --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic_load_fw/aic_compat_8800d80.c @@ -0,0 +1,677 @@ +#include "aic_txrxif.h" +#include "aicwf_usb.h" +#include "aicbluetooth.h" +#include "aic_compat_8800d80.h" +#include "aicwf_debug.h" +extern int ble_scan_wakeup_reboot_time; +extern uint32_t ad_data_filter_mask; +extern uint32_t gpio_num;//default select gpiob2 for fw_wakeup_host +extern uint32_t gpio_dft_lvl;//0:defalut pull down, 1:default pull up + +int rwnx_plat_bin_fw_upload_2(struct aic_usb_dev *usbdev, u32 fw_addr, + char *filename); +int rwnx_request_firmware_common(struct aic_usb_dev *usbdev, + u32** buffer, const char *filename); +void rwnx_plat_userconfig_parsing(char *buffer, int size); +void rwnx_release_firmware_common(u32** buffer); + +extern int testmode; +extern u8 chip_id; +extern u8 chip_mcu_id; + +typedef u32 (*array2_tbl_t)[2]; + +#define AIC_PATCH_MAGIG_NUM 0x48435450 // "PTCH" +#define AIC_PATCH_MAGIG_NUM_2 0x50544348 // "HCTP" +#define AIC_PATCH_BLOCK_MAX 4 + +typedef struct { + uint32_t magic_num; + uint32_t pair_start; + uint32_t magic_num_2; + uint32_t pair_count; + uint32_t block_dst[AIC_PATCH_BLOCK_MAX]; + uint32_t block_src[AIC_PATCH_BLOCK_MAX]; + uint32_t block_size[AIC_PATCH_BLOCK_MAX]; // word count +} aic_patch_t; + + +#define AIC_PATCH_OFST(mem) ((size_t) &((aic_patch_t *)0)->mem) +#define AIC_PATCH_ADDR(mem) ((u32) (aic_patch_str_base + AIC_PATCH_OFST(mem))) + +#define USER_PWROFST_COVER_CALIB_FLAG (0x01U << 0) +#define USER_CHAN_MAX_TXPWR_EN_FLAG (0x01U << 1) +#define USER_TX_USE_ANA_F_FLAG (0x01U << 2) +#define USER_APM_PRBRSP_OFFLOAD_DISABLE_FLAG 
(0x01U << 3) +#define USER_HE_MU_EDCA_UPDATE_DISABLE_FLAG (0x01U << 4) + +#define CFG_PWROFST_COVER_CALIB 1 +#ifdef CONFIG_POWER_LIMIT +#define CFG_USER_CHAN_MAX_TXPWR_EN 1 +#else +#define CFG_USER_CHAN_MAX_TXPWR_EN 0 +#endif +#define CFG_USER_TX_USE_ANA_F 0 +#ifdef CONFIG_PRBREQ_REPORT +#define CFG_USER_APM_PRBRSP_OFFLOAD_DISABLE 1 +#else +#define CFG_USER_APM_PRBRSP_OFFLOAD_DISABLE 0 +#endif + +#define CFG_USER_EXT_FLAGS_EN (CFG_PWROFST_COVER_CALIB || CFG_USER_CHAN_MAX_TXPWR_EN || CFG_USER_TX_USE_ANA_F|| CFG_USER_APM_PRBRSP_OFFLOAD_DISABLE) + +u32 patch_tbl_d80[][2] = +{ + #ifdef USE_5G + {0x00b4, 0xf3010001}, + #else + {0x00b4, 0xf3010000}, + #endif +#ifdef CONFIG_PLATFORM_HI + {0x0170, 0x00010001},//rx aggr counter +#else + {0x0170, 0x0001000A},//rx aggr counter +#endif + +#if 0//CFG_USER_EXT_FLAGS_EN + {0x0188, 0x00000000 +#if CFG_PWROFST_COVER_CALIB + | USER_PWROFST_COVER_CALIB_FLAG +#endif +#if CFG_USER_CHAN_MAX_TXPWR_EN + | USER_CHAN_MAX_TXPWR_EN_FLAG +#endif +#if CFG_USER_TX_USE_ANA_F + | USER_TX_USE_ANA_F_FLAG +#endif +#if CFG_USER_APM_PRBRSP_OFFLOAD_DISABLE + | USER_APM_PRBRSP_OFFLOAD_DISABLE_FLAG +#endif + }, // user_ext_flags +#endif + +#ifdef CONFIG_RADAR_OR_IR_DETECT + {0x0019c,0x00000B00}, +#endif +}; + +//adap test +u32 adaptivity_patch_tbl_d80[][2] = { + {0x000C, 0x0000320A}, //linkloss_thd + {0x009C, 0x00000000}, //ac_param_conf + {0x0168, 0x00010000}, //tx_adaptivity_en +}; + +u32 syscfg_tbl_masked_8800d80[][3] = { +}; + +u32 syscfg_tbl_8800d80[][2] = { + #ifdef CONFIG_PMIC_SETTING + {0x70001408, 0x00000000}, // stop wdg + #endif /* CONFIG_PMIC_SETTING */ +}; + +extern int adap_test; + +#define NEW_PATCH_BUFFER_MAP 1 + +int aicwf_patch_config_8800d80(struct aic_usb_dev *usb_dev) +{ + u32 rd_patch_addr; + u32 aic_patch_addr; + u32 config_base, aic_patch_str_base; + #if (NEW_PATCH_BUFFER_MAP) + u32 patch_buff_addr, patch_buff_base, rd_version_addr, rd_version_val; + #endif + uint32_t start_addr = 0x001D7000; + u32 patch_addr = start_addr; + u32 patch_cnt = sizeof(patch_tbl_d80) / 4 / 2; + struct dbg_mem_read_cfm rd_patch_addr_cfm; + int ret = 0; + int cnt = 0; + //adap test + int adap_patch_cnt = 0; + + if (adap_test) { + AICWFDBG(LOGINFO, "%s adap test \r\n", __func__); + adap_patch_cnt = sizeof(adaptivity_patch_tbl_d80)/sizeof(u32)/2; + } + + if (chip_id == CHIP_REV_U01) { + rd_patch_addr = RAM_FMAC_FW_ADDR_8800D80 + 0x0198; + } else { + rd_patch_addr = RAM_FMAC_FW_ADDR_8800D80_U02 + 0x0198; + } + aic_patch_addr = rd_patch_addr + 8; + + AICWFDBG(LOGERROR, "Read FW mem: %08x\n", rd_patch_addr); + if ((ret = rwnx_send_dbg_mem_read_req(usb_dev, rd_patch_addr, &rd_patch_addr_cfm))) { + AICWFDBG(LOGERROR, "setting base[0x%x] rd fail: %d\n", rd_patch_addr, ret); + return ret; + } + AICWFDBG(LOGERROR, "%x=%x\n", rd_patch_addr_cfm.memaddr, rd_patch_addr_cfm.memdata); + config_base = rd_patch_addr_cfm.memdata; + + if ((ret = rwnx_send_dbg_mem_read_req(usb_dev, aic_patch_addr, &rd_patch_addr_cfm))) { + AICWFDBG(LOGERROR, "patch_str_base[0x%x] rd fail: %d\n", aic_patch_addr, ret); + return ret; + } + AICWFDBG(LOGERROR, "%x=%x\n", rd_patch_addr_cfm.memaddr, rd_patch_addr_cfm.memdata); + aic_patch_str_base = rd_patch_addr_cfm.memdata; + + #if (NEW_PATCH_BUFFER_MAP) + if (chip_id == CHIP_REV_U01) { + rd_version_addr = RAM_FMAC_FW_ADDR_8800D80 + 0x01C; + } else { + rd_version_addr = RAM_FMAC_FW_ADDR_8800D80_U02 + 0x01C; + } + if ((ret = rwnx_send_dbg_mem_read_req(usb_dev, rd_version_addr, &rd_patch_addr_cfm))) { + AICWFDBG(LOGERROR, "version val[0x%x] rd fail: %d\n", rd_version_addr, 
ret); + return ret; + } + rd_version_val = rd_patch_addr_cfm.memdata; + AICWFDBG(LOGINFO, "rd_version_val=%08X\n", rd_version_val); + usb_dev->fw_version_uint = rd_version_val; + if (rd_version_val > 0x06090100) { + patch_buff_addr = rd_patch_addr + 12; + ret = rwnx_send_dbg_mem_read_req(usb_dev, patch_buff_addr, &rd_patch_addr_cfm); + if (ret) { + AICWFDBG(LOGERROR, "patch buf rd fail\n"); + return ret; + } + AICWFDBG(LOGINFO, "%x=%x\n", rd_patch_addr_cfm.memaddr, rd_patch_addr_cfm.memdata); + patch_buff_base = rd_patch_addr_cfm.memdata; + patch_addr = start_addr = patch_buff_base; + } + #endif + + if ((ret = rwnx_send_dbg_mem_write_req(usb_dev, AIC_PATCH_ADDR(magic_num), AIC_PATCH_MAGIG_NUM))) { + AICWFDBG(LOGERROR, "maigic_num[0x%x] write fail: %d\n", AIC_PATCH_ADDR(magic_num), ret); + return ret; + } + + if ((ret = rwnx_send_dbg_mem_write_req(usb_dev, AIC_PATCH_ADDR(magic_num_2), AIC_PATCH_MAGIG_NUM_2))) { + AICWFDBG(LOGERROR, "maigic_num[0x%x] write fail: %d\n", AIC_PATCH_ADDR(magic_num_2), ret); + return ret; + } + + if ((ret = rwnx_send_dbg_mem_write_req(usb_dev, AIC_PATCH_ADDR(pair_start), patch_addr))) { + AICWFDBG(LOGERROR, "pair_start[0x%x] write fail: %d\n", AIC_PATCH_ADDR(pair_start), ret); + return ret; + } + + if ((ret = rwnx_send_dbg_mem_write_req(usb_dev, AIC_PATCH_ADDR(pair_count), patch_cnt + adap_patch_cnt))) { + AICWFDBG(LOGERROR, "pair_count[0x%x] write fail: %d\n", AIC_PATCH_ADDR(pair_count), ret); + return ret; + } + + for (cnt = 0; cnt < patch_cnt; cnt++) { + if ((ret = rwnx_send_dbg_mem_write_req(usb_dev, start_addr+8*cnt, patch_tbl_d80[cnt][0]+config_base))) { + AICWFDBG(LOGERROR, "%x write fail\n", start_addr+8*cnt); + return ret; + } + if ((ret = rwnx_send_dbg_mem_write_req(usb_dev, start_addr+8*cnt+4, patch_tbl_d80[cnt][1]))) { + AICWFDBG(LOGERROR, "%x write fail\n", start_addr+8*cnt+4); + return ret; + } + } + + if (adap_test){ + int tmp_cnt = patch_cnt + adap_patch_cnt; + for (cnt = patch_cnt; cnt < tmp_cnt; cnt++) { + int tbl_idx = cnt - patch_cnt; + if ((ret = rwnx_send_dbg_mem_write_req(usb_dev, start_addr+8*cnt, adaptivity_patch_tbl_d80[tbl_idx][0]+config_base))) { + AICWFDBG(LOGERROR, "%x write fail\n", start_addr+8*cnt); + return ret; + } + if ((ret = rwnx_send_dbg_mem_write_req(usb_dev, start_addr+8*cnt+4, adaptivity_patch_tbl_d80[tbl_idx][1]))) { + AICWFDBG(LOGERROR, "%x write fail\n", start_addr+8*cnt+4); + return ret; + } + } + } + + /* + * Patch block 0 ~ 3, that is void by default, can be set as: + * + * const u32 patch_block_0[3] = {0x11223344, 0x55667788, 0xaabbccdd}; + * if ((ret = rwnx_send_dbg_mem_write_req(usb_dev, (u32)(&aic_patch->block_dst[0]), 0x160000))) { + * printk("block_dst [0x%x] write fail: %d\n", (u32)(&aic_patch->block_dst[0]), ret); + * } + * if ((ret = rwnx_send_dbg_mem_write_req(usb_dev, (u32)(&aic_patch->block_src[0]), 0x307000))) { + * printk("block_src [0x%x] write fail: %d\n", (u32)(&aic_patch->block_src[0]), ret); + * } + * if ((ret = rwnx_send_dbg_mem_write_req(usb_dev, (u32)(&aic_patch->block_size[0]), sizeof(patch_block_0) / sizeof(u32)))) { + * printk("block_size[0x%x] write fail: %d\n", (u32)(&aic_patch->block_size[0]), ret); + * } + * if ((ret = rwnx_send_dbg_mem_block_write_req(usb_dev, 0x307000, sizeof(patch_block_0), patch_block_0))) { + * printk("blk set fail: %d\n", ret); + * } + */ + if ((ret = rwnx_send_dbg_mem_write_req(usb_dev, AIC_PATCH_ADDR(block_size[0]), 0))) { + AICWFDBG(LOGERROR, "block_size[0x%x] write fail: %d\n", AIC_PATCH_ADDR(block_size[0]), ret); + return ret; + } + if ((ret = 
rwnx_send_dbg_mem_write_req(usb_dev, AIC_PATCH_ADDR(block_size[1]), 0))) { + AICWFDBG(LOGERROR, "block_size[0x%x] write fail: %d\n", AIC_PATCH_ADDR(block_size[1]), ret); + return ret; + } + if ((ret = rwnx_send_dbg_mem_write_req(usb_dev, AIC_PATCH_ADDR(block_size[2]), 0))) { + AICWFDBG(LOGERROR, "block_size[0x%x] write fail: %d\n", AIC_PATCH_ADDR(block_size[2]), ret); + return ret; + } + if ((ret = rwnx_send_dbg_mem_write_req(usb_dev, AIC_PATCH_ADDR(block_size[3]), 0))) { + AICWFDBG(LOGERROR, "block_size[0x%x] write fail: %d\n", AIC_PATCH_ADDR(block_size[3]), ret); + return ret; + } + + return 0; +} + + +#if 0 +extern char aic_fw_path[200]; + +int rwnx_plat_userconfig_load_8800d80(struct aic_usb_dev *usb_dev){ + int size; + u32 *dst=NULL; + char *filename = FW_USERCONFIG_NAME_8800D80; + + AICWFDBG(LOGINFO, "userconfig file path:%s \r\n", filename); + + /* load file */ + size = rwnx_request_firmware_common(usb_dev, &dst, filename); + if (size <= 0) { + AICWFDBG(LOGERROR, "wrong size of firmware file\n"); + dst = NULL; + return 0; + } + + /* Copy the file on the Embedded side */ + AICWFDBG(LOGINFO, "### Load file done: %s, size=%d\n", filename, size); + + rwnx_plat_userconfig_parsing((char *)dst, size); + + rwnx_release_firmware_common(&dst); + + AICWFDBG(LOGINFO, "userconfig download complete\n\n"); + return 0; + +} +#endif +int system_config_8800d80(struct aic_usb_dev *usb_dev){ + int syscfg_num; + int ret, cnt; + const u32 mem_addr = 0x40500000; + struct dbg_mem_read_cfm rd_mem_addr_cfm; + ret = rwnx_send_dbg_mem_read_req(usb_dev, mem_addr, &rd_mem_addr_cfm); + if (ret) { + printk("%x rd fail: %d\n", mem_addr, ret); + return ret; + } + if (((rd_mem_addr_cfm.memdata >> 25) & 0x01UL) == 0x00UL) { + chip_mcu_id = 1; + } + chip_id = (u8)(rd_mem_addr_cfm.memdata >> 16); + printk("chip_id=%x, chip_mcu_id = %d\n", chip_id, chip_mcu_id); + #if 1 + syscfg_num = sizeof(syscfg_tbl_8800d80) / sizeof(u32) / 2; + for (cnt = 0; cnt < syscfg_num; cnt++) { + ret = rwnx_send_dbg_mem_write_req(usb_dev, syscfg_tbl_8800d80[cnt][0], syscfg_tbl_8800d80[cnt][1]); + if (ret) { + printk("%x write fail: %d\n", syscfg_tbl_8800d80[cnt][0], ret); + return ret; + } + } + syscfg_num = sizeof(syscfg_tbl_masked_8800d80) / sizeof(u32) / 3; + for (cnt = 0; cnt < syscfg_num; cnt++) { + ret = rwnx_send_dbg_mem_mask_write_req(usb_dev, + syscfg_tbl_masked_8800d80[cnt][0], syscfg_tbl_masked_8800d80[cnt][1], syscfg_tbl_masked_8800d80[cnt][2]); + if (ret) { + printk("%x mask write fail: %d\n", syscfg_tbl_masked_8800d80[cnt][0], ret); + return ret; + } + } + #endif + return 0; +} + + +static int aicbt_ext_patch_data_load(struct aic_usb_dev *usb_dev, struct aicbt_patch_info_t *patch_info, const char *filename) +{ + int ret = 0; + uint32_t ext_patch_nb = patch_info->ext_patch_nb; + char ext_patch_file_name[50]; + int index = 0; + uint32_t id = 0; + uint32_t addr = 0; + + + if (ext_patch_nb > 0){ + + for (index = 0; index < patch_info->ext_patch_nb; index++){ + id = *(patch_info->ext_patch_param + (index * 2)); + addr = *(patch_info->ext_patch_param + (index * 2) + 1); + memset(ext_patch_file_name, 0, sizeof(ext_patch_file_name)); + sprintf(ext_patch_file_name,"%s%d.bin", + filename, + id); + AICWFDBG(LOGDEBUG, "%s ext_patch_file_name:%s ext_patch_id:%x ext_patch_addr:%x \r\n", + __func__,ext_patch_file_name, id, addr); + + if (rwnx_plat_bin_fw_upload_android(usb_dev, addr, ext_patch_file_name)) { + ret = -1; + break; + } + } + } + return ret; +} + + +int aicfw_download_fw_8800d80(struct aic_usb_dev *usb_dev) +{ + struct 
aicbt_patch_table *head = NULL; + struct aicbt_patch_info_t patch_info = { + .info_len = 0, + .adid_addrinf = 0, + .addr_adid = 0, + .patch_addrinf = 0, + .addr_patch = 0, + .reset_addr = 0, + .reset_val = 0, + .adid_flag_addr = 0, + .adid_flag = 0, + }; + + int i = 0; + +#if 0 + if (chip_id == CHIP_REV_U01) { + head = aicbt_patch_table_alloc(usb_dev, FW_PATCH_TABLE_NAME_8800D80); + } else { + head = aicbt_patch_table_alloc(usb_dev, FW_PATCH_TABLE_NAME_8800D80_U02); + } + if (head == NULL){ + printk("aicbt_patch_table_alloc fail\n"); + return -1; + } + + if(head == NULL){ + return -1; + } + if (chip_id == CHIP_REV_U01) { + patch_info.addr_adid = FW_RAM_ADID_BASE_ADDR_8800D80; + patch_info.addr_patch = FW_RAM_PATCH_BASE_ADDR_8800D80; + } else if (chip_id == CHIP_REV_U02 || chip_id == CHIP_REV_U03) { + patch_info.addr_adid = FW_RAM_ADID_BASE_ADDR_8800D80_U02; + patch_info.addr_patch = FW_RAM_PATCH_BASE_ADDR_8800D80_U02; + } + aicbt_patch_info_unpack(&patch_info, head); + if(patch_info.info_len == 0) { + printk("%s, aicbt_patch_info_unpack fail\n", __func__); + return -1; + } + + printk("addr_adid 0x%x, addr_patch 0x%x\n", patch_info.addr_adid, patch_info.addr_patch); +#endif + if(testmode == FW_NORMAL_MODE){ + + if (chip_id != CHIP_REV_U01){ + #if 0 + if(rwnx_plat_bin_fw_upload_android(usb_dev, patch_info.addr_adid, FW_ADID_BASE_NAME_8800D80_U02)) { + return -1; + } + if(rwnx_plat_bin_fw_upload_android(usb_dev, patch_info.addr_patch, FW_PATCH_BASE_NAME_8800D80_U02)) { + return -1; + } + + if (aicbt_ext_patch_data_load(usb_dev, &patch_info, FW_PATCH_BASE_NAME_8800D80_U02_EXT)) { + return -1; + } + + if (aicbt_patch_table_load(usb_dev, head)) { + return -1; + } + + #endif + if (IS_CHIP_ID_H()){ + if(rwnx_plat_bin_fw_upload_android(usb_dev, RAM_FMAC_FW_ADDR_8800D80_U02, FW_BASE_NAME_8800D80_H_U02)) + return -1; + } else { + if(rwnx_plat_bin_fw_upload_android(usb_dev, RAM_FMAC_FW_ADDR_8800D80_U02, FW_BASE_NAME_8800D80_U02)) + return -1; + } + #if 0 + if(rwnx_plat_bin_fw_upload_android(usb_dev, FW_RAM_CALIBMODE_ADDR_8800D80_U02, FW_CALIBMODE_NAME_8800D80_U02)) { + return -1; + } + if (rwnx_send_dbg_mem_write_req(usb_dev, 0x40500048, 0x1e0000)) + return -1; + #endif + if (aicwf_patch_config_8800d80(usb_dev)) { + return -1; + } + if (rwnx_send_dbg_start_app_req(usb_dev, RAM_FMAC_FW_ADDR_8800D80_U02, HOST_START_APP_AUTO)) { + return -1; + } + }else { + #if 0 + if(rwnx_plat_bin_fw_upload_android(usb_dev, patch_info.addr_adid, FW_ADID_BASE_NAME_8800D80)) { + return -1; + } + if(rwnx_plat_bin_fw_upload_android(usb_dev, patch_info.addr_patch, FW_PATCH_BASE_NAME_8800D80)) { + return -1; + } + #if 0 + if (rwnx_plat_bin_fw_patch_table_upload_android(usb_dev, FW_PATCH_TABLE_NAME_8800D80)) { + return -1; + } + #else + if (aicbt_patch_table_load(usb_dev, head)) { + return -1; + } + #endif + #endif + if(rwnx_plat_bin_fw_upload_android(usb_dev, RAM_FMAC_FW_ADDR_8800D80, FW_BASE_NAME_8800D80)) { + return -1; + } + if (rwnx_send_dbg_start_app_req(usb_dev, RAM_FMAC_FW_ADDR_8800D80, HOST_START_APP_AUTO)) { + return -1; + } + } + }else if(testmode == FW_TEST_MODE){ + if (chip_id != CHIP_REV_U01){ +#if 0 + if(rwnx_plat_bin_fw_upload_android(usb_dev, patch_info.addr_adid, FW_ADID_BASE_NAME_8800D80_U02)) { + return -1; + } + + if(rwnx_plat_bin_fw_upload_android(usb_dev, patch_info.addr_patch, FW_PATCH_BASE_NAME_8800D80_U02)) { + return -1; + } + + if (aicbt_ext_patch_data_load(usb_dev, &patch_info, FW_PATCH_BASE_NAME_8800D80_U02_EXT)) { + return -1; + } + + if (aicbt_patch_table_load(usb_dev, head)) { + return -1; + } 
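+            /*
+             * Note: the chip_mcu_id block below (still inside this #if 0 region)
+             * would also push a flash image for 8800M80-based designs; there,
+             * only an ENOENT return (flash bin not present) is tolerated and any
+             * other non-zero result aborts the download.
+             */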
+ + + if (chip_mcu_id) { + int ret = 0; + ret = rwnx_plat_flash_bin_upload_android(usb_dev, FLASH_BIN_ADDR_8800M80, FLASH_BIN_8800M80); + if (ret && ret!= ENOENT) { + AICWFDBG(LOGERROR,"%s flash bin download fail \r\n", __func__); + return -1; + } + } +#endif + if(rwnx_plat_bin_fw_upload_android(usb_dev, RAM_FMAC_RF_FW_ADDR_8800D80_U02, FW_RF_BASE_NAME_8800D80_U02)) { + AICWFDBG(LOGERROR,"%s wifi fw download fail \r\n", __func__); + return -1; + } + if (rwnx_send_dbg_start_app_req(usb_dev, RAM_FMAC_RF_FW_ADDR_8800D80_U02, HOST_START_APP_AUTO)) { + return -1; + } + } else { + if(rwnx_plat_bin_fw_upload_android(usb_dev, RAM_FMAC_RF_FW_ADDR_8800D80, FW_RF_BASE_NAME_8800D80)) { + AICWFDBG(LOGERROR,"%s wifi fw download fail \r\n", __func__); + return -1; + } + if (rwnx_send_dbg_start_app_req(usb_dev, RAM_FMAC_RF_FW_ADDR_8800D80, HOST_START_APP_AUTO)) { + return -1; + } + } + }else if(testmode == FW_BLE_SCAN_AD_FILTER_MODE){ +/* + data and ad_data_filter_mask instructions for use + ex. + data[18] = {0x46,0x00,0x00,0xff,0xff,0xff,0xff,0xff,0xff,0x30,0xff,0xff,0xff,0x43,0x52,0x45,0x4c,0x42}; + mask = 1100 0000 0111 1111 1100 0000 0000 0000 = 0xc07fc000 + + data = 0x46,0x00,0x00,0xff,0xff,0xff,0xff,0xff,0xff,0x30,0xff,0xff,0xff,0x43,0x52,0x45,0x4c,0x42 + mask = 1 1 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 0 0...... fill 0 + + data & mask = "0x46 0x00" 0x00 0x00 0x00 0x00 0x00 0x00 0x00 "0x30 0xff 0xff 0x43 0x52 0x45 0x4c 0x42" + using data & mask value condition to wakeup host_wake_bt gpio +*/ + struct ble_wakeup_param_t* wakeup_param = (struct ble_wakeup_param_t*)kmalloc(sizeof(struct ble_wakeup_param_t), GFP_KERNEL); + uint32_t *write_blocks = (uint32_t *)wakeup_param; + + printk("%s ble scan wakeup \r\n", __func__); + + memset(wakeup_param, 0, sizeof(struct ble_wakeup_param_t)); + rwnx_plat_bin_fw_upload_android(usb_dev, RAM_FW_BLE_SCAN_WAKEUP_ADDR_8800D80, FW_BLE_SCAN_AD_FILTER_NAME); + wakeup_param->magic_num = 0x53454C42;//magic_num + wakeup_param->delay_scan_to = 1000;//delay start scan time(ms) + wakeup_param->reboot_to = ble_scan_wakeup_reboot_time;//reboot time + /******************************************************************/ + ///gpio_trigger_idx : 0 if wakeup_param->gpio_dft_lvl[0]=0xfe,this idx will be invalid. + wakeup_param->gpio_num[0] = gpio_num;////default select gpiob2 for fw_wakeup_host + wakeup_param->gpio_dft_lvl[0] = gpio_dft_lvl;////0:defalut pull down, 1:default pull up + ///gpio_trigger_idx : 1 if wakeup_param->gpio_dft_lvl[1]=0xfe,this idx will be invalid. + wakeup_param->gpio_num[1] = 3;////default select gpiob2 for fw_wakeup_host + wakeup_param->gpio_dft_lvl[1] = 1;////0:defalut pull down, 1:default pull up + /********************************************************************/ + /********************************************************************/ + //MAX_AD_FILTER_NUM=5 :num 0 + { + const uint8_t data[11] = {0x59,0x4B,0x32,0x42,0x41,0x5F,0x54,0x45,0x53,0x54,0x33}; + wakeup_param->ad_filter[0].ad_len = 12; + wakeup_param->ad_filter[0].ad_type = 0x09; + memcpy(wakeup_param->ad_filter[0].ad_data, data,wakeup_param->ad_filter[0].ad_len-1);// 1111 1111 1110 0000 0000 0000 0000 0000 //0xffe00000 + wakeup_param->ad_filter[0].ad_data_mask = 0xffe00000; + wakeup_param->ad_filter[0].ad_role = ROLE_COMBO|(COMBO_0<<4); + wakeup_param->ad_filter[0].gpio_trigger_idx = TG_IDX_0;//0: match for wakeup_param->gpio_num[0] 1: match for wakeup_param->gpio_num[1] + /********************************************************************/ + /*enable white list addr for paired remote ble addr. 
if all 0: all addr will use ad filter + wl_addr have value xx: only remote addr in white_list_addr will use ad filter + ********************************************************************/ + wakeup_param->ad_filter[0].wl_addr.addr[0] = 0; + wakeup_param->ad_filter[0].wl_addr.addr[1] = 0; + wakeup_param->ad_filter[0].wl_addr.addr[2] = 0; + wakeup_param->ad_filter[0].wl_addr.addr[3] = 0; + wakeup_param->ad_filter[0].wl_addr.addr[4] = 0; + wakeup_param->ad_filter[0].wl_addr.addr[5] = 0; + } + /********************************************************************/ + //MAX_AD_FILTER_NUM=5 :num 1 + { + const uint8_t data[2] = {0x12,0x18}; + wakeup_param->ad_filter[1].ad_len = 3; + wakeup_param->ad_filter[1].ad_type = 0x3; + memcpy(wakeup_param->ad_filter[1].ad_data, data,wakeup_param->ad_filter[1].ad_len-1);// 1100 0000 0000 0000 0000 0000 0000 0000 //0xc0000000 + wakeup_param->ad_filter[1].ad_data_mask = 0xc0000000; + wakeup_param->ad_filter[1].ad_role = ROLE_COMBO|(COMBO_0<<4); + wakeup_param->ad_filter[1].gpio_trigger_idx = TG_IDX_0;//0: match for wakeup_param->gpio_num[0] 1: match for wakeup_param->gpio_num[1] + /********************************************************************/ + /*enable white list addr for paired remote ble addr. if all 0: all addr will use ad filter + wl_addr have value xx: only remote addr in white_list_addr will use ad filter + ********************************************************************/ + wakeup_param->ad_filter[1].wl_addr.addr[0] = 0; + wakeup_param->ad_filter[1].wl_addr.addr[1] = 0; + wakeup_param->ad_filter[1].wl_addr.addr[2] = 0; + wakeup_param->ad_filter[1].wl_addr.addr[3] = 0; + wakeup_param->ad_filter[1].wl_addr.addr[4] = 0; + wakeup_param->ad_filter[1].wl_addr.addr[5] = 0; + } + /********************************************************************/ + //MAX_AD_FILTER_NUM=5 :num 2 + { + //const uint8_t data[11] = {0x59,0x4B,0x32,0x42,0x41,0x5F,0x54,0x45,0x53,0x54,0x33}; + wakeup_param->ad_filter[2].ad_len = 0; + wakeup_param->ad_filter[2].ad_type = 0; + //memcpy(wakeup_param->ad_filter[2].ad_data, data,wakeup_param->ad_filter[2].ad_len-1);// 1100 0000 0111 1111 1100 0000 0000 0000 //0xc07fc000 + wakeup_param->ad_filter[2].ad_data_mask = 0; + wakeup_param->ad_filter[2].ad_role = ROLE_ONLY; + wakeup_param->ad_filter[2].gpio_trigger_idx = TG_IDX_0;//0: match for wakeup_param->gpio_num[0] 1: match for wakeup_param->gpio_num[1] + /********************************************************************/ + /*enable white list addr for paired remote ble addr. 
if all 0: all addr will use ad filter + wl_addr have value xx: only remote addr in white_list_addr will use ad filter + ********************************************************************/ + wakeup_param->ad_filter[2].wl_addr.addr[0] = 0; + wakeup_param->ad_filter[2].wl_addr.addr[1] = 0; + wakeup_param->ad_filter[2].wl_addr.addr[2] = 0; + wakeup_param->ad_filter[2].wl_addr.addr[3] = 0; + wakeup_param->ad_filter[2].wl_addr.addr[4] = 0; + wakeup_param->ad_filter[2].wl_addr.addr[5] = 0; + } + /********************************************************************/ + //MAX_AD_FILTER_NUM=5 :num 3 + { + //const uint8_t data[11] = {0x59,0x4B,0x32,0x42,0x41,0x5F,0x54,0x45,0x53,0x54,0x33}; + wakeup_param->ad_filter[3].ad_len = 0; + wakeup_param->ad_filter[3].ad_type = 0; + //memcpy(wakeup_param->ad_filter[2].ad_data, data,wakeup_param->ad_filter[2].ad_len-1);// 1100 0000 0111 1111 1100 0000 0000 0000 //0xc07fc000 + wakeup_param->ad_filter[3].ad_data_mask = 0; + wakeup_param->ad_filter[3].ad_role = ROLE_COMBO|(COMBO_1<<4); + wakeup_param->ad_filter[3].gpio_trigger_idx = TG_IDX_0;//0: match for wakeup_param->gpio_num[0] 1: match for wakeup_param->gpio_num[1] + /********************************************************************/ + /*enable white list addr for paired remote ble addr. if all 0: all addr will use ad filter + wl_addr have value xx: only remote addr in white_list_addr will use ad filter + ********************************************************************/ + wakeup_param->ad_filter[3].wl_addr.addr[0] = 0; + wakeup_param->ad_filter[3].wl_addr.addr[1] = 0; + wakeup_param->ad_filter[3].wl_addr.addr[2] = 0; + wakeup_param->ad_filter[3].wl_addr.addr[3] = 0; + wakeup_param->ad_filter[3].wl_addr.addr[4] = 0; + wakeup_param->ad_filter[3].wl_addr.addr[5] = 0; + } + /********************************************************************/ + //MAX_AD_FILTER_NUM=5 :num 4 + { + //const uint8_t data[11] = {0x59,0x4B,0x32,0x42,0x41,0x5F,0x54,0x45,0x53,0x54,0x33}; + wakeup_param->ad_filter[4].ad_len = 0; + wakeup_param->ad_filter[4].ad_type = 0x09; + //memcpy(wakeup_param->ad_filter[4].ad_data, data,wakeup_param->ad_filter[4].ad_len-1);// 1111 1111 1110 0000 0000 0000 0000 0000 //0xffe00000 + wakeup_param->ad_filter[4].ad_data_mask = 0xffe00000; + wakeup_param->ad_filter[4].ad_role = ROLE_COMBO|(COMBO_1<<4); + wakeup_param->ad_filter[4].gpio_trigger_idx = TG_IDX_0|TG_IDX_1;//0: match for wakeup_param->gpio_num[0] 1: match for wakeup_param->gpio_num[1] + /********************************************************************/ + /*enable white list addr for paired remote ble addr. 
if all 0: all addr will use ad filter + wl_addr have value xx: only remote addr in white_list_addr will use ad filter + ********************************************************************/ + wakeup_param->ad_filter[4].wl_addr.addr[0] = 0; + wakeup_param->ad_filter[4].wl_addr.addr[1] = 0; + wakeup_param->ad_filter[4].wl_addr.addr[2] = 0; + wakeup_param->ad_filter[4].wl_addr.addr[3] = 0; + wakeup_param->ad_filter[4].wl_addr.addr[4] = 0; + wakeup_param->ad_filter[4].wl_addr.addr[5] = 0; + } + + for(i = 0; i < (sizeof(struct ble_wakeup_param_t)/4 +1); i++){ + printk("write_blocks[%d]:0x%08X \r\n", i, write_blocks[i]); + rwnx_send_dbg_mem_write_req(usb_dev, 0x15FE00 + (4 * i), write_blocks[i]); + } + rwnx_send_dbg_start_app_req(usb_dev, RAM_FW_BLE_SCAN_WAKEUP_ADDR_8800D80, HOST_START_APP_AUTO); + kfree(wakeup_param); + + return -1; + } + return 0; +} + + diff --git a/usr/src/AIC8800/drivers/aic8800/aic_load_fw/aic_compat_8800d80.h b/usr/src/AIC8800/drivers/aic8800/aic_load_fw/aic_compat_8800d80.h new file mode 100644 index 0000000..de6d99f --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic_load_fw/aic_compat_8800d80.h @@ -0,0 +1,53 @@ +#include +#include "aicwf_usb.h" + +#define USB_DEVICE_ID_AIC_8800D80 0x8D80 +#define USB_DEVICE_ID_AIC_8800D81 0x8D81 +#define USB_DEVICE_ID_AIC_8800D40 0x8D40 +#define USB_DEVICE_ID_AIC_8800D41 0x8D41 + +#define FW_BASE_NAME_8800D80 "fmacfw_8800d80.bin" +#define FW_RF_BASE_NAME_8800D80 "fmacfw_rf_8800d80.bin" +#define FW_PATCH_BASE_NAME_8800D80 "fw_patch_8800d80.bin" +#define FW_ADID_BASE_NAME_8800D80 "fw_adid_8800d80.bin" +#define FW_PATCH_TABLE_NAME_8800D80 "fw_patch_table_8800d80.bin" + +#ifdef CONFIG_FOR_IPCAM +#define FW_BASE_NAME_8800D80_U02 "fmacfw_8800d80_u02_ipc.bin" +#define FW_BASE_NAME_8800D80_H_U02 "fmacfw_8800d80_h_u02_ipc.bin" +#else +#define FW_BASE_NAME_8800D80_U02 "fmacfw_8800d80_u02.bin" +#define FW_BASE_NAME_8800D80_H_U02 "fmacfw_8800d80_h_u02.bin" +#endif +#define FW_RF_BASE_NAME_8800D80_U02 "lmacfw_rf_8800d80_u02.bin" +#define FW_PATCH_BASE_NAME_8800D80_U02 "fw_patch_8800d80_u02.bin" +#define FW_PATCH_BASE_NAME_8800D80_U02_EXT "fw_patch_8800d80_u02_ext" +#define FW_ADID_BASE_NAME_8800D80_U02 "fw_adid_8800d80_u02.bin" +#define FW_CALIBMODE_NAME_8800D80_U02 "calibmode_8800d80.bin" +#define FW_PATCH_TABLE_NAME_8800D80_U02 "fw_patch_table_8800d80_u02.bin" + +#define FLASH_BIN_8800M80 "host_wb_8800m80.bin" + +#define FW_USERCONFIG_NAME_8800D80 "aic_userconfig_8800d80.txt" + +#define RAM_FMAC_FW_ADDR_8800D80 0x100000 +#define RAM_FMAC_RF_FW_ADDR_8800D80 0x110000 +#define FW_RAM_ADID_BASE_ADDR_8800D80 0x002017E0 +#define FW_RAM_PATCH_BASE_ADDR_8800D80 0x0020B2B0 + +#define RAM_FMAC_FW_ADDR_8800D80_U02 0x120000 +#define RAM_FMAC_RF_FW_ADDR_8800D80_U02 0x120000 +#define FW_RAM_ADID_BASE_ADDR_8800D80_U02 0x00201940 +#define FW_RAM_CALIBMODE_ADDR_8800D80_U02 0x1e0000 +#define FW_RAM_PATCH_BASE_ADDR_8800D80_U02 0x0020B43c + +#define FLASH_BIN_ADDR_8800M80 0x8000000 + +#define CHIP_ID_H_MASK 0xC0 +#define IS_CHIP_ID_H() ((chip_id & CHIP_ID_H_MASK) == CHIP_ID_H_MASK) + +int aicwf_patch_config_8800d80(struct aic_usb_dev *usb_dev); +int rwnx_plat_userconfig_load_8800d80(struct aic_usb_dev *usbdev); +int system_config_8800d80(struct aic_usb_dev *usb_dev); +int aicfw_download_fw_8800d80(struct aic_usb_dev *usb_dev); + diff --git a/usr/src/AIC8800/drivers/aic8800/aic_load_fw/aic_compat_8800d80x2.c b/usr/src/AIC8800/drivers/aic8800/aic_load_fw/aic_compat_8800d80x2.c new file mode 100644 index 0000000..2a3bbf3 --- /dev/null +++ 
b/usr/src/AIC8800/drivers/aic8800/aic_load_fw/aic_compat_8800d80x2.c @@ -0,0 +1,763 @@ +#include "aic_txrxif.h" +#include "aicwf_usb.h" +#include "aicbluetooth.h" +#include "aic_compat_8800d80x2.h" +#include "aicwf_debug.h" +extern int ble_scan_wakeup_reboot_time; +extern uint32_t ad_data_filter_mask; +extern uint32_t gpio_num;//default select gpiob2 for fw_wakeup_host +extern uint32_t gpio_dft_lvl;//0:defalut pull down, 1:default pull up + +int rwnx_plat_bin_fw_upload_2(struct aic_usb_dev *usbdev, u32 fw_addr, + char *filename); +int rwnx_request_firmware_common(struct aic_usb_dev *usbdev, + u32** buffer, const char *filename); +void rwnx_plat_userconfig_parsing(char *buffer, int size); +void rwnx_release_firmware_common(u32** buffer); + +extern int testmode; +extern u8 chip_id; +extern u8 chip_mcu_id; + +typedef u32 (*array2_tbl_t)[2]; + +#define AIC_PATCH_MAGIG_NUM 0x48435450 // "PTCH" +#define AIC_PATCH_MAGIG_NUM_2 0x50544348 // "HCTP" +#define AIC_PATCH_BLOCK_MAX 4 + +typedef struct { + uint32_t magic_num; + uint32_t pair_start; + uint32_t magic_num_2; + uint32_t pair_count; + uint32_t block_dst[AIC_PATCH_BLOCK_MAX]; + uint32_t block_src[AIC_PATCH_BLOCK_MAX]; + uint32_t block_size[AIC_PATCH_BLOCK_MAX]; // word count +} aic_patch_t; + + +#define AIC_PATCH_OFST(mem) ((size_t) &((aic_patch_t *)0)->mem) +#define AIC_PATCH_ADDR(mem) ((u32) (aic_patch_str_base + AIC_PATCH_OFST(mem))) + +#define USER_PWROFST_COVER_CALIB_FLAG (0x01U << 0) +#define USER_CHAN_MAX_TXPWR_EN_FLAG (0x01U << 1) +#define USER_TX_USE_ANA_F_FLAG (0x01U << 2) +#define USER_APM_PRBRSP_OFFLOAD_DISABLE_FLAG (0x01U << 3) +#define USER_HE_MU_EDCA_UPDATE_DISABLE_FLAG (0x01U << 4) + +#ifdef CONFIG_POWER_LIMIT +#define CFG_USER_CHAN_MAX_TXPWR_EN 1 +#else +#define CFG_USER_CHAN_MAX_TXPWR_EN 0 +#endif +#define CFG_USER_TX_USE_ANA_F 0 +#ifdef CONFIG_PRBREQ_REPORT +#define CFG_USER_APM_PRBRSP_OFFLOAD_DISABLE 1 +#else +#define CFG_USER_APM_PRBRSP_OFFLOAD_DISABLE 0 +#endif + +u32 patch_tbl_d80x2[][2] = +{ +#ifdef CONFIG_PLATFORM_HI + {0x021c, 0x04000000},//hs amsdu + {0x0220, 0x04000000},//ss amsdu + {0x0224, 0x08000a01},//hs aggr + {0x0228, 0x08000a01},//ss aggr +#else + {0x021c, 0x04000000},//hs amsdu + {0x0220, 0x04010101},//ss amsdu + {0x0224, 0x50000a01},//hs aggr + {0x0228, 0x50000a00},//ss aggr +#endif + +#ifdef CONFIG_FLASH_CALRES + {0x0234, 0x0000004F}, // cal_res_stored_in_flash_flags +#endif + + {0x01f0, 0x00000001 + #if CFG_USER_CHAN_MAX_TXPWR_EN + | USER_CHAN_MAX_TXPWR_EN_FLAG + #endif + #if CFG_USER_TX_USE_ANA_F + | USER_TX_USE_ANA_F_FLAG + #endif + #if CFG_USER_APM_PRBRSP_OFFLOAD_DISABLE + | USER_APM_PRBRSP_OFFLOAD_DISABLE_FLAG + #endif + }, // user_ext_flags +}; + +//adap test +u32 adaptivity_patch_tbl_d80x2[][2] = { + {0x000C, 0x0000320A}, //linkloss_thd + {0x009C, 0x00000000}, //ac_param_conf + {0x01D0, 0x00010000}, //tx_adaptivity_en +}; + +u32 syscfg_tbl_masked_8800d80x2[][3] = { +}; + +u32 syscfg_tbl_8800d80x2[][2] = { + #ifdef CONFIG_PMIC_SETTING + {0x70001408, 0x00000000}, // stop wdg + #endif /* CONFIG_PMIC_SETTING */ + {0x40500010, 0x00000006},//cpu performance + {0x40500024, 0x0000001f}, +}; + +extern int adap_test; + +#define NEW_PATCH_BUFFER_MAP 1 + +int aicwf_patch_config_8800d80x2(struct aic_usb_dev *usb_dev) +{ +#if 1 + u32 rd_patch_addr; + u32 aic_patch_addr; + u32 config_base, aic_patch_str_base; + #if (NEW_PATCH_BUFFER_MAP) + u32 patch_buff_addr, patch_buff_base, rd_version_addr, rd_version_val; + #endif + uint32_t start_addr; + u32 patch_addr; + u32 patch_cnt = sizeof(patch_tbl_d80x2) / 4 / 2; + 
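+    /*
+     * patch_cnt counts {offset, value} pairs: sizeof(patch_tbl_d80x2) in bytes,
+     * divided by 4 (bytes per u32) and by 2 (u32s per pair). Each pair is later
+     * written into the patch buffer as two consecutive words:
+     *   word[2*n]   = patch_tbl_d80x2[n][0] + config_base;  // absolute address
+     *   word[2*n+1] = patch_tbl_d80x2[n][1];                // value to patch in
+     */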
struct dbg_mem_read_cfm rd_patch_addr_cfm; + int ret = 0; + int cnt = 0; + //adap test + int adap_patch_cnt = 0; + + if (adap_test) { + AICWFDBG(LOGINFO, "%s adap test \r\n", __func__); + adap_patch_cnt = sizeof(adaptivity_patch_tbl_d80x2)/sizeof(u32)/2; + } + + rd_patch_addr = RAM_FMAC_FW_ADDR_8800D80X2 + 0x01A8; + aic_patch_addr = rd_patch_addr + 8; + + AICWFDBG(LOGERROR, "Read FW mem: %08x\n", rd_patch_addr); + if ((ret = rwnx_send_dbg_mem_read_req(usb_dev, rd_patch_addr, &rd_patch_addr_cfm))) { + AICWFDBG(LOGERROR, "setting base[0x%x] rd fail: %d\n", rd_patch_addr, ret); + return ret; + } + AICWFDBG(LOGERROR, "%x=%x\n", rd_patch_addr_cfm.memaddr, rd_patch_addr_cfm.memdata); + config_base = rd_patch_addr_cfm.memdata; + + if ((ret = rwnx_send_dbg_mem_read_req(usb_dev, aic_patch_addr, &rd_patch_addr_cfm))) { + AICWFDBG(LOGERROR, "patch_str_base[0x%x] rd fail: %d\n", aic_patch_addr, ret); + return ret; + } + AICWFDBG(LOGERROR, "%x=%x\n", rd_patch_addr_cfm.memaddr, rd_patch_addr_cfm.memdata); + aic_patch_str_base = rd_patch_addr_cfm.memdata; + + #if (NEW_PATCH_BUFFER_MAP) + rd_version_addr = RAM_FMAC_FW_ADDR_8800D80X2 + 0x01C; + if ((ret = rwnx_send_dbg_mem_read_req(usb_dev, rd_version_addr, &rd_patch_addr_cfm))) { + AICWFDBG(LOGERROR, "version val[0x%x] rd fail: %d\n", rd_version_addr, ret); + return ret; + } + rd_version_val = rd_patch_addr_cfm.memdata; + AICWFDBG(LOGINFO, "rd_version_val=%08X\n", rd_version_val); + usb_dev->fw_version_uint = rd_version_val; + patch_buff_addr = rd_patch_addr + 12; + ret = rwnx_send_dbg_mem_read_req(usb_dev, patch_buff_addr, &rd_patch_addr_cfm); + if (ret) { + AICWFDBG(LOGERROR, "patch buf rd fail\n"); + return ret; + } + AICWFDBG(LOGINFO, "%x=%x\n", rd_patch_addr_cfm.memaddr, rd_patch_addr_cfm.memdata); + patch_buff_base = rd_patch_addr_cfm.memdata; + patch_addr = start_addr = patch_buff_base; + #endif + + if ((ret = rwnx_send_dbg_mem_write_req(usb_dev, AIC_PATCH_ADDR(magic_num), AIC_PATCH_MAGIG_NUM))) { + AICWFDBG(LOGERROR, "maigic_num[0x%x] write fail: %d\n", AIC_PATCH_ADDR(magic_num), ret); + return ret; + } + + if ((ret = rwnx_send_dbg_mem_write_req(usb_dev, AIC_PATCH_ADDR(magic_num_2), AIC_PATCH_MAGIG_NUM_2))) { + AICWFDBG(LOGERROR, "maigic_num[0x%x] write fail: %d\n", AIC_PATCH_ADDR(magic_num_2), ret); + return ret; + } + + if ((ret = rwnx_send_dbg_mem_write_req(usb_dev, AIC_PATCH_ADDR(pair_start), patch_addr))) { + AICWFDBG(LOGERROR, "pair_start[0x%x] write fail: %d\n", AIC_PATCH_ADDR(pair_start), ret); + return ret; + } + + if ((ret = rwnx_send_dbg_mem_write_req(usb_dev, AIC_PATCH_ADDR(pair_count), patch_cnt + adap_patch_cnt))) { + AICWFDBG(LOGERROR, "pair_count[0x%x] write fail: %d\n", AIC_PATCH_ADDR(pair_count), ret); + return ret; + } + + for (cnt = 0; cnt < patch_cnt; cnt++) { + if ((ret = rwnx_send_dbg_mem_write_req(usb_dev, start_addr+8*cnt, patch_tbl_d80x2[cnt][0]+config_base))) { + AICWFDBG(LOGERROR, "%x write fail\n", start_addr+8*cnt); + return ret; + } + if ((ret = rwnx_send_dbg_mem_write_req(usb_dev, start_addr+8*cnt+4, patch_tbl_d80x2[cnt][1]))) { + AICWFDBG(LOGERROR, "%x write fail\n", start_addr+8*cnt+4); + return ret; + } + } + + if (adap_test){ + int tmp_cnt = patch_cnt + adap_patch_cnt; + AICWFDBG(LOGINFO, "%s set adap_test patch \r\n", __func__); + for (cnt = patch_cnt; cnt < tmp_cnt; cnt++) { + int tbl_idx = cnt - patch_cnt; + if ((ret = rwnx_send_dbg_mem_write_req(usb_dev, start_addr+8*cnt, adaptivity_patch_tbl_d80x2[tbl_idx][0]+config_base))) { + AICWFDBG(LOGERROR, "%x write fail\n", start_addr+8*cnt); + return ret; + } + if 
((ret = rwnx_send_dbg_mem_write_req(usb_dev, start_addr+8*cnt+4, adaptivity_patch_tbl_d80x2[tbl_idx][1]))) { + AICWFDBG(LOGERROR, "%x write fail\n", start_addr+8*cnt+4); + return ret; + } + } + } + + /* + * Patch block 0 ~ 3, that is void by default, can be set as: + * + * const u32 patch_block_0[3] = {0x11223344, 0x55667788, 0xaabbccdd}; + * if ((ret = rwnx_send_dbg_mem_write_req(usb_dev, (u32)(&aic_patch->block_dst[0]), 0x160000))) { + * printk("block_dst [0x%x] write fail: %d\n", (u32)(&aic_patch->block_dst[0]), ret); + * } + * if ((ret = rwnx_send_dbg_mem_write_req(usb_dev, (u32)(&aic_patch->block_src[0]), 0x307000))) { + * printk("block_src [0x%x] write fail: %d\n", (u32)(&aic_patch->block_src[0]), ret); + * } + * if ((ret = rwnx_send_dbg_mem_write_req(usb_dev, (u32)(&aic_patch->block_size[0]), sizeof(patch_block_0) / sizeof(u32)))) { + * printk("block_size[0x%x] write fail: %d\n", (u32)(&aic_patch->block_size[0]), ret); + * } + * if ((ret = rwnx_send_dbg_mem_block_write_req(usb_dev, 0x307000, sizeof(patch_block_0), patch_block_0))) { + * printk("blk set fail: %d\n", ret); + * } + */ + if ((ret = rwnx_send_dbg_mem_write_req(usb_dev, AIC_PATCH_ADDR(block_size[0]), 0))) { + AICWFDBG(LOGERROR, "block_size[0x%x] write fail: %d\n", AIC_PATCH_ADDR(block_size[0]), ret); + return ret; + } + if ((ret = rwnx_send_dbg_mem_write_req(usb_dev, AIC_PATCH_ADDR(block_size[1]), 0))) { + AICWFDBG(LOGERROR, "block_size[0x%x] write fail: %d\n", AIC_PATCH_ADDR(block_size[1]), ret); + return ret; + } + if ((ret = rwnx_send_dbg_mem_write_req(usb_dev, AIC_PATCH_ADDR(block_size[2]), 0))) { + AICWFDBG(LOGERROR, "block_size[0x%x] write fail: %d\n", AIC_PATCH_ADDR(block_size[2]), ret); + return ret; + } + if ((ret = rwnx_send_dbg_mem_write_req(usb_dev, AIC_PATCH_ADDR(block_size[3]), 0))) { + AICWFDBG(LOGERROR, "block_size[0x%x] write fail: %d\n", AIC_PATCH_ADDR(block_size[3]), ret); + return ret; + } +#endif + + return 0; +} + + +#if 0 +extern char aic_fw_path[200]; + +int rwnx_plat_userconfig_load_8800d80x2(struct aic_usb_dev *usb_dev){ + int size; + u32 *dst=NULL; + char *filename = FW_USERCONFIG_NAME_8800D80X2; + + AICWFDBG(LOGINFO, "userconfig file path:%s \r\n", filename); + + /* load file */ + size = rwnx_request_firmware_common(usb_dev, &dst, filename); + if (size <= 0) { + AICWFDBG(LOGERROR, "wrong size of firmware file\n"); + dst = NULL; + return 0; + } + + /* Copy the file on the Embedded side */ + AICWFDBG(LOGINFO, "### Load file done: %s, size=%d\n", filename, size); + + rwnx_plat_userconfig_parsing((char *)dst, size); + + rwnx_release_firmware_common(&dst); + + AICWFDBG(LOGINFO, "userconfig download complete\n\n"); + return 0; + +} +#endif +int system_config_8800d80x2(struct aic_usb_dev *usb_dev){ + int syscfg_num; + int ret, cnt; + const u32 mem_addr = 0x40500000; + const u32 mem_addr2 = 0x40500004; + struct dbg_mem_read_cfm rd_mem_addr_cfm; + ret = rwnx_send_dbg_mem_read_req(usb_dev, mem_addr, &rd_mem_addr_cfm); + if (ret) { + printk("%x rd fail: %d\n", mem_addr, ret); + return ret; + } + chip_id = rd_mem_addr_cfm.memdata >> 16; + + ret = rwnx_send_dbg_mem_read_req(usb_dev, mem_addr2, &rd_mem_addr_cfm); + if (ret) { + printk("%x rd fail: %d\n", mem_addr2, ret); + return ret; + } + if (((rd_mem_addr_cfm.memdata >> 17) & 0x01UL) == 0x00UL) { + chip_mcu_id = 1; + } + + printk("chip_id=%x, chip_mcu_id = %d\n", chip_id, chip_mcu_id); + #if 1 + syscfg_num = sizeof(syscfg_tbl_8800d80x2) / sizeof(u32) / 2; + for (cnt = 0; cnt < syscfg_num; cnt++) { + ret = rwnx_send_dbg_mem_write_req(usb_dev, 
syscfg_tbl_8800d80x2[cnt][0], syscfg_tbl_8800d80x2[cnt][1]); + if (ret) { + printk("%x write fail: %d\n", syscfg_tbl_8800d80x2[cnt][0], ret); + return ret; + } + } + syscfg_num = sizeof(syscfg_tbl_masked_8800d80x2) / sizeof(u32) / 3; + for (cnt = 0; cnt < syscfg_num; cnt++) { + ret = rwnx_send_dbg_mem_mask_write_req(usb_dev, + syscfg_tbl_masked_8800d80x2[cnt][0], syscfg_tbl_masked_8800d80x2[cnt][1], syscfg_tbl_masked_8800d80x2[cnt][2]); + if (ret) { + printk("%x mask write fail: %d\n", syscfg_tbl_masked_8800d80x2[cnt][0], ret); + return ret; + } + } + #endif + + return 0; +} + + +static int aicbt_ext_patch_data_load(struct aic_usb_dev *usb_dev, struct aicbt_patch_info_t *patch_info, const char *filename) +{ + int ret = 0; + uint32_t ext_patch_nb = patch_info->ext_patch_nb; + char ext_patch_file_name[50]; + int index = 0; + uint32_t id = 0; + uint32_t addr = 0; + uint32_t mem_w_add = 0; + uint32_t mem_w_data = 0; + + if (ext_patch_nb > 0){ + AICWFDBG(LOGDEBUG, "[0x40480000]: 0x00040220\n"); + mem_w_add = 0x40580000; + mem_w_data = 0x00040220; + AICWFDBG(LOGDEBUG, "%s addr:0x%x data:0x%x \n", __func__, mem_w_add, mem_w_data); + ret = rwnx_send_dbg_mem_write_req(usb_dev, mem_w_add, mem_w_data); + if (ret) { + printk("%x wr fail: %d\n", mem_w_add, ret); + return ret; + } + + for (index = 0; index < patch_info->ext_patch_nb; index++){ + id = *(patch_info->ext_patch_param + (index * 2)); + addr = *(patch_info->ext_patch_param + (index * 2) + 1); + memset(ext_patch_file_name, 0, sizeof(ext_patch_file_name)); + sprintf(ext_patch_file_name,"%s%d.bin", + filename, + id); + AICWFDBG(LOGDEBUG, "%s ext_patch_file_name:%s ext_patch_id:%x ext_patch_addr:%x \r\n", + __func__,ext_patch_file_name, id, addr); + + if (rwnx_plat_bin_fw_upload_android(usb_dev, addr, ext_patch_file_name)) { + ret = -1; + break; + } + } + } + return ret; +} + + +int aicfw_download_fw_8800d80x2(struct aic_usb_dev *usb_dev) +{ +#ifdef CONFIG_USB_BT + struct aicbt_patch_table *head = NULL; + struct aicbt_patch_info_t patch_info = { + .info_len = 0, + .adid_addrinf = 0, + .addr_adid = 0, + .patch_addrinf = 0, + .addr_patch = 0, + .reset_addr = 0, + .reset_val = 0, + .adid_flag_addr = 0, + .adid_flag = 0, + }; + + int i = 0; + +#ifdef CONFIG_LOAD_BT_CONF + aicbt_parse_config(usb_dev, FW_BT_CONF_NAME_8800D80X2); +#endif + + if (chip_id < CHIP_REV_U05) { + head = aicbt_patch_table_alloc(usb_dev, FW_PATCH_TABLE_NAME_8800D80X2_U03); + } else { + head = aicbt_patch_table_alloc(usb_dev, FW_PATCH_TABLE_NAME_8800D80X2_U05); + } + if (head == NULL){ + printk("aicbt_patch_table_alloc fail\n"); + return -1; + } + + if(head == NULL){ + return -1; + } + if (chip_id < CHIP_REV_U05) { + patch_info.addr_adid = FW_RAM_ADID_BASE_ADDR_8800D80X2_U03; + patch_info.addr_patch = FW_RAM_PATCH_BASE_ADDR_8800D80X2_U03; + } else { + patch_info.addr_adid = FW_RAM_ADID_BASE_ADDR_8800D80X2_U05; + patch_info.addr_patch = FW_RAM_PATCH_BASE_ADDR_8800D80X2_U05; + } + aicbt_patch_info_unpack(&patch_info, head); + if(patch_info.info_len == 0) { + printk("%s, aicbt_patch_info_unpack fail\n", __func__); + return -1; + } + + printk("addr_adid 0x%x, addr_patch 0x%x\n", patch_info.addr_adid, patch_info.addr_patch); + + if (chip_mcu_id) { + int ret = 0; + u32 retry = 0; + const u32 mem_addr = 0x40506030; + const u32 mem_addr2 = 0x40506004; + u32 mem_data; + struct dbg_mem_read_cfm rd_mem_addr_cfm; + ret = rwnx_send_dbg_mem_read_req(usb_dev, mem_addr, &rd_mem_addr_cfm); + if (ret) { + printk("%x rd fail: %d\n", mem_addr, ret); + return ret; + } + if (0 == 
(rd_mem_addr_cfm.memdata & (0x01UL << 2))) { + ret = rwnx_send_dbg_mem_read_req(usb_dev, mem_addr2, &rd_mem_addr_cfm); + if (ret) { + printk("%x rd fail: %d\n", mem_addr2, ret); + return ret; + } + mem_data = (rd_mem_addr_cfm.memdata | (0x01UL << 17)) & (~(0x01UL << 18)); + + ret = rwnx_send_dbg_mem_write_req(usb_dev, mem_addr2, mem_data); + if (ret) { + printk("%x wr fail: %d\n", mem_addr2, ret); + return ret; + } + mdelay(1); + + while (1) { + ret = rwnx_send_dbg_mem_read_req(usb_dev, mem_addr, &rd_mem_addr_cfm); + if (ret) { + printk("%x rd fail: %d\n", mem_addr, ret); + return ret; + } + if (0 == (rd_mem_addr_cfm.memdata & (0x01UL << 2))) { + mdelay(1); + retry++; + if (retry > 20) { + printk("bt pwron timeout\n"); + return -1; + } + } else { + break; + } + } + } + } +#endif + + if(testmode == FW_NORMAL_MODE){ + + if (chip_id < CHIP_REV_U05){ +#ifdef CONFIG_USB_BT + if(rwnx_plat_bin_fw_upload_android(usb_dev, patch_info.addr_adid, FW_ADID_BASE_NAME_8800D80X2_U03)) { + return -1; + } + if(rwnx_plat_bin_fw_upload_android(usb_dev, patch_info.addr_patch, FW_PATCH_BASE_NAME_8800D80X2_U03)) { + return -1; + } + + if (aicbt_ext_patch_data_load(usb_dev, &patch_info, FW_PATCH_BASE_NAME_8800D80X2_U03_EXT)) { + return -1; + } + + if (aicbt_patch_table_load(usb_dev, head)) { + return -1; + } + + mdelay(100); +#endif + + if(rwnx_plat_bin_fw_upload_android(usb_dev, RAM_FMAC_FW_ADDR_8800D80X2, FW_BASE_NAME_8800D80X2)) { + return -1; + } + #if 0 + if(rwnx_plat_bin_fw_upload_android(usb_dev, FW_RAM_CALIBMODE_ADDR_8800D80X2_U02, FW_CALIBMODE_NAME_8800D80X2_U02)) { + return -1; + } + if (rwnx_send_dbg_mem_write_req(usb_dev, 0x40500048, 0x1e0000)) + return -1; + #endif + if (aicwf_patch_config_8800d80x2(usb_dev)) { + return -1; + } + if (rwnx_send_dbg_start_app_req(usb_dev, RAM_FMAC_FW_ADDR_8800D80X2, HOST_START_APP_AUTO)) { + return -1; + } + }else { +#ifdef CONFIG_USB_BT + if(rwnx_plat_bin_fw_upload_android(usb_dev, patch_info.addr_adid, FW_ADID_BASE_NAME_8800D80X2_U05)) { + return -1; + } + if(rwnx_plat_bin_fw_upload_android(usb_dev, patch_info.addr_patch, FW_PATCH_BASE_NAME_8800D80X2_U05)) { + return -1; + } + if (aicbt_ext_patch_data_load(usb_dev, &patch_info, FW_PATCH_BASE_NAME_8800D80X2_U05_EXT)) { + return -1; + } + if (aicbt_patch_table_load(usb_dev, head)) { + return -1; + } + + mdelay(100); +#endif + if(rwnx_plat_bin_fw_upload_android(usb_dev, RAM_FMAC_FW_ADDR_8800D80X2, FW_BASE_NAME_8800D80X2)) { + return -1; + } + if (aicwf_patch_config_8800d80x2(usb_dev)) { + return -1; + } + if (rwnx_send_dbg_start_app_req(usb_dev, RAM_FMAC_FW_ADDR_8800D80X2, HOST_START_APP_AUTO)) { + return -1; + } + } + }else if(testmode == FW_TEST_MODE){ + if (chip_id < CHIP_REV_U05){ +#ifdef CONFIG_USB_BT + if(rwnx_plat_bin_fw_upload_android(usb_dev, patch_info.addr_adid, FW_ADID_BASE_NAME_8800D80X2_U03)) { + return -1; + } + + if(rwnx_plat_bin_fw_upload_android(usb_dev, patch_info.addr_patch, FW_PATCH_BASE_NAME_8800D80X2_U03)) { + return -1; + } + + if (aicbt_ext_patch_data_load(usb_dev, &patch_info, FW_PATCH_BASE_NAME_8800D80X2_U03_EXT)) { + return -1; + } + + if (aicbt_patch_table_load(usb_dev, head)) { + return -1; + } + + mdelay(100); +#endif + + if(rwnx_plat_bin_fw_upload_android(usb_dev, RAM_FMAC_RF_FW_ADDR_8800D80X2, FW_RF_BASE_NAME_8800D80X2)) { + AICWFDBG(LOGERROR,"%s wifi fw download fail \r\n", __func__); + return -1; + } + if (rwnx_send_dbg_start_app_req(usb_dev, RAM_FMAC_RF_FW_ADDR_8800D80X2, HOST_START_APP_AUTO)) { + return -1; + } + } else { +#ifdef CONFIG_USB_BT + 
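+            /*
+             * U05-and-later test-mode path: same BT bring-up order as the U03
+             * branch above (adid, patch, numbered ext patches, patch table),
+             * only with the *_U05 firmware names, before the RF lmac image is
+             * loaded and started.
+             */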
if(rwnx_plat_bin_fw_upload_android(usb_dev, patch_info.addr_adid, FW_ADID_BASE_NAME_8800D80X2_U05)) { + return -1; + } + + if(rwnx_plat_bin_fw_upload_android(usb_dev, patch_info.addr_patch, FW_PATCH_BASE_NAME_8800D80X2_U05)) { + return -1; + } + + if (aicbt_ext_patch_data_load(usb_dev, &patch_info, FW_PATCH_BASE_NAME_8800D80X2_U05_EXT)) { + return -1; + } + + if (aicbt_patch_table_load(usb_dev, head)) { + return -1; + } + + mdelay(100); +#endif + if(rwnx_plat_bin_fw_upload_android(usb_dev, RAM_FMAC_RF_FW_ADDR_8800D80X2, FW_RF_BASE_NAME_8800D80X2)) { + AICWFDBG(LOGERROR,"%s wifi fw download fail \r\n", __func__); + return -1; + } + if (rwnx_send_dbg_start_app_req(usb_dev, RAM_FMAC_RF_FW_ADDR_8800D80X2, HOST_START_APP_AUTO)) { + return -1; + } + } + }else if(testmode == FW_BLE_SCAN_AD_FILTER_MODE){ +/* + data and ad_data_filter_mask instructions for use + ex. + data[18] = {0x46,0x00,0x00,0xff,0xff,0xff,0xff,0xff,0xff,0x30,0xff,0xff,0xff,0x43,0x52,0x45,0x4c,0x42}; + mask = 1100 0000 0111 1111 1100 0000 0000 0000 = 0xc07fc000 + + data = 0x46,0x00,0x00,0xff,0xff,0xff,0xff,0xff,0xff,0x30,0xff,0xff,0xff,0x43,0x52,0x45,0x4c,0x42 + mask = 1 1 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 0 0...... fill 0 + + data & mask = "0x46 0x00" 0x00 0x00 0x00 0x00 0x00 0x00 0x00 "0x30 0xff 0xff 0x43 0x52 0x45 0x4c 0x42" + using data & mask value condition to wakeup host_wake_bt gpio +*/ + + struct ble_wakeup_param_t* wakeup_param = (struct ble_wakeup_param_t*)kmalloc(sizeof(struct ble_wakeup_param_t), GFP_KERNEL); + uint32_t *write_blocks = (uint32_t *)wakeup_param; + + printk("%s ble scan wakeup \r\n", __func__); + + memset(wakeup_param, 0, sizeof(struct ble_wakeup_param_t)); + rwnx_plat_bin_fw_upload_android(usb_dev, RAM_FW_BLE_SCAN_WAKEUP_ADDR_8800D80, FW_BLE_SCAN_AD_FILTER_NAME); + wakeup_param->magic_num = 0x53454C42;//magic_num + wakeup_param->delay_scan_to = 1000;//delay start scan time(ms) + wakeup_param->reboot_to = ble_scan_wakeup_reboot_time;//reboot time + /******************************************************************/ + ///gpio_trigger_idx : 0 if wakeup_param->gpio_dft_lvl[0]=0xfe,this idx will be invalid. + wakeup_param->gpio_num[0] = gpio_num;////default select gpiob2 for fw_wakeup_host + wakeup_param->gpio_dft_lvl[0] = gpio_dft_lvl;////0:defalut pull down, 1:default pull up + ///gpio_trigger_idx : 1 if wakeup_param->gpio_dft_lvl[1]=0xfe,this idx will be invalid. + wakeup_param->gpio_num[1] = 3;////default select gpiob2 for fw_wakeup_host + wakeup_param->gpio_dft_lvl[1] = 1;////0:defalut pull down, 1:default pull up + /********************************************************************/ + /********************************************************************/ + //MAX_AD_FILTER_NUM=5 :num 0 + { + const uint8_t data[11] = {0x59,0x4B,0x32,0x42,0x41,0x5F,0x54,0x45,0x53,0x54,0x33}; + wakeup_param->ad_filter[0].ad_len = 12; + wakeup_param->ad_filter[0].ad_type = 0x09; + memcpy(wakeup_param->ad_filter[0].ad_data, data,wakeup_param->ad_filter[0].ad_len-1);// 1111 1111 1110 0000 0000 0000 0000 0000 //0xffe00000 + wakeup_param->ad_filter[0].ad_data_mask = 0xffe00000; + wakeup_param->ad_filter[0].ad_role = ROLE_COMBO|(COMBO_0<<4); + wakeup_param->ad_filter[0].gpio_trigger_idx = TG_IDX_0;//0: match for wakeup_param->gpio_num[0] 1: match for wakeup_param->gpio_num[1] + /********************************************************************/ + /*enable white list addr for paired remote ble addr. 
if all 0: all addr will use ad filter + wl_addr have value xx: only remote addr in white_list_addr will use ad filter + ********************************************************************/ + wakeup_param->ad_filter[0].wl_addr.addr[0] = 0; + wakeup_param->ad_filter[0].wl_addr.addr[1] = 0; + wakeup_param->ad_filter[0].wl_addr.addr[2] = 0; + wakeup_param->ad_filter[0].wl_addr.addr[3] = 0; + wakeup_param->ad_filter[0].wl_addr.addr[4] = 0; + wakeup_param->ad_filter[0].wl_addr.addr[5] = 0; + } + /********************************************************************/ + //MAX_AD_FILTER_NUM=5 :num 1 + { + const uint8_t data[2] = {0x12,0x18}; + wakeup_param->ad_filter[1].ad_len = 3; + wakeup_param->ad_filter[1].ad_type = 0x3; + memcpy(wakeup_param->ad_filter[1].ad_data, data,wakeup_param->ad_filter[1].ad_len-1);// 1100 0000 0000 0000 0000 0000 0000 0000 //0xc0000000 + wakeup_param->ad_filter[1].ad_data_mask = 0xc0000000; + wakeup_param->ad_filter[1].ad_role = ROLE_COMBO|(COMBO_0<<4); + wakeup_param->ad_filter[1].gpio_trigger_idx = TG_IDX_0;//0: match for wakeup_param->gpio_num[0] 1: match for wakeup_param->gpio_num[1] + /********************************************************************/ + /*enable white list addr for paired remote ble addr. if all 0: all addr will use ad filter + wl_addr have value xx: only remote addr in white_list_addr will use ad filter + ********************************************************************/ + wakeup_param->ad_filter[1].wl_addr.addr[0] = 0; + wakeup_param->ad_filter[1].wl_addr.addr[1] = 0; + wakeup_param->ad_filter[1].wl_addr.addr[2] = 0; + wakeup_param->ad_filter[1].wl_addr.addr[3] = 0; + wakeup_param->ad_filter[1].wl_addr.addr[4] = 0; + wakeup_param->ad_filter[1].wl_addr.addr[5] = 0; + } + /********************************************************************/ + //MAX_AD_FILTER_NUM=5 :num 2 + { + //const uint8_t data[11] = {0x59,0x4B,0x32,0x42,0x41,0x5F,0x54,0x45,0x53,0x54,0x33}; + wakeup_param->ad_filter[2].ad_len = 0; + wakeup_param->ad_filter[2].ad_type = 0; + //memcpy(wakeup_param->ad_filter[2].ad_data, data,wakeup_param->ad_filter[2].ad_len-1);// 1100 0000 0111 1111 1100 0000 0000 0000 //0xc07fc000 + wakeup_param->ad_filter[2].ad_data_mask = 0; + wakeup_param->ad_filter[2].ad_role = ROLE_ONLY; + wakeup_param->ad_filter[2].gpio_trigger_idx = TG_IDX_0;//0: match for wakeup_param->gpio_num[0] 1: match for wakeup_param->gpio_num[1] + /********************************************************************/ + /*enable white list addr for paired remote ble addr. 
if all 0: all addr will use ad filter + wl_addr have value xx: only remote addr in white_list_addr will use ad filter + ********************************************************************/ + wakeup_param->ad_filter[2].wl_addr.addr[0] = 0; + wakeup_param->ad_filter[2].wl_addr.addr[1] = 0; + wakeup_param->ad_filter[2].wl_addr.addr[2] = 0; + wakeup_param->ad_filter[2].wl_addr.addr[3] = 0; + wakeup_param->ad_filter[2].wl_addr.addr[4] = 0; + wakeup_param->ad_filter[2].wl_addr.addr[5] = 0; + } + /********************************************************************/ + //MAX_AD_FILTER_NUM=5 :num 3 + { + //const uint8_t data[11] = {0x59,0x4B,0x32,0x42,0x41,0x5F,0x54,0x45,0x53,0x54,0x33}; + wakeup_param->ad_filter[3].ad_len = 0; + wakeup_param->ad_filter[3].ad_type = 0; + //memcpy(wakeup_param->ad_filter[2].ad_data, data,wakeup_param->ad_filter[2].ad_len-1);// 1100 0000 0111 1111 1100 0000 0000 0000 //0xc07fc000 + wakeup_param->ad_filter[3].ad_data_mask = 0; + wakeup_param->ad_filter[3].ad_role = ROLE_COMBO|(COMBO_1<<4); + wakeup_param->ad_filter[3].gpio_trigger_idx = TG_IDX_0;//0: match for wakeup_param->gpio_num[0] 1: match for wakeup_param->gpio_num[1] + /********************************************************************/ + /*enable white list addr for paired remote ble addr. if all 0: all addr will use ad filter + wl_addr have value xx: only remote addr in white_list_addr will use ad filter + ********************************************************************/ + wakeup_param->ad_filter[3].wl_addr.addr[0] = 0; + wakeup_param->ad_filter[3].wl_addr.addr[1] = 0; + wakeup_param->ad_filter[3].wl_addr.addr[2] = 0; + wakeup_param->ad_filter[3].wl_addr.addr[3] = 0; + wakeup_param->ad_filter[3].wl_addr.addr[4] = 0; + wakeup_param->ad_filter[3].wl_addr.addr[5] = 0; + } + /********************************************************************/ + //MAX_AD_FILTER_NUM=5 :num 4 + { + //const uint8_t data[11] = {0x59,0x4B,0x32,0x42,0x41,0x5F,0x54,0x45,0x53,0x54,0x33}; + wakeup_param->ad_filter[4].ad_len = 0; + wakeup_param->ad_filter[4].ad_type = 0x09; + //memcpy(wakeup_param->ad_filter[4].ad_data, data,wakeup_param->ad_filter[4].ad_len-1);// 1111 1111 1110 0000 0000 0000 0000 0000 //0xffe00000 + wakeup_param->ad_filter[4].ad_data_mask = 0xffe00000; + wakeup_param->ad_filter[4].ad_role = ROLE_COMBO|(COMBO_1<<4); + wakeup_param->ad_filter[4].gpio_trigger_idx = TG_IDX_0|TG_IDX_1;//0: match for wakeup_param->gpio_num[0] 1: match for wakeup_param->gpio_num[1] + /********************************************************************/ + /*enable white list addr for paired remote ble addr. 
if all 0: all addr will use ad filter + wl_addr have value xx: only remote addr in white_list_addr will use ad filter + ********************************************************************/ + wakeup_param->ad_filter[4].wl_addr.addr[0] = 0; + wakeup_param->ad_filter[4].wl_addr.addr[1] = 0; + wakeup_param->ad_filter[4].wl_addr.addr[2] = 0; + wakeup_param->ad_filter[4].wl_addr.addr[3] = 0; + wakeup_param->ad_filter[4].wl_addr.addr[4] = 0; + wakeup_param->ad_filter[4].wl_addr.addr[5] = 0; + } + + for(i = 0; i < (sizeof(struct ble_wakeup_param_t)/4 +1); i++){ + printk("write_blocks[%d]:0x%08X \r\n", i, write_blocks[i]); + rwnx_send_dbg_mem_write_req(usb_dev, 0x15FE00 + (4 * i), write_blocks[i]); + } + //rwnx_send_dbg_start_app_req(usb_dev, RAM_FW_BLE_SCAN_WAKEUP_ADDR_8800D80, HOST_START_APP_AUTO); + + int ret; + ret = rwnx_send_dbg_mem_write_req(usb_dev, 0x40500048, RAM_FW_BLE_SCAN_WAKEUP_ADDR_8800D80); + if (ret) { + printk("%x write fail\n", ret); + } + ret = rwnx_send_dbg_mem_write_req(usb_dev, 0x4050012c, 0x00000040); + if (ret) { + printk("%x write fail\n", ret); + } + + + kfree(wakeup_param); + + return -1; + + } + + return 0; +} + + diff --git a/usr/src/AIC8800/drivers/aic8800/aic_load_fw/aic_compat_8800d80x2.h b/usr/src/AIC8800/drivers/aic8800/aic_load_fw/aic_compat_8800d80x2.h new file mode 100644 index 0000000..cc28308 --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic_load_fw/aic_compat_8800d80x2.h @@ -0,0 +1,48 @@ +#include +#include "aicwf_usb.h" + +#define USB_DEVICE_ID_AIC_8800D80X2 0x8D90 +#define USB_DEVICE_ID_AIC_8800D81X2 0x8D91 +#define USB_DEVICE_ID_AIC_8800D89X2 0x8D99 +#define USB_DEVICE_ID_AIC_8800D40X2 0x8D92 + +#ifdef CONFIG_FOR_IPCAM +#define FW_BASE_NAME_8800D80X2 "fmacfw_8800d80x2_ipc.bin" +#else +#define FW_BASE_NAME_8800D80X2 "fmacfw_8800d80x2.bin" +#endif +#define FW_RF_BASE_NAME_8800D80X2 "lmacfw_rf_8800d80x2.bin" + +#define FW_PATCH_BASE_NAME_8800D80X2_U03 "fw_patch_8800d80x2_u03.bin" +#define FW_PATCH_BASE_NAME_8800D80X2_U03_EXT "fw_patch_8800d80x2_u03_ext" +#define FW_ADID_BASE_NAME_8800D80X2_U03 "fw_adid_8800d80x2_u03.bin" +#define FW_PATCH_TABLE_NAME_8800D80X2_U03 "fw_patch_table_8800d80x2_u03.bin" + +#define FW_PATCH_BASE_NAME_8800D80X2_U05 "fw_patch_8800d80x2_u05.bin" +#define FW_PATCH_BASE_NAME_8800D80X2_U05_EXT "fw_patch_8800d80x2_u05_ext" +#define FW_ADID_BASE_NAME_8800D80X2_U05 "fw_adid_8800d80x2_u05.bin" +#define FW_PATCH_TABLE_NAME_8800D80X2_U05 "fw_patch_table_8800d80x2_u05.bin" + +#define FW_BT_CONF_NAME_8800D80X2 "aicbt.conf" + +#define FLASH_BIN_8800M80X2 "host_wb_8800m80x2.bin" + +#define FW_USERCONFIG_NAME_8800D80X2 "aic_userconfig_8800d80x2.txt" + +#define RAM_FMAC_FW_ADDR_8800D80X2 0x120000 +#define RAM_FMAC_RF_FW_ADDR_8800D80X2 0x120000 + +#define FW_RAM_ADID_BASE_ADDR_8800D80X2_U03 0x003018f8 +#define FW_RAM_PATCH_BASE_ADDR_8800D80X2_U03 0x0030b494 + +#define FW_RAM_ADID_BASE_ADDR_8800D80X2_U05 0x003018f8 +#define FW_RAM_PATCH_BASE_ADDR_8800D80X2_U05 0x0030b48c + +#define FLASH_BIN_ADDR_8800M80X2 0x8000000 + + +int aicwf_patch_config_8800d80x2(struct aic_usb_dev *usb_dev); +int rwnx_plat_userconfig_load_8800d80x2(struct aic_usb_dev *usbdev); +int system_config_8800d80x2(struct aic_usb_dev *usb_dev); +int aicfw_download_fw_8800d80x2(struct aic_usb_dev *usb_dev); + diff --git a/usr/src/AIC8800/drivers/aic8800/aic_load_fw/aic_txrxif.c b/usr/src/AIC8800/drivers/aic8800/aic_load_fw/aic_txrxif.c new file mode 100644 index 0000000..6bbe19f --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic_load_fw/aic_txrxif.c @@ -0,0 +1,487 @@ +/** + * 
aicwf_bus.c + * + * bus function declarations + * + * Copyright (C) AICSemi 2018-2020 + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef CONFIG_PLATFORM_UBUNTU +#if LINUX_VERSION_CODE > KERNEL_VERSION(4, 10, 0) +#include +#endif +#endif +#include "aic_txrxif.h" +#include "aicbluetooth.h" +#include "aicbluetooth_cmds.h" + +int aicwf_bus_init(uint bus_hdrlen, struct device *dev) +{ + int ret = 0; + struct aicwf_bus *bus_if; + + if (!dev) { + txrx_err("device not found\n"); + return -1; + } + bus_if = dev_get_drvdata(dev); + #if defined CONFIG_USB_SUPPORT && defined CONFIG_USB_NO_TRANS_DMA_MAP + #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)) + bus_if->cmd_buf = usb_alloc_coherent(bus_if->bus_priv.usb->udev, CMD_BUF_MAX, (in_interrupt() ? GFP_ATOMIC : GFP_KERNEL), &bus_if->bus_priv.usb->cmd_dma_trans_addr); + #else + bus_if->cmd_buf = usb_buffer_alloc(bus_if->bus_priv.usb->udev, CMD_BUF_MAX, (in_interrupt() ? GFP_ATOMIC : GFP_KERNEL), &bus_if->bus_priv.usb->cmd_dma_trans_addr); + #endif + #else + bus_if->cmd_buf = kzalloc(CMD_BUF_MAX, GFP_KERNEL); + #endif + if(!bus_if->cmd_buf) { + ret = -ENOMEM; + txrx_err("proto_attach failed\n"); + goto fail; + } + memset(bus_if->cmd_buf, '\0', CMD_BUF_MAX); + + init_completion(&bus_if->bustx_trgg); + init_completion(&bus_if->busrx_trgg); +#ifdef AICWF_SDIO_SUPPORT + bus_if->bustx_thread = kthread_run(sdio_bustx_thread, (void *)bus_if, "aicwf_bustx_thread"); + bus_if->busrx_thread = kthread_run(sdio_busrx_thread, (void *)bus_if->bus_priv.sdio->rx_priv, "aicwf_busrx_thread"); +#endif +#ifdef AICWF_USB_SUPPORT + bus_if->bustx_thread = kthread_run(usb_bustx_thread, (void *)bus_if, "aicwf_bustx_thread"); + bus_if->busrx_thread = kthread_run(usb_busrx_thread, (void *)bus_if->bus_priv.usb->rx_priv, "aicwf_busrx_thread"); +#endif + + if (IS_ERR(bus_if->bustx_thread)) { + bus_if->bustx_thread = NULL; + txrx_err("aicwf_bustx_thread run fail\n"); + goto fail; + } + + if (IS_ERR(bus_if->busrx_thread)) { + bus_if->busrx_thread = NULL; + txrx_err("aicwf_bustx_thread run fail\n"); + goto fail; + } + + return ret; +fail: + aicwf_bus_deinit(dev); + + return ret; +} + +void aicwf_bus_deinit(struct device *dev) +{ + struct aicwf_bus *bus_if; + struct aic_usb_dev *usbdev; + + if (!dev) { + txrx_err("device not found\n"); + return; + } + printk("%s", __func__); + + mdelay(500); + bus_if = dev_get_drvdata(dev); + aicwf_bus_stop(bus_if); + + usbdev = bus_if->bus_priv.usb; + usbdev->app_cmp = false; + aic_bt_platform_deinit(usbdev); + + if (bus_if->cmd_buf) { + #if defined CONFIG_USB_SUPPORT && defined CONFIG_USB_NO_TRANS_DMA_MAP + #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)) + usb_free_coherent(bus_if->bus_priv.usb->udev, CMD_BUF_MAX, bus_if->cmd_buf, bus_if->bus_priv.usb->cmd_dma_trans_addr); + #else + usb_buffer_free(bus_if->bus_priv.usb->udev, CMD_BUF_MAX, bus_if->cmd_buf, bus_if->bus_priv.usb->cmd_dma_trans_addr); + #endif + #else + kfree(bus_if->cmd_buf); + #endif + bus_if->cmd_buf = NULL; + } + + if (bus_if->bustx_thread) { + complete_all(&bus_if->bustx_trgg); + kthread_stop(bus_if->bustx_thread); + bus_if->bustx_thread = NULL; + } + printk("exit %s\n", __func__); +} + +void aicwf_frame_tx(void *dev, struct sk_buff *skb) +{ + struct aic_usb_dev *usbdev = (struct aic_usb_dev *)dev; + aicwf_bus_txdata(usbdev->bus_if, skb); +} + +struct aicwf_tx_priv* aicwf_tx_init(void *arg) +{ + struct aicwf_tx_priv* tx_priv; + + tx_priv = kzalloc(sizeof(struct aicwf_tx_priv), GFP_KERNEL); + if 
(!tx_priv) + return NULL; + + tx_priv->usbdev = (struct aic_usb_dev *)arg; + + atomic_set(&tx_priv->aggr_count, 0); + tx_priv->aggr_buf = dev_alloc_skb(MAX_AGGR_TXPKT_LEN); + if(!tx_priv->aggr_buf) { + txrx_err("Alloc bus->txdata_buf failed!\n"); + kfree(tx_priv); + return NULL; + } + tx_priv->head = tx_priv->aggr_buf->data; + tx_priv->tail = tx_priv->aggr_buf->data; + + return tx_priv; +} + +void aicwf_tx_deinit(struct aicwf_tx_priv* tx_priv) +{ + if (tx_priv && tx_priv->aggr_buf) + dev_kfree_skb(tx_priv->aggr_buf); + + kfree(tx_priv); + tx_priv = NULL; +} + +static bool aicwf_another_ptk(struct sk_buff *skb) +{ + u8 *data; + u16 aggr_len = 0; + + if(skb->data == NULL || skb->len == 0) { + return false; + } + data = skb->data; + aggr_len = (*skb->data | (*(skb->data + 1) << 8)); + if(aggr_len == 0) { + return false; + } + + return true; +} + +int aicwf_process_rxframes(struct aicwf_rx_priv *rx_priv) +{ + int ret = 0; + unsigned long flags = 0; + struct sk_buff *skb = NULL; + u16 pkt_len = 0; + struct sk_buff *skb_inblock = NULL; + u16 aggr_len = 0, adjust_len = 0; + u8 *data = NULL; + + while (1) { + spin_lock_irqsave(&rx_priv->rxqlock, flags); + if(aicwf_is_framequeue_empty(&rx_priv->rxq)) { + spin_unlock_irqrestore(&rx_priv->rxqlock,flags); + break; + } + skb = aicwf_frame_dequeue(&rx_priv->rxq); + spin_unlock_irqrestore(&rx_priv->rxqlock,flags); + if (skb == NULL) { + txrx_err("skb_error\r\n"); + break; + } + while(aicwf_another_ptk(skb)) { + data = skb->data; + pkt_len = (*skb->data | (*(skb->data + 1) << 8)); + + if((skb->data[2] & USB_TYPE_CFG) != USB_TYPE_CFG) { // type : data + aggr_len = pkt_len + RX_HWHRD_LEN; + + if (aggr_len & (RX_ALIGNMENT - 1)) + adjust_len = roundup(aggr_len, RX_ALIGNMENT); + else + adjust_len = aggr_len; + + skb_inblock = __dev_alloc_skb(aggr_len + CCMP_OR_WEP_INFO, GFP_KERNEL);//8 is for ccmp mic or wep icv + if(skb_inblock == NULL){ + txrx_err("no more space!\n"); + aicwf_dev_skb_free(skb); + return -EBADE; + } + + skb_put(skb_inblock, aggr_len); + memcpy(skb_inblock->data, data, aggr_len); + skb_pull(skb, adjust_len); + } + else { // type : config + aggr_len = pkt_len; + + if (aggr_len & (RX_ALIGNMENT - 1)) + adjust_len = roundup(aggr_len, RX_ALIGNMENT); + else + adjust_len = aggr_len; + + skb_inblock = __dev_alloc_skb(aggr_len+4, GFP_KERNEL); + if(skb_inblock == NULL){ + txrx_err("no more space!\n"); + aicwf_dev_skb_free(skb); + return -EBADE; + } + + skb_put(skb_inblock, aggr_len+4); + memcpy(skb_inblock->data, data, aggr_len+4); + if((*(skb_inblock->data + 2) & 0x7f) == USB_TYPE_CFG_CMD_RSP) + rwnx_rx_handle_msg(rx_priv->usbdev, (struct ipc_e2a_msg *)(skb_inblock->data + 4)); + skb_pull(skb, adjust_len+4); + } + } + + dev_kfree_skb(skb_inblock); + dev_kfree_skb(skb); + atomic_dec(&rx_priv->rx_cnt); + } + + return ret; +} + +static struct recv_msdu *aicwf_rxframe_queue_init(struct list_head *q, int qsize) +{ + int i; + struct recv_msdu *req, *reqs; + + reqs = vmalloc(qsize*sizeof(struct recv_msdu)); + if (reqs == NULL) + return NULL; + + req = reqs; + for (i = 0; i < qsize; i++) { + INIT_LIST_HEAD(&req->rxframe_list); + list_add(&req->rxframe_list, q); + req->len = 0; + req++; + } + + return reqs; +} + +struct aicwf_rx_priv *aicwf_rx_init(void *arg) +{ + struct aicwf_rx_priv* rx_priv; + rx_priv = kzalloc(sizeof(struct aicwf_rx_priv), GFP_KERNEL); + if (!rx_priv) + return NULL; + + rx_priv->usbdev = (struct aic_usb_dev *)arg; + aicwf_frame_queue_init(&rx_priv->rxq, 1, MAX_RXQLEN); + spin_lock_init(&rx_priv->rxqlock); + atomic_set(&rx_priv->rx_cnt, 0); 
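+    /*
+     * The rest of the init builds a free pool of MAX_REORD_RXFRAME recv_msdu
+     * descriptors on rxframes_freequeue and sets up the per-STA reorder
+     * bookkeeping (stas_reord_list / stas_reord_lock); if the pool allocation
+     * fails, rx_priv is freed and NULL is returned.
+     */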
+ + INIT_LIST_HEAD(&rx_priv->rxframes_freequeue); + spin_lock_init(&rx_priv->freeq_lock); + rx_priv->recv_frames = aicwf_rxframe_queue_init(&rx_priv->rxframes_freequeue, MAX_REORD_RXFRAME); + if (!rx_priv->recv_frames) { + txrx_err("no enough buffer for free recv frame queue!\n"); + kfree(rx_priv); + return NULL; + } + spin_lock_init(&rx_priv->stas_reord_lock); + INIT_LIST_HEAD(&rx_priv->stas_reord_list); + + return rx_priv; +} + + +static void aicwf_recvframe_queue_deinit(struct list_head *q) +{ + struct recv_msdu *req, *next; + + list_for_each_entry_safe(req, next, q, rxframe_list) { + list_del_init(&req->rxframe_list); + } +} + +void aicwf_rx_deinit(struct aicwf_rx_priv* rx_priv) +{ + //struct reord_ctrl_info *reord_info, *tmp; + aicwf_frame_queue_flush(&rx_priv->rxq); + aicwf_recvframe_queue_deinit(&rx_priv->rxframes_freequeue); + if (rx_priv->recv_frames) + vfree(rx_priv->recv_frames); + + if (rx_priv->usbdev->bus_if->busrx_thread) { + complete_all(&rx_priv->usbdev->bus_if->busrx_trgg); + send_sig(SIGTERM, rx_priv->usbdev->bus_if->busrx_thread, 1); + kthread_stop(rx_priv->usbdev->bus_if->busrx_thread); + rx_priv->usbdev->bus_if->busrx_thread = NULL; + } + + kfree(rx_priv); + rx_priv = NULL; +} + +bool aicwf_rxframe_enqueue(struct device *dev, struct frame_queue *q, struct sk_buff *pkt) +{ + return aicwf_frame_enq(dev, q, pkt, 0); +} + + +void aicwf_dev_skb_free(struct sk_buff *skb) +{ + if (!skb) + return; + + dev_kfree_skb_any(skb); +} + +static struct sk_buff *aicwf_frame_queue_penq(struct frame_queue *pq, int prio, struct sk_buff *p) +{ + struct sk_buff_head *q; + + if (pq->queuelist[prio].qlen >= pq->qmax) + return NULL; + + q = &pq->queuelist[prio]; + __skb_queue_tail(q, p); + pq->qcnt++; + if (pq->hi_prio < prio) + pq->hi_prio = (u16)prio; + + return p; +} + +void aicwf_frame_queue_flush(struct frame_queue *pq) +{ + int prio; + struct sk_buff_head *q; + struct sk_buff *p, *next; + + for (prio = 0; prio < pq->num_prio; prio++) + { + q = &pq->queuelist[prio]; + skb_queue_walk_safe(q, p, next) { + skb_unlink(p, q); + aicwf_dev_skb_free(p); + pq->qcnt--; + } + } +} + +void aicwf_frame_queue_init(struct frame_queue *pq, int num_prio, int max_len) +{ + int prio; + + memset(pq, 0, offsetof(struct frame_queue, queuelist) + (sizeof(struct sk_buff_head) * num_prio)); + pq->num_prio = (u16)num_prio; + pq->qmax = (u16)max_len; + + for (prio = 0; prio < num_prio; prio++) { + skb_queue_head_init(&pq->queuelist[prio]); + } +} + +struct sk_buff *aicwf_frame_queue_peek_tail(struct frame_queue *pq, int *prio_out) +{ + int prio; + + if (pq->qcnt == 0) + return NULL; + + for (prio = 0; prio < pq->hi_prio; prio++) + if (!skb_queue_empty(&pq->queuelist[prio])) + break; + + if (prio_out) + *prio_out = prio; + + return skb_peek_tail(&pq->queuelist[prio]); +} + +bool aicwf_is_framequeue_empty(struct frame_queue *pq) +{ + int prio, len = 0; + + for (prio = 0; prio <= pq->hi_prio; prio++) + len += pq->queuelist[prio].qlen; + + if(len > 0) + return false; + else + return true; +} + +struct sk_buff *aicwf_frame_dequeue(struct frame_queue *pq) +{ + struct sk_buff_head *q; + struct sk_buff *p; + int prio; + + if (pq->qcnt == 0) + return NULL; + + while ((prio = pq->hi_prio) > 0 && skb_queue_empty(&pq->queuelist[prio])) + pq->hi_prio--; + + q = &pq->queuelist[prio]; + p = __skb_dequeue(q); + if (p == NULL) + return NULL; + + pq->qcnt--; + + return p; +} + +static struct sk_buff *aicwf_skb_dequeue_tail(struct frame_queue *pq, int prio) +{ + struct sk_buff_head *q = &pq->queuelist[prio]; + struct sk_buff *p = 
skb_dequeue_tail(q); + + if (!p) + return NULL; + + pq->qcnt--; + return p; +} + +bool aicwf_frame_enq(struct device *dev, struct frame_queue *q, struct sk_buff *pkt, int prio) +{ + struct sk_buff *p = NULL; + int prio_modified = -1; + + if (q->queuelist[prio].qlen < q->qmax && q->qcnt < q->qmax) { + aicwf_frame_queue_penq(q, prio, pkt); + return true; + } + if (q->queuelist[prio].qlen >= q->qmax) { + prio_modified = prio; + } else if (q->qcnt >= q->qmax) { + p = aicwf_frame_queue_peek_tail(q, &prio_modified); + if (prio_modified > prio) + return false; + } + + if (prio_modified >= 0) { + if (prio_modified == prio) + return false; + + p = aicwf_skb_dequeue_tail(q, prio_modified); + aicwf_dev_skb_free(p); + + p = aicwf_frame_queue_penq(q, prio_modified, pkt); + if (p == NULL) + txrx_err("failed\n"); + } + + return p != NULL; +} + + diff --git a/usr/src/AIC8800/drivers/aic8800/aic_load_fw/aic_txrxif.h b/usr/src/AIC8800/drivers/aic8800/aic_load_fw/aic_txrxif.h new file mode 100644 index 0000000..d566a11 --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic_load_fw/aic_txrxif.h @@ -0,0 +1,216 @@ +/** + * aicwf_txrxif.h + * + * bus function declarations + * + * Copyright (C) AICSemi 2018-2020 + */ + +#ifndef _AICWF_TXRXIF_H_ +#define _AICWF_TXRXIF_H_ + +#include +#include +//#include "aicsdio.h" +#include "aicwf_usb.h" + +#define CMD_BUF_MAX 1536 +#define DATA_BUF_MAX 2048 +#define TXPKT_BLOCKSIZE 512 +#define MAX_AGGR_TXPKT_LEN (1536*32) +#define CMD_TX_TIMEOUT 5000 +#define TX_ALIGNMENT 4 + +#define RX_HWHRD_LEN 60 //58->60 word allined +#define CCMP_OR_WEP_INFO 8 +#define MAX_RXQLEN 2000 +#define RX_ALIGNMENT 4 + +#define DEBUG_ERROR_LEVEL 0 +#define DEBUG_DEBUG_LEVEL 1 +#define DEBUG_INFO_LEVEL 2 + +#define DBG_LEVEL DEBUG_DEBUG_LEVEL + +#define txrx_err(fmt, ...) pr_err("txrx_err:<%s,%d>: " fmt, __func__, __LINE__, ##__VA_ARGS__) +#define sdio_err(fmt, ...) pr_err("sdio_err:<%s,%d>: " fmt, __func__, __LINE__, ##__VA_ARGS__) +#define usb_err(fmt, ...) pr_err("usb_err:<%s,%d>: " fmt, __func__, __LINE__, ##__VA_ARGS__) +#if DBG_LEVEL >= DEBUG_DEBUG_LEVEL +#define sdio_dbg(fmt, ...) printk("aicbt: " fmt, ##__VA_ARGS__) +#define usb_dbg(fmt, ...) printk("aicbt: " fmt, ##__VA_ARGS__) +#else +#define sdio_dbg(fmt, ...) +#define usb_dbg(fmt, ...) +#endif +#if DBG_LEVEL >= DEBUG_INFO_LEVEL +#define sdio_info(fmt, ...) printk("aicbt: " fmt, ##__VA_ARGS__) +#define usb_info(fmt, ...) printk("aicbt: " fmt, ##__VA_ARGS__) +#else +#define sdio_info(fmt, ...) +#define usb_info(fmt, ...) 
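+/* When DBG_LEVEL is below DEBUG_INFO_LEVEL, sdio_info/usb_info expand to
+ * nothing, so info-level logging is compiled out entirely. */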
+#endif + +enum aicwf_bus_state { + BUS_DOWN_ST, + BUS_UP_ST +}; + +struct aicwf_bus_ops { + int (*start) (struct device * dev); + void (*stop) (struct device * dev); + int (*txdata) (struct device * dev, struct sk_buff * skb); + int (*txmsg) (struct device * dev, u8 * msg, uint len); +}; + +struct frame_queue { + u16 num_prio; + u16 hi_prio; + u16 qmax; /* max number of queued frames */ + u16 qcnt; + struct sk_buff_head queuelist[8]; +}; + +struct aicwf_bus { + union { + struct aic_sdio_dev *sdio; + struct aic_usb_dev *usb; + } bus_priv; + struct device *dev; + struct aicwf_bus_ops *ops; + enum aicwf_bus_state state; + u8 *cmd_buf; + struct completion bustx_trgg; + struct completion busrx_trgg; + struct task_struct *bustx_thread; + struct task_struct *busrx_thread; +}; + +struct aicwf_tx_priv { +#ifdef AICWF_SDIO_SUPPORT + struct aic_sdio_dev *sdiodev; + int fw_avail_bufcnt; + //for cmd tx + u8 *cmd_buf; + uint cmd_len; + bool cmd_txstate; + bool cmd_tx_succ; + struct semaphore cmd_txsema; + wait_queue_head_t cmd_txdone_wait; + //for data tx + atomic_t tx_pktcnt; + + struct frame_queue txq; + spinlock_t txqlock; + struct semaphore txctl_sema; +#endif +#ifdef AICWF_USB_SUPPORT + struct aic_usb_dev *usbdev; +#endif + struct sk_buff *aggr_buf; + atomic_t aggr_count; + u8 *head; + u8 *tail; +}; + + +#define MAX_REORD_RXFRAME 250 +#define REORDER_UPDATE_TIME 50 +#define AICWF_REORDER_WINSIZE 64 +#define SN_LESS(a, b) (((a-b)&0x800)!=0) +#define SN_EQUAL(a, b) (a == b) + +struct reord_ctrl { + struct aicwf_rx_priv *rx_priv; + u8 enable; + u16 ind_sn; + u8 wsize_b; + spinlock_t reord_list_lock; + struct list_head reord_list; + struct timer_list reord_timer; + struct work_struct reord_timer_work; +}; + +struct reord_ctrl_info { + u8 mac_addr[6]; + struct reord_ctrl preorder_ctrl[8]; + struct list_head list; +}; + +struct recv_msdu { + struct sk_buff *pkt; + u8 tid; + u16 seq_num; + uint len; + u8 *rx_data; + //for pending rx reorder list + struct list_head reord_pending_list; + //for total frame list, when rxframe from busif, dequeue, when submit frame to net, enqueue + struct list_head rxframe_list; + struct reord_ctrl *preorder_ctrl; +}; + +struct aicwf_rx_priv { + struct aic_usb_dev *usbdev; + void *rwnx_vif; + atomic_t rx_cnt; + u32 data_len; + spinlock_t rxqlock; + struct frame_queue rxq; + + spinlock_t freeq_lock; + struct list_head rxframes_freequeue; + struct list_head stas_reord_list; + spinlock_t stas_reord_lock; + struct recv_msdu *recv_frames; +}; + +static inline int aicwf_bus_start(struct aicwf_bus *bus) +{ + return bus->ops->start(bus->dev); +} + +static inline void aicwf_bus_stop(struct aicwf_bus *bus) +{ + bus->ops->stop(bus->dev); +} + +static inline int aicwf_bus_txdata(struct aicwf_bus *bus, struct sk_buff *skb) +{ + return bus->ops->txdata(bus->dev, skb); +} + +static inline int aicwf_bus_txmsg(struct aicwf_bus *bus, u8 *msg, uint len) +{ + return bus->ops->txmsg(bus->dev, msg, len); +} + +static inline void aicwf_sched_timeout(u32 millisec) +{ + ulong timeout = 0, expires = 0; + expires = jiffies + msecs_to_jiffies(millisec); + timeout = millisec; + + while (timeout) { + timeout = schedule_timeout(timeout); + if (time_after(jiffies, expires)) + break; + } +} + +int aicwf_bus_init(uint bus_hdrlen, struct device *dev); +void aicwf_bus_deinit(struct device *dev); +void aicwf_tx_deinit(struct aicwf_tx_priv* tx_priv); +void aicwf_rx_deinit(struct aicwf_rx_priv* rx_priv); +struct aicwf_tx_priv* aicwf_tx_init(void *arg); +struct aicwf_rx_priv* aicwf_rx_init(void *arg); +void 
aicwf_frame_queue_init(struct frame_queue *pq, int num_prio, int max_len); +void aicwf_frame_queue_flush(struct frame_queue *pq); +bool aicwf_frame_enq(struct device *dev, struct frame_queue *q, struct sk_buff *pkt, int prio); +bool aicwf_rxframe_enqueue(struct device *dev, struct frame_queue *q, struct sk_buff *pkt); +bool aicwf_is_framequeue_empty(struct frame_queue *pq); +void aicwf_frame_tx(void *dev, struct sk_buff *skb); +void aicwf_dev_skb_free(struct sk_buff *skb); +struct sk_buff *aicwf_frame_dequeue(struct frame_queue *pq); +struct sk_buff *aicwf_frame_queue_peek_tail(struct frame_queue *pq, int *prio_out); + +#endif /* _AICWF_TXRXIF_H_ */ diff --git a/usr/src/AIC8800/drivers/aic8800/aic_load_fw/aicbluetooth.c b/usr/src/AIC8800/drivers/aic8800/aic_load_fw/aicbluetooth.c new file mode 100755 index 0000000..ba4793b --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic_load_fw/aicbluetooth.c @@ -0,0 +1,1603 @@ +#include +#include +#include "aicbluetooth_cmds.h" +#include "aicwf_usb.h" +#include "aic_txrxif.h" +#include "md5.h" +#include "aicbluetooth.h" +#include "aicwf_debug.h" +#ifdef CONFIG_USE_FW_REQUEST +#include +#endif + +//Parser state +#define INIT 0 +#define CMD 1 +#define PRINT 2 +#define GET_VALUE 3 + +extern int flash_erase_len; +int flash_write_size = 0; +u32 flash_write_bin_crc = 0; + +typedef struct +{ + int8_t enable; + int8_t dsss; + int8_t ofdmlowrate_2g4; + int8_t ofdm64qam_2g4; + int8_t ofdm256qam_2g4; + int8_t ofdm1024qam_2g4; + int8_t ofdmlowrate_5g; + int8_t ofdm64qam_5g; + int8_t ofdm256qam_5g; + int8_t ofdm1024qam_5g; +} txpwr_idx_conf_t; + + +txpwr_idx_conf_t userconfig_txpwr_idx = { + .enable = 1, + .dsss = 9, + .ofdmlowrate_2g4 = 8, + .ofdm64qam_2g4 = 8, + .ofdm256qam_2g4 = 8, + .ofdm1024qam_2g4 = 8, + .ofdmlowrate_5g = 11, + .ofdm64qam_5g = 10, + .ofdm256qam_5g = 9, + .ofdm1024qam_5g = 9 + +}; + +typedef struct +{ + int8_t enable; + int8_t chan_1_4; + int8_t chan_5_9; + int8_t chan_10_13; + int8_t chan_36_64; + int8_t chan_100_120; + int8_t chan_122_140; + int8_t chan_142_165; +} txpwr_ofst_conf_t; + +txpwr_ofst_conf_t userconfig_txpwr_ofst = { + .enable = 1, + .chan_1_4 = 0, + .chan_5_9 = 0, + .chan_10_13 = 0, + .chan_36_64 = 0, + .chan_100_120 = 0, + .chan_122_140 = 0, + .chan_142_165 = 0 +}; + +typedef struct +{ + int8_t enable; + int8_t xtal_cap; + int8_t xtal_cap_fine; +} xtal_cap_conf_t; + + +xtal_cap_conf_t userconfig_xtal_cap = { + .enable = 0, + .xtal_cap = 24, + .xtal_cap_fine = 31, +}; + +struct aicbt_info_t { + uint32_t btmode; + uint32_t btport; + uint32_t uart_baud; + uint32_t uart_flowctrl; + uint32_t lpm_enable; + uint32_t txpwr_lvl; +}; + +struct aicbsp_info_t { + int hwinfo; + uint32_t cpmode; +}; + + +enum aicbt_btport_type { + AICBT_BTPORT_NULL, + AICBT_BTPORT_MB, + AICBT_BTPORT_UART, +}; + +/* btmode + * used for force bt mode,if not AICBSP_MODE_NULL + * efuse valid and vendor_info will be invalid, even has beed set valid +*/ +enum aicbt_btmode_type { + AICBT_BTMODE_BT_ONLY_SW = 0x0, // bt only mode with switch + AICBT_BTMODE_BT_WIFI_COMBO, // wifi/bt combo mode + AICBT_BTMODE_BT_ONLY, // bt only mode without switch + AICBT_BTMODE_BT_ONLY_TEST, // bt only test mode + AICBT_BTMODE_BT_WIFI_COMBO_TEST, // wifi/bt combo test mode + AICBT_BTMODE_BT_ONLY_COANT, // bt only mode with no external switch + AICBT_MODE_NULL = 0xFF, // invalid value +}; + +/* uart_baud + * used for config uart baud when btport set to uart, + * otherwise meaningless +*/ +enum aicbt_uart_baud_type { + AICBT_UART_BAUD_115200 = 115200, + AICBT_UART_BAUD_921600 = 
921600, + AICBT_UART_BAUD_1_5M = 1500000, + AICBT_UART_BAUD_3_25M = 3250000, +}; + +enum aicbt_uart_flowctrl_type { + AICBT_UART_FLOWCTRL_DISABLE = 0x0, // uart without flow ctrl + AICBT_UART_FLOWCTRL_ENABLE, // uart with flow ctrl +}; + +enum aicbsp_cpmode_type { + AICBSP_CPMODE_WORK, + AICBSP_CPMODE_TEST, +}; +#define AIC_M2D_OTA_INFO_ADDR 0x88000020 +#define AIC_M2D_OTA_DATA_ADDR 0x88000040 +#define AIC_M2D_OTA_FLASH_ADDR 0x08004000 +#define AIC_M2D_OTA_CODE_START_ADDR 0x08004188 +#define AIC_M2D_OTA_VER_ADDR 0x0800418c +///aic bt tx pwr lvl :lsb->msb: first byte, min pwr lvl; second byte, max pwr lvl; +///pwr lvl:20(min), 30 , 40 , 50 , 60(max) +#define AICBT_TXPWR_LVL 0x00006020 +#define AICBT_TXPWR_LVL_8800d80 0x00006F2F +#define AICBT_TXPWR_LVL_8800d80x2 0x00006F2F + + +#define AICBSP_MODE_BT_HCI_MODE_NULL 0 +#define AICBSP_MODE_BT_HCI_MODE_MB 1 +#define AICBSP_MODE_BT_HCI_MODE_UART 2 + +#define AICBSP_HWINFO_DEFAULT (-1) +#define AICBSP_CPMODE_DEFAULT AICBSP_CPMODE_WORK + +#define AICBT_BTMODE_DEFAULT_8800d80x2 AICBT_BTMODE_BT_ONLY_COANT +#define AICBT_BTMODE_DEFAULT_8800d80 AICBT_BTMODE_BT_ONLY_COANT +#define AICBT_BTMODE_DEFAULT AICBT_BTMODE_BT_ONLY +#define AICBT_BTPORT_DEFAULT AICBT_BTPORT_MB +#define AICBT_UART_BAUD_DEFAULT AICBT_UART_BAUD_1_5M +#define AICBT_UART_FC_DEFAULT AICBT_UART_FLOWCTRL_ENABLE +#define AICBT_LPM_ENABLE_DEFAULT 0 +#define AICBT_TXPWR_LVL_DEFAULT AICBT_TXPWR_LVL +#define AICBT_TXPWR_LVL_DEFAULT_8800d80 AICBT_TXPWR_LVL_8800d80 +#define AICBT_TXPWR_LVL_DEFAULT_8800d80x2 AICBT_TXPWR_LVL_8800d80x2 + + +#define AIC_HW_INFO 0x21 + +#define FW_PATH_MAX 200 +#if defined(CONFIG_PLATFORM_UBUNTU) +static const char* aic_default_fw_path = "/lib/firmware"; +#else +static const char* aic_default_fw_path = "/vendor/etc/firmware"; +#endif +char aic_fw_path[FW_PATH_MAX]; +module_param_string(aic_fw_path, aic_fw_path, FW_PATH_MAX, 0660); +#ifdef CONFIG_M2D_OTA_AUTO_SUPPORT +char saved_sdk_ver[64]; +module_param_string(saved_sdk_ver, saved_sdk_ver,64, 0660); +#endif + + + +const u32 crc_tab[256] = +{ + 0x00000000, 0x77073096, 0xee0e612c, 0x990951ba, + 0x076dc419, 0x706af48f, 0xe963a535, 0x9e6495a3, + 0x0edb8832, 0x79dcb8a4, 0xe0d5e91e, 0x97d2d988, + 0x09b64c2b, 0x7eb17cbd, 0xe7b82d07, 0x90bf1d91, + 0x1db71064, 0x6ab020f2, 0xf3b97148, 0x84be41de, + 0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7, + 0x136c9856, 0x646ba8c0, 0xfd62f97a, 0x8a65c9ec, + 0x14015c4f, 0x63066cd9, 0xfa0f3d63, 0x8d080df5, + 0x3b6e20c8, 0x4c69105e, 0xd56041e4, 0xa2677172, + 0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b, + 0x35b5a8fa, 0x42b2986c, 0xdbbbc9d6, 0xacbcf940, + 0x32d86ce3, 0x45df5c75, 0xdcd60dcf, 0xabd13d59, + 0x26d930ac, 0x51de003a, 0xc8d75180, 0xbfd06116, + 0x21b4f4b5, 0x56b3c423, 0xcfba9599, 0xb8bda50f, + 0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924, + 0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d, + 0x76dc4190, 0x01db7106, 0x98d220bc, 0xefd5102a, + 0x71b18589, 0x06b6b51f, 0x9fbfe4a5, 0xe8b8d433, + 0x7807c9a2, 0x0f00f934, 0x9609a88e, 0xe10e9818, + 0x7f6a0dbb, 0x086d3d2d, 0x91646c97, 0xe6635c01, + 0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e, + 0x6c0695ed, 0x1b01a57b, 0x8208f4c1, 0xf50fc457, + 0x65b0d9c6, 0x12b7e950, 0x8bbeb8ea, 0xfcb9887c, + 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3, 0xfbd44c65, + 0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2, + 0x4adfa541, 0x3dd895d7, 0xa4d1c46d, 0xd3d6f4fb, + 0x4369e96a, 0x346ed9fc, 0xad678846, 0xda60b8d0, + 0x44042d73, 0x33031de5, 0xaa0a4c5f, 0xdd0d7cc9, + 0x5005713c, 0x270241aa, 0xbe0b1010, 0xc90c2086, + 0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f, + 
0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4, + 0x59b33d17, 0x2eb40d81, 0xb7bd5c3b, 0xc0ba6cad, + 0xedb88320, 0x9abfb3b6, 0x03b6e20c, 0x74b1d29a, + 0xead54739, 0x9dd277af, 0x04db2615, 0x73dc1683, + 0xe3630b12, 0x94643b84, 0x0d6d6a3e, 0x7a6a5aa8, + 0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1, + 0xf00f9344, 0x8708a3d2, 0x1e01f268, 0x6906c2fe, + 0xf762575d, 0x806567cb, 0x196c3671, 0x6e6b06e7, + 0xfed41b76, 0x89d32be0, 0x10da7a5a, 0x67dd4acc, + 0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5, + 0xd6d6a3e8, 0xa1d1937e, 0x38d8c2c4, 0x4fdff252, + 0xd1bb67f1, 0xa6bc5767, 0x3fb506dd, 0x48b2364b, + 0xd80d2bda, 0xaf0a1b4c, 0x36034af6, 0x41047a60, + 0xdf60efc3, 0xa867df55, 0x316e8eef, 0x4669be79, + 0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236, + 0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f, + 0xc5ba3bbe, 0xb2bd0b28, 0x2bb45a92, 0x5cb36a04, + 0xc2d7ffa7, 0xb5d0cf31, 0x2cd99e8b, 0x5bdeae1d, + 0x9b64c2b0, 0xec63f226, 0x756aa39c, 0x026d930a, + 0x9c0906a9, 0xeb0e363f, 0x72076785, 0x05005713, + 0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38, + 0x92d28e9b, 0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21, + 0x86d3d2d4, 0xf1d4e242, 0x68ddb3f8, 0x1fda836e, + 0x81be16cd, 0xf6b9265b, 0x6fb077e1, 0x18b74777, + 0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c, + 0x8f659eff, 0xf862ae69, 0x616bffd3, 0x166ccf45, + 0xa00ae278, 0xd70dd2ee, 0x4e048354, 0x3903b3c2, + 0xa7672661, 0xd06016f7, 0x4969474d, 0x3e6e77db, + 0xaed16a4a, 0xd9d65adc, 0x40df0b66, 0x37d83bf0, + 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9, + 0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6, + 0xbad03605, 0xcdd70693, 0x54de5729, 0x23d967bf, + 0xb3667a2e, 0xc4614ab8, 0x5d681b02, 0x2a6f2b94, + 0xb40bbe37, 0xc30c8ea1, 0x5a05df1b, 0x2d02ef8d, +}; +u32 aic_crc32(u8 *p, u32 len, u32 crc) +{ + while(len--) + { + crc = crc_tab[((crc & 0xFF) ^ *p++)] ^ (crc >> 8); + } + return crc; +} + +int aic_bt_platform_init(struct aic_usb_dev *usbdev) +{ + rwnx_cmd_mgr_init(&usbdev->cmd_mgr); + usbdev->cmd_mgr.usbdev = (void *)usbdev; + return 0; + +} + +void aic_bt_platform_deinit(struct aic_usb_dev *usbdev) +{ + rwnx_cmd_mgr_deinit(&usbdev->cmd_mgr); +} + +#define MD5(x) x[0],x[1],x[2],x[3],x[4],x[5],x[6],x[7],x[8],x[9],x[10],x[11],x[12],x[13],x[14],x[15] +#define MD5PINRT "file md5:%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\r\n" + +static int aic_load_firmware(u32 ** fw_buf, const char *name, struct device *device) +{ + +#ifdef CONFIG_USE_FW_REQUEST + const struct firmware *fw = NULL; + u32 *dst = NULL; + void *buffer=NULL; + MD5_CTX md5; + unsigned char decrypt[16]; + int size = 0; + int ret = 0; + + printk("%s: request firmware = %s \n", __func__ ,name); + + + ret = request_firmware(&fw, name, NULL); + + if (ret < 0) { + printk("Load %s fail\n", name); + release_firmware(fw); + return -1; + } + + size = fw->size; + dst = (u32 *)fw->data; + + if (size <= 0) { + printk("wrong size of firmware file\n"); + release_firmware(fw); + return -1; + } + + + buffer = vmalloc(size); + memset(buffer, 0, size); + memcpy(buffer, dst, size); + + *fw_buf = buffer; + + MD5Init(&md5); + MD5Update(&md5, (unsigned char *)buffer, size); + MD5Final(&md5, decrypt); + printk(MD5PINRT, MD5(decrypt)); + + release_firmware(fw); + + return size; +#else + void *buffer=NULL; + char *path=NULL; + struct file *fp=NULL; + int size = 0, len=0;//, i=0; + ssize_t rdlen=0; + //u32 *src=NULL, *dst = NULL; + MD5_CTX md5; + unsigned char decrypt[16]; +#if defined(CONFIG_PLATFORM_UBUNTU) + struct aicwf_bus *bus_if = dev_get_drvdata(device); + struct aic_usb_dev *usb_dev = bus_if->bus_priv.usb; 
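+	/* usb_dev is only needed to read chipid below, which selects the
+	 * per-chip firmware sub-directory (aic8800 / aic8800D80 / aic8800D80X2)
+	 * under aic_default_fw_path when no custom aic_fw_path is set. */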
+#endif + + /* get the firmware path */ + path = __getname(); + if (!path){ + *fw_buf=NULL; + return -1; + } + + if (strlen(aic_fw_path) > 0) { + printk("%s: use customer define fw_path\n", __func__); + len = snprintf(path, FW_PATH_MAX, "%s/%s", aic_fw_path, name); + } else { + #if defined(CONFIG_PLATFORM_UBUNTU) + if (usb_dev->chipid == PRODUCT_ID_AIC8800) { + len = snprintf(path, FW_PATH_MAX, "%s/%s/%s",aic_default_fw_path, "aic8800", name); + } else if (usb_dev->chipid == PRODUCT_ID_AIC8800D80) { + len = snprintf(path, FW_PATH_MAX, "%s/%s/%s",aic_default_fw_path, "aic8800D80", name); + } else if (usb_dev->chipid == PRODUCT_ID_AIC8800D80X2) { + len = snprintf(path, FW_PATH_MAX, "%s/%s/%s",aic_default_fw_path, "aic8800D80X2", name); + }else { + printk("%s unknown chipid %d\n", __func__, usb_dev->chipid); + } + #else + len = snprintf(path, FW_PATH_MAX, "%s/%s",aic_default_fw_path, name); + #endif + } + + if (len >= FW_PATH_MAX) { + printk("%s: %s file's path too long\n", __func__, name); + *fw_buf=NULL; + __putname(path); + return -1; + } + + printk("%s :firmware path = %s \n", __func__ ,path); + + + /* open the firmware file */ + fp=filp_open(path, O_RDONLY, 0); + if(IS_ERR(fp) || (!fp)){ + printk("%s: %s file failed to open\n", __func__, name); + if(IS_ERR(fp)) + printk("is_Err\n"); + if((!fp)) + printk("null\n"); + *fw_buf=NULL; + __putname(path); + fp=NULL; + return -1; + } + + size = i_size_read(file_inode(fp)); + if(size<=0){ + printk("%s: %s file size invalid %d\n", __func__, name, size); + *fw_buf=NULL; + __putname(path); + filp_close(fp,NULL); + fp=NULL; + return -1; +} + + /* start to read from firmware file */ + buffer = vmalloc(size); + memset(buffer, 0, size); + if(!buffer){ + *fw_buf=NULL; + __putname(path); + filp_close(fp,NULL); + fp=NULL; + return -1; + } + + + #if LINUX_VERSION_CODE > KERNEL_VERSION(4, 13, 16) + rdlen = kernel_read(fp, buffer, size, &fp->f_pos); + #else + rdlen = kernel_read(fp, fp->f_pos, buffer, size); + #endif + + if(size != rdlen){ + printk("%s: %s file rdlen invalid %d %d\n", __func__, name, (int)rdlen, size); + *fw_buf=NULL; + __putname(path); + filp_close(fp,NULL); + fp=NULL; + vfree(buffer); + buffer=NULL; + return -1; + } + if(rdlen > 0){ + fp->f_pos += rdlen; + //printk("f_pos=%d\n", (int)fp->f_pos); + } + + +#if 0 + /*start to transform the data format*/ + src = (u32*)buffer; + //printk("malloc dst\n"); + dst = (u32*)vmalloc(size); + memset(dst, 0, size); + + if(!dst){ + *fw_buf=NULL; + __putname(path); + filp_close(fp,NULL); + fp=NULL; + vfree(buffer); + buffer=NULL; + return -1; + } + + for(i=0;i<(size/4);i++){ + dst[i] = src[i]; + } +#endif + + __putname(path); + filp_close(fp,NULL); + fp=NULL; + //vfree(buffer); + //buffer=NULL; + //*fw_buf = dst; + *fw_buf = (u32 *)buffer; + + MD5Init(&md5); + //MD5Update(&md5, (unsigned char *)dst, size); + MD5Update(&md5, (unsigned char *)buffer, size); + MD5Final(&md5, decrypt); + + printk(MD5PINRT, MD5(decrypt)); + + return size; +#endif + +} + +int rwnx_plat_bin_fw_upload_android(struct aic_usb_dev *usbdev, u32 fw_addr, + char *filename) +{ + struct device *dev = usbdev->dev; + unsigned int i=0; + int size; + u32 *dst=NULL; + int err=0; + + /* load aic firmware */ + size = aic_load_firmware(&dst, filename, dev); + if(size<=0){ + printk("wrong size of firmware file\n"); + vfree(dst); + dst = NULL; + return -1; + } + + /* Copy the file on the Embedded side */ + printk("### Upload %s firmware, @ = %x size=%d\n", filename, fw_addr, size); + + if (size > 1024) {// > 1KB data + for (i = 0; i < (size - 
1024); i += 1024) {//each time write 1KB + err = rwnx_send_dbg_mem_block_write_req(usbdev, fw_addr + i, 1024, dst + i / 4); + if (err) { + printk("bin upload fail: %x, err:%d\r\n", fw_addr + i, err); + break; + } + } + } + + if (!err && (i < size)) {// <1KB data + err = rwnx_send_dbg_mem_block_write_req(usbdev, fw_addr + i, size - i, dst + i / 4); + if (err) { + printk("bin upload fail: %x, err:%d\r\n", fw_addr + i, err); + } + } + + if (dst) { + vfree(dst); + dst = NULL; + } + + printk("fw download complete\n\n"); + + return err; +} + +extern int testmode; +#ifdef CONFIG_M2D_OTA_AUTO_SUPPORT +int rwnx_plat_m2d_flash_ota_android(struct aic_usb_dev *usbdev, char *filename) +{ + struct device *dev = usbdev->dev; + unsigned int i=0; + int size; + u32 *dst=NULL; + int err=0; + int ret; + u8 bond_id; + const u32 mem_addr = 0x40500000; + struct dbg_mem_read_cfm rd_mem_addr_cfm; + + ret = rwnx_send_dbg_mem_read_req(usbdev, mem_addr, &rd_mem_addr_cfm); + if (ret) { + printk("m2d %x rd fail: %d\n", mem_addr, ret); + return ret; + } + bond_id = (u8)(rd_mem_addr_cfm.memdata >> 24); + printk("%x=%x\n", rd_mem_addr_cfm.memaddr, rd_mem_addr_cfm.memdata); + if (bond_id & (1<<1)) { + //flash is invalid + printk("m2d flash is invalid\n"); + return -1; + } + + /* load aic firmware */ + size = aic_load_firmware(&dst, filename, dev); + if(size<=0){ + printk("wrong size of m2d file\n"); + vfree(dst); + dst = NULL; + return -1; + } + + /* Copy the file on the Embedded side */ + printk("### Upload m2d %s flash, size=%d\n", filename, size); + + /*send info first*/ + err = rwnx_send_dbg_mem_block_write_req(usbdev, AIC_M2D_OTA_INFO_ADDR, 4, (u32 *)&size); + + /*send data first*/ + if (size > 1024) {// > 1KB data + for (i = 0; i < (size - 1024); i += 1024) {//each time write 1KB + err = rwnx_send_dbg_mem_block_write_req(usbdev, AIC_M2D_OTA_DATA_ADDR, 1024, dst + i / 4); + if (err) { + printk("m2d upload fail: %x, err:%d\r\n", AIC_M2D_OTA_DATA_ADDR, err); + break; + } + } + } + + if (!err && (i < size)) {// <1KB data + err = rwnx_send_dbg_mem_block_write_req(usbdev, AIC_M2D_OTA_DATA_ADDR, size - i, dst + i / 4); + if (err) { + printk("m2d upload fail: %x, err:%d\r\n", AIC_M2D_OTA_DATA_ADDR, err); + } + } + + if (dst) { + vfree(dst); + dst = NULL; + } + testmode = FW_NORMAL_MODE; + + printk("m2d flash update complete\n\n"); + + return err; +} + +int rwnx_plat_m2d_flash_ota_check(struct aic_usb_dev *usbdev, char *filename) +{ + struct device *dev = usbdev->dev; + unsigned int i=0,j=0; + int size; + u32 *dst=NULL; + int err=0; + int ret=0; + u8 bond_id; + const u32 mem_addr = 0x40500000; + const u32 mem_addr_code_start = AIC_M2D_OTA_CODE_START_ADDR; + const u32 mem_addr_sdk_ver = AIC_M2D_OTA_VER_ADDR; + const u32 driver_code_start_idx = (AIC_M2D_OTA_CODE_START_ADDR-AIC_M2D_OTA_FLASH_ADDR)/4; + const u32 driver_sdk_ver_idx = (AIC_M2D_OTA_VER_ADDR-AIC_M2D_OTA_FLASH_ADDR)/4; + u32 driver_sdk_ver_addr_idx = 0; + u32 code_start_addr = 0xffffffff; + u32 sdk_ver_addr = 0xffffffff; + u32 drv_code_start_addr = 0xffffffff; + u32 drv_sdk_ver_addr = 0xffffffff; + struct dbg_mem_read_cfm rd_mem_addr_cfm; + char m2d_sdk_ver[64]; + char flash_sdk_ver[64]; + u32 flash_ver[16]; + u32 ota_ver[16]; + + ret = rwnx_send_dbg_mem_read_req(usbdev, mem_addr, &rd_mem_addr_cfm); + if (ret) { + printk("m2d %x rd fail: %d\n", mem_addr, ret); + return ret; + } + bond_id = (u8)(rd_mem_addr_cfm.memdata >> 24); + printk("%x=%x\n", rd_mem_addr_cfm.memaddr, rd_mem_addr_cfm.memdata); + if (bond_id & (1<<1)) { + //flash is invalid + printk("m2d flash is 
invalid\n"); + return -1; + } + ret = rwnx_send_dbg_mem_read_req(usbdev, mem_addr_code_start, &rd_mem_addr_cfm); + if (ret){ + printk("mem_addr_code_start %x rd fail: %d\n", mem_addr_code_start, ret); + return ret; + } + code_start_addr = rd_mem_addr_cfm.memdata; + + ret = rwnx_send_dbg_mem_read_req(usbdev, mem_addr_sdk_ver, &rd_mem_addr_cfm); + if (ret){ + printk("mem_addr_sdk_ver %x rd fail: %d\n", mem_addr_code_start, ret); + return ret; + } + sdk_ver_addr = rd_mem_addr_cfm.memdata; + printk("code_start_addr: 0x%x, sdk_ver_addr: 0x%x\n", code_start_addr,sdk_ver_addr); + + /* load aic firmware */ + size = aic_load_firmware(&dst, filename, dev); + if(size<=0){ + printk("wrong size of m2d file\n"); + vfree(dst); + dst = NULL; + return -1; + } + if(code_start_addr == 0xffffffff && sdk_ver_addr == 0xffffffff) { + printk("########m2d flash old version , must be upgrade\n"); + drv_code_start_addr = dst[driver_code_start_idx]; + drv_sdk_ver_addr = dst[driver_sdk_ver_idx]; + + printk("drv_code_start_addr: 0x%x, drv_sdk_ver_addr: 0x%x\n", drv_code_start_addr,drv_sdk_ver_addr); + + if(drv_sdk_ver_addr == 0xffffffff){ + printk("########driver m2d_ota.bin is old ,not need upgrade\n"); + return -1; + } + + } else { + for(i=0;i<16;i++){ + ret = rwnx_send_dbg_mem_read_req(usbdev, (sdk_ver_addr+i*4), &rd_mem_addr_cfm); + if (ret){ + printk("mem_addr_sdk_ver %x rd fail: %d\n", mem_addr_code_start, ret); + return ret; + } + flash_ver[i] = rd_mem_addr_cfm.memdata; + } + memcpy((u8 *)flash_sdk_ver,(u8 *)flash_ver,64); + memcpy((u8 *)saved_sdk_ver,(u8 *)flash_sdk_ver,64); + printk("flash SDK Version: %s\r\n\r\n", flash_sdk_ver); + + drv_code_start_addr = dst[driver_code_start_idx]; + drv_sdk_ver_addr = dst[driver_sdk_ver_idx]; + + printk("drv_code_start_addr: 0x%x, drv_sdk_ver_addr: 0x%x\n", drv_code_start_addr,drv_sdk_ver_addr); + + if(drv_sdk_ver_addr == 0xffffffff){ + printk("########driver m2d_ota.bin is old ,not need upgrade\n"); + return -1; + } + + driver_sdk_ver_addr_idx = (drv_sdk_ver_addr-drv_code_start_addr)/4; + printk("driver_sdk_ver_addr_idx %d\n",driver_sdk_ver_addr_idx); + + if (driver_sdk_ver_addr_idx){ + for(j = 0; j < 16; j++){ + ota_ver[j] = dst[driver_sdk_ver_addr_idx+j]; + } + memcpy((u8 *)m2d_sdk_ver,(u8 *)ota_ver,64); + printk("m2d_ota SDK Version: %s\r\n\r\n", m2d_sdk_ver); + } else { + return -1; + } + + if(!strcmp(m2d_sdk_ver,flash_sdk_ver)){ + printk("######## m2d %s flash is not need upgrade\r\n", filename); + return -1; + } + } + + /* Copy the file on the Embedded side */ + printk("### Upload m2d %s flash, size=%d\n", filename, size); + + /*send info first*/ + err = rwnx_send_dbg_mem_block_write_req(usbdev, AIC_M2D_OTA_INFO_ADDR, 4, (u32 *)&size); + + /*send data first*/ + if (size > 1024) {// > 1KB data + for (i = 0; i < (size - 1024); i += 1024) {//each time write 1KB + err = rwnx_send_dbg_mem_block_write_req(usbdev, AIC_M2D_OTA_DATA_ADDR, 1024, dst + i / 4); + if (err) { + printk("m2d upload fail: %x, err:%d\r\n", AIC_M2D_OTA_DATA_ADDR, err); + break; + } + } + } + + if (!err && (i < size)) {// <1KB data + err = rwnx_send_dbg_mem_block_write_req(usbdev, AIC_M2D_OTA_DATA_ADDR, size - i, dst + i / 4); + if (err) { + printk("m2d upload fail: %x, err:%d\r\n", AIC_M2D_OTA_DATA_ADDR, err); + } + } + + if (dst) { + vfree(dst); + dst = NULL; + } + testmode = FW_NORMAL_MODE; + + printk("m2d flash update complete\n\n"); + + return err; +} +#endif//CONFIG_M2D_OTA_AUTO_SUPPORT + +int rwnx_plat_flash_bin_upload_android(struct aic_usb_dev *usbdev, u32 fw_addr, + char *filename) +{ + struct 
device *dev = usbdev->dev; + unsigned int i=0; + int size; + u32 *dst=NULL; + int err=0; + const u32 mem_addr = fw_addr; + struct dbg_mem_read_cfm rd_mem_addr_cfm; + u32 crc = ~0UL; + + /* load aic firmware */ + size = aic_load_firmware(&dst, filename, dev); + flash_write_size = size; + if(size<=0){ + printk("wrong size of firmware file\n"); + vfree(dst); + dst = NULL; + return ENOENT; + } + + printk("size %x, flash_erase_len %x\n", size, flash_erase_len); + if (size != flash_erase_len || (flash_erase_len & 0xFFF)) { + printk("wrong size of flash_erase_len %d\n", flash_erase_len); + vfree(dst); + dst = NULL; + return -1; + } + + err = rwnx_send_dbg_mem_read_req(usbdev, mem_addr, &rd_mem_addr_cfm); + if (err) { + printk("%x rd fail: %d\n", mem_addr, err); + return err; + } + + if (rd_mem_addr_cfm.memdata != 0xffffffff) { + //erase flash + if (size > 0x40000) { + for (i = 0; i < (size - 0x40000); i +=0x40000) {//each time erase 256K + err = rwnx_send_dbg_mem_mask_write_req(usbdev, fw_addr+i, 0xf150e250, 0x40000); + if (err) { + printk("flash erase fail: %x, err:%d\r\n", fw_addr + i, err); + return err; + } + } + } + if (!err && (i < size)) {// <256KB data + err = rwnx_send_dbg_mem_mask_write_req(usbdev, fw_addr + i, 0xf150e250, size - i); + if (err) { + printk("flash erase fail: %x, err:%d\r\n", fw_addr + i, err); + } + } + } + + /* Copy the file on the Embedded side */ + printk("### Upload %s firmware, @ = %x size=%d\n", filename, fw_addr, size); + crc = aic_crc32((u8 *)dst, size, crc); + flash_write_bin_crc = crc; + + if (size > 1024) {// > 1KB data + for (i = 0; i < (size - 1024); i += 1024) {//each time write 1KB + err = rwnx_send_dbg_mem_block_write_req(usbdev, fw_addr + i, 1024, dst + i / 4); + if (err) { + printk("bin upload fail: %x, err:%d\r\n", fw_addr + i, err); + break; + } + } + } + + if (!err && (i < size)) {// <1KB data + err = rwnx_send_dbg_mem_block_write_req(usbdev, fw_addr + i, size - i, dst + i / 4); + if (err) { + printk("bin upload fail: %x, err:%d\r\n", fw_addr + i, err); + } + } + + if (dst) { + vfree(dst); + dst = NULL; + } + + printk("fw download complete\n\n"); + + return err; +} + + +uint32_t rwnx_atoli(char *value){ + int len = 0; + int temp_len = 0; + int i = 0; + uint32_t result = 0; + + temp_len = strlen(value); + + for(i = 0;i < temp_len; i++){ + if((value[i] >= 48 && value[i] <= 57) || + (value[i] >= 65 && value[i] <= 70) || + (value[i] >= 97 && value[i] <= 102)){ + len++; + } + } + + //printk("%s len:%d \r\n", __func__, len); + + for(i = 0; i < len; i++){ + result = result * 16; + if(value[i] >= 48 && value[i] <= 57){ + result += value[i] - 48; + }else if(value[i] >= 65 && value[i] <= 70){ + result += (value[i] - 65) + 10; + }else if(value[i] >= 97 && value[i] <= 102){ + result += (value[i] - 97) + 10; + } + } + + return result; +} + +int8_t rwnx_atoi(char *value){ + int len = 0; + int i = 0; + int8_t result = 0; + int8_t signal = 1; + + len = strlen(value); + //printk("%s len:%d \r\n", __func__, len); + + for(i = 0;i < len ;i++){ + if(i == 0 && value[0] == '-'){ + signal = -1; + continue; + } + + result = result * 10; + if(value[i] >= 48 && value[i] <= 57){ + result += value[i] - 48; + }else{ + result = 0; + break; + } + } + + result = result * signal; + //printk("%s result:%d \r\n", __func__, result); + + return result; +} + +void get_fw_path(char* fw_path){ + if (strlen(aic_fw_path) > 0) { + memcpy(fw_path, aic_fw_path, strlen(aic_fw_path)); + }else{ + memcpy(fw_path, aic_default_fw_path, strlen(aic_default_fw_path)); + } +} + +void set_testmode(int 
val){ + testmode = val; +} + +int get_testmode(void){ + return testmode; +} + +int get_hardware_info(void){ + return AIC_HW_INFO; +} + +extern int adap_test; +int get_adap_test(void){ + return adap_test; +} + +int get_flash_bin_size(void) +{ + return flash_write_size; +} + +u32 get_flash_bin_crc(void) +{ + return flash_write_bin_crc; +} + +EXPORT_SYMBOL(get_fw_path); + +EXPORT_SYMBOL(get_testmode); + +EXPORT_SYMBOL(set_testmode); + +EXPORT_SYMBOL(get_hardware_info); + +EXPORT_SYMBOL(get_adap_test); + +EXPORT_SYMBOL(get_flash_bin_size); +EXPORT_SYMBOL(get_flash_bin_crc); + + +void get_userconfig_xtal_cap(xtal_cap_conf_t *xtal_cap) +{ + xtal_cap->enable = userconfig_xtal_cap.enable; + xtal_cap->xtal_cap = userconfig_xtal_cap.xtal_cap; + xtal_cap->xtal_cap_fine = userconfig_xtal_cap.xtal_cap_fine; + + printk("%s:enable :%d\r\n", __func__, xtal_cap->enable); + printk("%s:xtal_cap :%d\r\n", __func__, xtal_cap->xtal_cap); + printk("%s:xtal_cap_fine:%d\r\n", __func__, xtal_cap->xtal_cap_fine); +} + +EXPORT_SYMBOL(get_userconfig_xtal_cap); + +void get_userconfig_txpwr_idx(txpwr_idx_conf_t *txpwr_idx){ + txpwr_idx->enable = userconfig_txpwr_idx.enable; + txpwr_idx->dsss = userconfig_txpwr_idx.dsss; + txpwr_idx->ofdmlowrate_2g4 = userconfig_txpwr_idx.ofdmlowrate_2g4; + txpwr_idx->ofdm64qam_2g4 = userconfig_txpwr_idx.ofdm64qam_2g4; + txpwr_idx->ofdm256qam_2g4 = userconfig_txpwr_idx.ofdm256qam_2g4; + txpwr_idx->ofdm1024qam_2g4 = userconfig_txpwr_idx.ofdm1024qam_2g4; + txpwr_idx->ofdmlowrate_5g = userconfig_txpwr_idx.ofdmlowrate_5g; + txpwr_idx->ofdm64qam_5g = userconfig_txpwr_idx.ofdm64qam_5g; + txpwr_idx->ofdm256qam_5g = userconfig_txpwr_idx.ofdm256qam_5g; + txpwr_idx->ofdm1024qam_5g = userconfig_txpwr_idx.ofdm1024qam_5g; + + printk("%s:enable:%d\r\n", __func__, txpwr_idx->enable); + printk("%s:dsss:%d\r\n", __func__, txpwr_idx->dsss); + printk("%s:ofdmlowrate_2g4:%d\r\n", __func__, txpwr_idx->ofdmlowrate_2g4); + printk("%s:ofdm64qam_2g4:%d\r\n", __func__, txpwr_idx->ofdm64qam_2g4); + printk("%s:ofdm256qam_2g4:%d\r\n", __func__, txpwr_idx->ofdm256qam_2g4); + printk("%s:ofdm1024qam_2g4:%d\r\n", __func__, txpwr_idx->ofdm1024qam_2g4); + printk("%s:ofdmlowrate_5g:%d\r\n", __func__, txpwr_idx->ofdmlowrate_5g); + printk("%s:ofdm64qam_5g:%d\r\n", __func__, txpwr_idx->ofdm64qam_5g); + printk("%s:ofdm256qam_5g:%d\r\n", __func__, txpwr_idx->ofdm256qam_5g); + printk("%s:ofdm1024qam_5g:%d\r\n", __func__, txpwr_idx->ofdm1024qam_5g); + +} + +EXPORT_SYMBOL(get_userconfig_txpwr_idx); + +void get_userconfig_txpwr_ofst(txpwr_ofst_conf_t *txpwr_ofst){ + txpwr_ofst->enable = userconfig_txpwr_ofst.enable; + txpwr_ofst->chan_1_4 = userconfig_txpwr_ofst.chan_1_4; + txpwr_ofst->chan_5_9 = userconfig_txpwr_ofst.chan_5_9; + txpwr_ofst->chan_10_13 = userconfig_txpwr_ofst.chan_10_13; + txpwr_ofst->chan_36_64 = userconfig_txpwr_ofst.chan_36_64; + txpwr_ofst->chan_100_120 = userconfig_txpwr_ofst.chan_100_120; + txpwr_ofst->chan_122_140 = userconfig_txpwr_ofst.chan_122_140; + txpwr_ofst->chan_142_165 = userconfig_txpwr_ofst.chan_142_165; + + printk("%s:ofst_enable:%d\r\n", __func__, txpwr_ofst->enable); + printk("%s:ofst_chan_1_4:%d\r\n", __func__, txpwr_ofst->chan_1_4); + printk("%s:ofst_chan_5_9:%d\r\n", __func__, txpwr_ofst->chan_5_9); + printk("%s:ofst_chan_10_13:%d\r\n", __func__, txpwr_ofst->chan_10_13); + printk("%s:ofst_chan_36_64:%d\r\n", __func__, txpwr_ofst->chan_36_64); + printk("%s:ofst_chan_100_120:%d\r\n", __func__, txpwr_ofst->chan_100_120); + printk("%s:ofst_chan_122_140:%d\r\n", __func__, 
txpwr_ofst->chan_122_140); + printk("%s:ofst_chan_142_165:%d\r\n", __func__, txpwr_ofst->chan_142_165); + +} + +EXPORT_SYMBOL(get_userconfig_txpwr_ofst); + +void rwnx_plat_userconfig_set_value(char *command, char *value){ + //TODO send command + printk("%s:command=%s value=%s \r\n", __func__, command, value); + if(!strcmp(command, "enable")){ + userconfig_txpwr_idx.enable = rwnx_atoi(value); + }else if(!strcmp(command, "dsss")){ + userconfig_txpwr_idx.dsss = rwnx_atoi(value); + }else if(!strcmp(command, "ofdmlowrate_2g4")){ + userconfig_txpwr_idx.ofdmlowrate_2g4 = rwnx_atoi(value); + }else if(!strcmp(command, "ofdm64qam_2g4")){ + userconfig_txpwr_idx.ofdm64qam_2g4 = rwnx_atoi(value); + }else if(!strcmp(command, "ofdm256qam_2g4")){ + userconfig_txpwr_idx.ofdm256qam_2g4 = rwnx_atoi(value); + }else if(!strcmp(command, "ofdm1024qam_2g4")){ + userconfig_txpwr_idx.ofdm1024qam_2g4 = rwnx_atoi(value); + }else if(!strcmp(command, "ofdmlowrate_5g")){ + userconfig_txpwr_idx.ofdmlowrate_5g = rwnx_atoi(value); + }else if(!strcmp(command, "ofdm64qam_5g")){ + userconfig_txpwr_idx.ofdm64qam_5g = rwnx_atoi(value); + }else if(!strcmp(command, "ofdm256qam_5g")){ + userconfig_txpwr_idx.ofdm256qam_5g = rwnx_atoi(value); + }else if(!strcmp(command, "ofdm1024qam_5g")){ + userconfig_txpwr_idx.ofdm1024qam_5g = rwnx_atoi(value); + }else if(!strcmp(command, "ofst_enable")){ + userconfig_txpwr_ofst.enable = rwnx_atoi(value); + }else if(!strcmp(command, "ofst_chan_1_4")){ + userconfig_txpwr_ofst.chan_1_4 = rwnx_atoi(value); + }else if(!strcmp(command, "ofst_chan_5_9")){ + userconfig_txpwr_ofst.chan_5_9 = rwnx_atoi(value); + }else if(!strcmp(command, "ofst_chan_10_13")){ + userconfig_txpwr_ofst.chan_10_13 = rwnx_atoi(value); + }else if(!strcmp(command, "ofst_chan_36_64")){ + userconfig_txpwr_ofst.chan_36_64 = rwnx_atoi(value); + }else if(!strcmp(command, "ofst_chan_100_120")){ + userconfig_txpwr_ofst.chan_100_120 = rwnx_atoi(value); + }else if(!strcmp(command, "ofst_chan_122_140")){ + userconfig_txpwr_ofst.chan_122_140 = rwnx_atoi(value); + }else if(!strcmp(command, "ofst_chan_142_165")){ + userconfig_txpwr_ofst.chan_142_165 = rwnx_atoi(value); + }else if(!strcmp(command, "xtal_enable")){ + userconfig_xtal_cap.enable = rwnx_atoi(value); + }else if(!strcmp(command, "xtal_cap")){ + userconfig_xtal_cap.xtal_cap = rwnx_atoi(value); + }else if(!strcmp(command, "xtal_cap_fine")){ + userconfig_xtal_cap.xtal_cap_fine = rwnx_atoi(value); + } +} + +void rwnx_plat_userconfig_parsing(char *buffer, int size){ + int i = 0; + int parse_state = 0; + char command[30]; + char value[100]; + int char_counter = 0; + + memset(command, 0, 30); + memset(value, 0, 100); + + for(i = 0; i < size; i++){ + + //Send command or print nvram log when char is \r or \n + if(buffer[i] == 0x0a || buffer[i] == 0x0d){ + if(command[0] != 0 && value[0] != 0){ + if(parse_state == PRINT){ + printk("%s:%s\r\n", __func__, value); + }else if(parse_state == GET_VALUE){ + rwnx_plat_userconfig_set_value(command, value); + } + } + //Reset command value and char_counter + memset(command, 0, 30); + memset(value, 0, 100); + char_counter = 0; + parse_state = INIT; + continue; + } + + //Switch parser state + if(parse_state == INIT){ + if(buffer[i] == '#'){ + parse_state = PRINT; + continue; + }else if(buffer[i] == 0x0a || buffer[i] == 0x0d){ + parse_state = INIT; + continue; + }else{ + parse_state = CMD; + } + } + + //Fill data to command and value + if(parse_state == PRINT){ + command[0] = 0x01; + value[char_counter] = buffer[i]; + char_counter++; + }else if(parse_state 
== CMD){ + if(command[0] != 0 && buffer[i] == '='){ + parse_state = GET_VALUE; + char_counter = 0; + continue; + } + command[char_counter] = buffer[i]; + char_counter++; + }else if(parse_state == GET_VALUE){ + value[char_counter] = buffer[i]; + char_counter++; + } + } + + +} + +int rwnx_plat_userconfig_upload_android(struct aic_usb_dev *usbdev, char *filename){ + int size; + u32 *dst=NULL; + struct device *dev = usbdev->dev; + + printk("userconfig file path:%s \r\n", filename); + + /* load aic firmware */ + size = aic_load_firmware(&dst, filename, dev); + if(size <= 0){ + printk("wrong size of firmware file\n"); + vfree(dst); + dst = NULL; + return 0; + } + + /* Copy the file on the Embedded side */ + printk("### Upload %s userconfig, size=%d\n", filename, size); + + rwnx_plat_userconfig_parsing((char *)dst, size); + + if (dst) { + vfree(dst); + dst = NULL; + } + + printk("userconfig download complete\n\n"); + return 0; +} + + + +int aicbt_patch_table_free(struct aicbt_patch_table *head) +{ + struct aicbt_patch_table *p = head, *n = NULL; + while (p) { + n = p->next; + vfree(p->name); + vfree(p->data); + vfree(p); + p = n; + } + head = NULL; + return 0; +} + +struct aicbt_patch_table *aicbt_patch_table_alloc(struct aic_usb_dev *usbdev,const char *filename) +{ + struct device *dev = usbdev->dev; + struct aicbt_patch_table *head = NULL; + struct aicbt_patch_table *new = NULL; + struct aicbt_patch_table *cur = NULL; + int size; + int ret = 0; + uint8_t *rawdata=NULL; + uint8_t *p = NULL; + + /* load aic firmware */ + size = aic_load_firmware((u32 **)&rawdata, filename, dev); + + /* Copy the file on the Embedded side */ + printk("### Upload %s fw_patch_table, size=%d\n", filename, size); + + if (size <= 0) { + printk("wrong size of firmware file\n"); + ret = -1; + goto err; + } + + p = rawdata; + + if (memcmp(p, AICBT_PT_TAG, sizeof(AICBT_PT_TAG) < 16 ? 
sizeof(AICBT_PT_TAG) : 16)) { + printk("TAG err\n"); + ret = -1; + goto err; + } + p += 16; + + while (p - rawdata < size) { + //printk("size = %d p - rawdata = %d \r\n", size, p - rawdata); + new = (struct aicbt_patch_table *)vmalloc(sizeof(struct aicbt_patch_table)); + memset(new, 0, sizeof(struct aicbt_patch_table)); + if (head == NULL) { + head = new; + cur = new; + } else { + cur->next = new; + cur = cur->next; + } + + cur->name = (char *)vmalloc(sizeof(char) * 16); + memset(cur->name, 0, sizeof(char) * 16); + memcpy(cur->name, p, 16); + p += 16; + + cur->type = *(uint32_t *)p; + p += 4; + + cur->len = *(uint32_t *)p; + p += 4; + + if((cur->type ) >= 1000 || cur->len == 0) {//Temp Workaround + cur->len = 0; + }else{ + cur->data = (uint32_t *)vmalloc(sizeof(uint8_t) * cur->len * 8); + memset(cur->data, 0, sizeof(uint8_t) * cur->len * 8); + memcpy(cur->data, p, cur->len * 8); + p += cur->len * 8; + } + } + + vfree(rawdata); + + return head; + +err: + aicbt_patch_table_free(head); + if (rawdata) + vfree(rawdata); + return NULL; +} + +struct aicbsp_info_t aicbsp_info = { + .hwinfo = AICBSP_HWINFO_DEFAULT, + .cpmode = AICBSP_CPMODE_DEFAULT, +}; + + + +static struct aicbt_info_t aicbt_info[] = { + { + .btmode = AICBT_BTMODE_DEFAULT, + .btport = AICBT_BTPORT_DEFAULT, + .uart_baud = AICBT_UART_BAUD_DEFAULT, + .uart_flowctrl = AICBT_UART_FC_DEFAULT, + .lpm_enable = AICBT_LPM_ENABLE_DEFAULT, + .txpwr_lvl = AICBT_TXPWR_LVL_DEFAULT, + },//PRODUCT_ID_AIC8800 + { + },//PRODUCT_ID_AIC8801 + { + },//PRODUCT_ID_AIC8800DC + { + },//PRODUCT_ID_AIC8800DW + { + .btmode = AICBT_BTMODE_DEFAULT_8800d80, + .btport = AICBT_BTPORT_DEFAULT, + .uart_baud = AICBT_UART_BAUD_DEFAULT, + .uart_flowctrl = AICBT_UART_FC_DEFAULT, + .lpm_enable = AICBT_LPM_ENABLE_DEFAULT, + .txpwr_lvl = AICBT_TXPWR_LVL_DEFAULT_8800d80, + },//PRODUCT_ID_AIC8800D80 + { + },//PRODUCT_ID_AIC8800D81 + { + .btmode = AICBT_BTMODE_DEFAULT_8800d80x2, + .btport = AICBT_BTPORT_DEFAULT, + .uart_baud = AICBT_UART_BAUD_DEFAULT, + .uart_flowctrl = AICBT_UART_FC_DEFAULT, + .lpm_enable = AICBT_LPM_ENABLE_DEFAULT, + .txpwr_lvl = AICBT_TXPWR_LVL_DEFAULT_8800d80x2, + },//PRODUCT_ID_AIC8800D80X2 +}; + +#ifdef CONFIG_LOAD_BT_CONF +static const char *aicbt_find_tag(const u8 *file_data, unsigned int file_size, + const char *tag_name, unsigned int tag_len) +{ + unsigned int line_start = 0, tag_name_len = strlen(tag_name); + const char *comment_symbols = "#;"; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + while (line_start < file_size) { + unsigned int line_end = line_start; + + while (line_end < file_size && file_data[line_end] != '\n' && file_data[line_end] != '\r') { + line_end++; + } + + if (line_end - line_start >= tag_name_len && + !strncmp((const char*)&file_data[line_start], tag_name, tag_name_len)) + { + const char *value_start = (const char*)&file_data[line_start + tag_name_len]; + const char *value_end = (const char*)&file_data[line_end]; + + while (value_start < value_end && (*value_start == ' ' || *value_start == '=')) { + value_start++; + } + const char *comment_pos = value_start; + while (comment_pos < value_end && !strchr(comment_symbols, *comment_pos)) { + comment_pos++; + } + while (comment_pos > value_start && (*(comment_pos-1) == ' ' || *(comment_pos-1) == '\t')) { + comment_pos--; + } + if (comment_pos > value_start) { + return value_start; + } + } + + line_start = line_end; + while (line_start < file_size && (file_data[line_start] == '\n' || file_data[line_start] == '\r')) { + line_start++; + } + } + return NULL; +} + +void aicbt_parse_config(struct 
aic_usb_dev *usbdev, const char *filename) +{ + struct device *dev = usbdev->dev; + u32 *dst = NULL; + int size; + const u8 *tag_ptr; + u32 tmp_val; + //char *filename = "aicbt.conf"; + + RWNX_DBG(RWNX_FN_ENTRY_STR); + + size = aic_load_firmware((u32 **)&dst, filename, dev); + if (size <= 0) { + AICWFDBG(LOGERROR, "%s: load %s fail (%d)\n", __func__, filename, size); + return; + } + + tag_ptr = aicbt_find_tag((char*)dst, size, "BTMODE=", strlen("0")); + if (tag_ptr) { + if (sscanf(tag_ptr, "%x", &aicbt_info[usbdev->chipid].btmode) != 1 || + aicbt_info[usbdev->chipid].btmode > AICBT_BTMODE_BT_ONLY_COANT) { + aicbt_info[usbdev->chipid].btmode = AICBT_BTMODE_DEFAULT; + AICWFDBG(LOGERROR, "BTMODE invalid, use default %02X\n", AICBT_BTMODE_DEFAULT); + } + } else { + aicbt_info[usbdev->chipid].btmode = AICBT_BTMODE_DEFAULT; + } + + tag_ptr = aicbt_find_tag((char*)dst, size, "BTPORT=", strlen("0")); + if (tag_ptr) { + if (sscanf(tag_ptr, "%x", &aicbt_info[usbdev->chipid].btport) != 1 || + aicbt_info[usbdev->chipid].btport > AICBT_BTPORT_UART) { + aicbt_info[usbdev->chipid].btport = AICBT_BTPORT_DEFAULT; + } + } else { + aicbt_info[usbdev->chipid].btport = AICBT_BTPORT_DEFAULT; + } + + tag_ptr = aicbt_find_tag((char*)dst, size, "UART_BAUD=", 0); + if (tag_ptr) { + if (sscanf(tag_ptr, "%u", &tmp_val) == 1) { + if(tmp_val >= AICBT_UART_BAUD_115200 && tmp_val <= AICBT_UART_BAUD_3_25M) { + aicbt_info[usbdev->chipid].uart_baud = tmp_val; + } else { + aicbt_info[usbdev->chipid].uart_baud = AICBT_UART_BAUD_DEFAULT; + AICWFDBG(LOGERROR, "UART_BAUD %u invalid, use default %d\n", tmp_val, AICBT_UART_BAUD_DEFAULT); + } + } else { + aicbt_info[usbdev->chipid].uart_baud = AICBT_UART_BAUD_DEFAULT; + } + } else { + aicbt_info[usbdev->chipid].uart_baud = AICBT_UART_BAUD_DEFAULT; + } + tag_ptr = aicbt_find_tag((char*)dst, size, "UART_FC=", strlen("0")); + if (tag_ptr) { + if (sscanf(tag_ptr, "%x", &aicbt_info[usbdev->chipid].uart_flowctrl) != 1 || + aicbt_info[usbdev->chipid].uart_flowctrl > AICBT_UART_FLOWCTRL_ENABLE) { + aicbt_info[usbdev->chipid].uart_flowctrl = AICBT_UART_FC_DEFAULT; + } + } else { + aicbt_info[usbdev->chipid].uart_flowctrl = AICBT_UART_FC_DEFAULT; + } + + tag_ptr = aicbt_find_tag((char*)dst, size, "LPM_ENABLE=", strlen("0")); + if (tag_ptr) { + if (sscanf(tag_ptr, "%x", &aicbt_info[usbdev->chipid].lpm_enable) != 1 || + aicbt_info[usbdev->chipid].lpm_enable > 1) { + aicbt_info[usbdev->chipid].lpm_enable = AICBT_LPM_ENABLE_DEFAULT; + } + } else { + aicbt_info[usbdev->chipid].lpm_enable = AICBT_LPM_ENABLE_DEFAULT; + } + tag_ptr = aicbt_find_tag((char*)dst, size, "TXPWR_LVL=", strlen("0x6F2F")); + if (tag_ptr) { + if (sscanf(tag_ptr, "%08x", &tmp_val) == 1) { + if (tmp_val >= 0 || tmp_val <= 0X7F7F) { + aicbt_info[usbdev->chipid].txpwr_lvl = tmp_val; + } else { + aicbt_info[usbdev->chipid].txpwr_lvl = AICBT_TXPWR_LVL_DEFAULT; + } + } else { + aicbt_info[usbdev->chipid].txpwr_lvl = AICBT_TXPWR_LVL_DEFAULT; + } + } else { + aicbt_info[usbdev->chipid].txpwr_lvl = AICBT_TXPWR_LVL_DEFAULT; + } + vfree(dst); + AICWFDBG(LOGINFO, "%s: btmode %d btport %d uart baud %d uart fc %d lpm %d txpwrlvl %4X\n", + __func__, + aicbt_info[usbdev->chipid].btmode, + aicbt_info[usbdev->chipid].btport, + aicbt_info[usbdev->chipid].uart_baud, + aicbt_info[usbdev->chipid].uart_flowctrl, + aicbt_info[usbdev->chipid].lpm_enable, + aicbt_info[usbdev->chipid].txpwr_lvl); +} +#endif + +int aicbt_patch_table_load(struct aic_usb_dev *usbdev, struct aicbt_patch_table *_head) +{ + struct aicbt_patch_table *head, *p; + int ret = 0, 
i; + uint32_t *data = NULL; + + head = _head; + + for (p = head; p != NULL; p = p->next) { + data = p->data; + if(AICBT_PT_BTMODE == p->type){ + *(data + 1) = aicbsp_info.hwinfo < 0; + *(data + 3) = aicbsp_info.hwinfo; + *(data + 5) = aicbsp_info.cpmode; + + *(data + 7) = aicbt_info[usbdev->chipid].btmode; + *(data + 9) = aicbt_info[usbdev->chipid].btport; + *(data + 11) = aicbt_info[usbdev->chipid].uart_baud; + *(data + 13) = aicbt_info[usbdev->chipid].uart_flowctrl; + *(data + 15) = aicbt_info[usbdev->chipid].lpm_enable; + *(data + 17) = aicbt_info[usbdev->chipid].txpwr_lvl; + + printk("%s bt btmode[%d]:%d\r\n", __func__, usbdev->chipid, aicbt_info[usbdev->chipid].btmode); + printk("%s bt btport[%d]:%d\r\n", __func__, usbdev->chipid, aicbt_info[usbdev->chipid].btport); + printk("%s bt uart_baud[%d]:%d\r\n", __func__, usbdev->chipid, aicbt_info[usbdev->chipid].uart_baud); + printk("%s bt uart_flowctrl[%d]:%d\r\n", __func__, usbdev->chipid, aicbt_info[usbdev->chipid].uart_flowctrl); + printk("%s bt lpm_enable[%d]:%d\r\n", __func__, usbdev->chipid, aicbt_info[usbdev->chipid].lpm_enable); + printk("%s bt tx_pwr[%d]:%4X\r\n", __func__, usbdev->chipid, aicbt_info[usbdev->chipid].txpwr_lvl); + + } + if (p->type == 0x06) { + char *data_s = (char *)p->data; + printk("patch version %s\n", data_s); + continue; + } + for (i = 0; i < p->len; i++) { + ret = rwnx_send_dbg_mem_write_req(usbdev, *data, *(data + 1)); + if (ret != 0) + return ret; + data += 2; + } + if (p->type == AICBT_PT_PWRON) + udelay(500); + } + aicbt_patch_table_free(head); + return 0; +} + +int aicbt_patch_info_unpack(struct aicbt_patch_info_t *patch_info, struct aicbt_patch_table *head_t) +{ + uint8_t *patch_info_array = (uint8_t*)patch_info; + int base_len = 0; + int memcpy_len = 0; + + if (AICBT_PT_INF == head_t->type) { + base_len = ((offsetof(struct aicbt_patch_info_t, ext_patch_nb_addr) - offsetof(struct aicbt_patch_info_t, adid_addrinf) )/sizeof(uint32_t))/2; + AICWFDBG(LOGDEBUG, "%s head_t->len:%d base_len:%d \r\n", __func__, head_t->len, base_len); + + if (head_t->len > base_len){ + patch_info->info_len = base_len; + memcpy_len = patch_info->info_len + 1;//include ext patch nb + } else{ + patch_info->info_len = head_t->len; + memcpy_len = patch_info->info_len; + } + head_t->len = patch_info->info_len; + AICWFDBG(LOGDEBUG, "%s memcpy_len:%d \r\n", __func__, memcpy_len); + + if (patch_info->info_len == 0) + return 0; + + memcpy(((patch_info_array) + sizeof(patch_info->info_len)), + head_t->data, + memcpy_len * sizeof(uint32_t) * 2); + AICWFDBG(LOGDEBUG, "%s adid_addrinf:%x addr_adid:%x \r\n", __func__, + ((struct aicbt_patch_info_t *)patch_info_array)->adid_addrinf, + ((struct aicbt_patch_info_t *)patch_info_array)->addr_adid); + + if (patch_info->ext_patch_nb > 0){ + int index = 0; + patch_info->ext_patch_param = (uint32_t *)(head_t->data + ((memcpy_len) * 2)); + + for(index = 0; index < patch_info->ext_patch_nb; index++){ + AICWFDBG(LOGDEBUG, "%s id:%x addr:%x \r\n", __func__, + *(patch_info->ext_patch_param + (index * 2)), + *(patch_info->ext_patch_param + (index * 2) + 1)); + } + } + + } + return 0; + +} + +int rwnx_plat_bin_fw_patch_table_upload_android(struct aic_usb_dev *usbdev, char *filename){ + struct device *dev = usbdev->dev; + struct aicbt_patch_table *head = NULL; + struct aicbt_patch_table *new = NULL; + struct aicbt_patch_table *cur = NULL; + int size; + int ret = 0; + uint8_t *rawdata=NULL; + uint8_t *p = NULL; + + /* load aic firmware */ + size = aic_load_firmware((u32 **)&rawdata, filename, dev); + + /* 
Copy the file on the Embedded side */ + printk("### Upload %s fw_patch_table, size=%d\n", filename, size); + + if (size <= 0) { + printk("wrong size of firmware file\n"); + ret = -1; + goto err; + } + + p = rawdata; + + if (memcmp(p, AICBT_PT_TAG, sizeof(AICBT_PT_TAG) < 16 ? sizeof(AICBT_PT_TAG) : 16)) { + printk("TAG err\n"); + ret = -1; + goto err; + } + p += 16; + + while (p - rawdata < size) { + //printk("size = %d p - rawdata = %d \r\n", size, p - rawdata); + new = (struct aicbt_patch_table *)vmalloc(sizeof(struct aicbt_patch_table)); + memset(new, 0, sizeof(struct aicbt_patch_table)); + if (head == NULL) { + head = new; + cur = new; + } else { + cur->next = new; + cur = cur->next; + } + + cur->name = (char *)vmalloc(sizeof(char) * 16); + memset(cur->name, 0, sizeof(char) * 16); + memcpy(cur->name, p, 16); + p += 16; + + cur->type = *(uint32_t *)p; + p += 4; + + cur->len = *(uint32_t *)p; + p += 4; + + if((cur->type ) >= 1000 || cur->len == 0) {//Temp Workaround + cur->len = 0; + }else{ + cur->data = (uint32_t *)vmalloc(sizeof(uint8_t) * cur->len * 8); + memset(cur->data, 0, sizeof(uint8_t) * cur->len * 8); + memcpy(cur->data, p, cur->len * 8); + p += cur->len * 8; + } + } + + vfree(rawdata); + aicbt_patch_table_load(usbdev, head); + printk("fw_patch_table download complete\n\n"); + + return ret; +err: + //aicbt_patch_table_free(&head); + + if (rawdata){ + vfree(rawdata); + } + return ret; +} + + diff --git a/usr/src/AIC8800/drivers/aic8800/aic_load_fw/aicbluetooth.h b/usr/src/AIC8800/drivers/aic8800/aic_load_fw/aicbluetooth.h new file mode 100644 index 0000000..786b94a --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic_load_fw/aicbluetooth.h @@ -0,0 +1,39 @@ +#ifndef _AICBLUETOOTH_H +#define _AICBLUETOOTH_H + +struct aicbt_patch_table { + char *name; + uint32_t type; + uint32_t *data; + uint32_t len; + struct aicbt_patch_table *next; +}; + + +int aic_bt_platform_init(struct aic_usb_dev *sdiodev); + +void aic_bt_platform_deinit(struct aic_usb_dev *sdiodev); + +int rwnx_plat_bin_fw_upload_android(struct aic_usb_dev *sdiodev, u32 fw_addr, + char *filename); + +int rwnx_plat_m2d_flash_ota_android(struct aic_usb_dev *usbdev, char *filename); + +int rwnx_plat_m2d_flash_ota_check(struct aic_usb_dev *usbdev, char *filename); + +int rwnx_plat_bin_fw_patch_table_upload_android(struct aic_usb_dev *usbdev, char *filename); + +int rwnx_plat_userconfig_upload_android(struct aic_usb_dev *usbdev, char *filename); +int rwnx_plat_flash_bin_upload_android(struct aic_usb_dev *usbdev, u32 fw_addr, char *filename); + +int8_t rwnx_atoi(char *value); +uint32_t rwnx_atoli(char *value); +int aicbt_patch_table_free(struct aicbt_patch_table *head); +struct aicbt_patch_table *aicbt_patch_table_alloc(struct aic_usb_dev *usbdev, const char *filename); +#ifdef CONFIG_LOAD_BT_CONF +void aicbt_parse_config(struct aic_usb_dev *usbdev, const char *filename); +#endif +int aicbt_patch_info_unpack(struct aicbt_patch_info_t *patch_info, struct aicbt_patch_table *head_t); +int aicbt_patch_table_load(struct aic_usb_dev *usbdev, struct aicbt_patch_table *_head); + +#endif diff --git a/usr/src/AIC8800/drivers/aic8800/aic_load_fw/aicbluetooth_cmds.c b/usr/src/AIC8800/drivers/aic8800/aic_load_fw/aicbluetooth_cmds.c new file mode 100644 index 0000000..229b5ce --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic_load_fw/aicbluetooth_cmds.c @@ -0,0 +1,469 @@ +/** + ****************************************************************************** + * + * rwnx_cmds.c + * + * Handles queueing (push to IPC, ack/cfm from IPC) of 
commands issued to + * LMAC FW + * + * Copyright (C) RivieraWaves 2014-2019 + * + ****************************************************************************** + */ + +#include +//#include +#include "aicbluetooth_cmds.h" +#include "aic_txrxif.h" +#include "aicwf_usb.h" + +//extern int aicwf_sdio_writeb(struct aic_sdio_dev *sdiodev, uint regaddr, u8 val); + +static void cmd_dump(const struct rwnx_cmd *cmd) +{ + printk(KERN_CRIT "tkn[%d] flags:%04x result:%3d cmd:%4d - reqcfm(%4d)\n", + cmd->tkn, cmd->flags, cmd->result, cmd->id, cmd->reqid); +} + +static void cmd_complete(struct rwnx_cmd_mgr *cmd_mgr, struct rwnx_cmd *cmd) +{ + //printk("cmdcmp\n"); + lockdep_assert_held(&cmd_mgr->lock); + + list_del(&cmd->list); + cmd_mgr->queue_sz--; + + cmd->flags |= RWNX_CMD_FLAG_DONE; + if (cmd->flags & RWNX_CMD_FLAG_NONBLOCK) { + kfree(cmd); + } else { + if (RWNX_CMD_WAIT_COMPLETE(cmd->flags)) { + cmd->result = 0; + complete(&cmd->complete); + } + } +} + +static int cmd_mgr_queue(struct rwnx_cmd_mgr *cmd_mgr, struct rwnx_cmd *cmd) +{ + bool defer_push = false; + + spin_lock_bh(&cmd_mgr->lock); + + if (cmd_mgr->state == RWNX_CMD_MGR_STATE_CRASHED) { + printk(KERN_CRIT"cmd queue crashed\n"); + cmd->result = -EPIPE; + spin_unlock_bh(&cmd_mgr->lock); + return -EPIPE; + } + + if (!list_empty(&cmd_mgr->cmds)) { + struct rwnx_cmd *last; + + if (cmd_mgr->queue_sz == cmd_mgr->max_queue_sz) { + printk(KERN_CRIT"Too many cmds (%d) already queued\n", + cmd_mgr->max_queue_sz); + cmd->result = -ENOMEM; + spin_unlock_bh(&cmd_mgr->lock); + return -ENOMEM; + } + last = list_entry(cmd_mgr->cmds.prev, struct rwnx_cmd, list); + if (last->flags & (RWNX_CMD_FLAG_WAIT_ACK | RWNX_CMD_FLAG_WAIT_PUSH)) { + cmd->flags |= RWNX_CMD_FLAG_WAIT_PUSH; + defer_push = true; + } + } + + if (cmd->flags & RWNX_CMD_FLAG_REQ_CFM) + cmd->flags |= RWNX_CMD_FLAG_WAIT_CFM; + + cmd->tkn = cmd_mgr->next_tkn++; + cmd->result = -EINTR; + + if (!(cmd->flags & RWNX_CMD_FLAG_NONBLOCK)) + init_completion(&cmd->complete); + + list_add_tail(&cmd->list, &cmd_mgr->cmds); + cmd_mgr->queue_sz++; + spin_unlock_bh(&cmd_mgr->lock); + + if (!defer_push) { + //printk("queue:id=%x, param_len=%u\n",cmd->a2e_msg->id, cmd->a2e_msg->param_len); + aicwf_set_cmd_tx((void *)(cmd_mgr->usbdev), cmd->a2e_msg, sizeof(struct lmac_msg) + cmd->a2e_msg->param_len); + kfree(cmd->a2e_msg); + } else { + printk("ERR: never defer push!!!!"); + return 0; + } + + if (!(cmd->flags & RWNX_CMD_FLAG_NONBLOCK)) { + unsigned long tout = msecs_to_jiffies(RWNX_80211_CMD_TIMEOUT_MS * cmd_mgr->queue_sz); + if (!wait_for_completion_killable_timeout(&cmd->complete, tout)) { + printk(KERN_CRIT"cmd timed-out\n"); + + cmd_dump(cmd); + spin_lock_bh(&cmd_mgr->lock); + cmd_mgr->state = RWNX_CMD_MGR_STATE_CRASHED; + if (!(cmd->flags & RWNX_CMD_FLAG_DONE)) { + cmd->result = -ETIMEDOUT; + cmd_complete(cmd_mgr, cmd); + } + spin_unlock_bh(&cmd_mgr->lock); + } + else{ + kfree(cmd); + } + } else { + cmd->result = 0; + } + + return 0; +} + +static int cmd_mgr_run_callback(struct rwnx_cmd_mgr *cmd_mgr, struct rwnx_cmd *cmd, + struct rwnx_cmd_e2amsg *msg, msg_cb_fct cb) +{ + int res; + + if (! 
cb){ + return 0; + } + spin_lock(&cmd_mgr->cb_lock); + res = cb(cmd, msg); + spin_unlock(&cmd_mgr->cb_lock); + + return res; +} + +static int cmd_mgr_msgind(struct rwnx_cmd_mgr *cmd_mgr, struct rwnx_cmd_e2amsg *msg, + msg_cb_fct cb) +{ + struct rwnx_cmd *cmd; + bool found = false; + + //printk("cmd->id=%x\n", msg->id); + spin_lock(&cmd_mgr->lock); + list_for_each_entry(cmd, &cmd_mgr->cmds, list) { + if (cmd->reqid == msg->id && + (cmd->flags & RWNX_CMD_FLAG_WAIT_CFM)) { + + if (!cmd_mgr_run_callback(cmd_mgr, cmd, msg, cb)) { + found = true; + cmd->flags &= ~RWNX_CMD_FLAG_WAIT_CFM; + + if (WARN((msg->param_len > RWNX_CMD_E2AMSG_LEN_MAX), + "Unexpect E2A msg len %d > %d\n", msg->param_len, + RWNX_CMD_E2AMSG_LEN_MAX)) { + msg->param_len = RWNX_CMD_E2AMSG_LEN_MAX; + } + + if (cmd->e2a_msg && msg->param_len) + memcpy(cmd->e2a_msg, &msg->param, msg->param_len); + + if (RWNX_CMD_WAIT_COMPLETE(cmd->flags)) + cmd_complete(cmd_mgr, cmd); + + break; + } + } + } + spin_unlock(&cmd_mgr->lock); + + if (!found) + cmd_mgr_run_callback(cmd_mgr, NULL, msg, cb); + + return 0; +} + +static void cmd_mgr_print(struct rwnx_cmd_mgr *cmd_mgr) +{ + struct rwnx_cmd *cur; + + spin_lock_bh(&cmd_mgr->lock); + printk("q_sz/max: %2d / %2d - next tkn: %d\n", + cmd_mgr->queue_sz, cmd_mgr->max_queue_sz, + cmd_mgr->next_tkn); + list_for_each_entry(cur, &cmd_mgr->cmds, list) { + cmd_dump(cur); + } + spin_unlock_bh(&cmd_mgr->lock); +} + +static void cmd_mgr_drain(struct rwnx_cmd_mgr *cmd_mgr) +{ + struct rwnx_cmd *cur, *nxt; + + spin_lock_bh(&cmd_mgr->lock); + list_for_each_entry_safe(cur, nxt, &cmd_mgr->cmds, list) { + list_del(&cur->list); + cmd_mgr->queue_sz--; + if (!(cur->flags & RWNX_CMD_FLAG_NONBLOCK)) + complete(&cur->complete); + } + spin_unlock_bh(&cmd_mgr->lock); +} + +void rwnx_cmd_mgr_init(struct rwnx_cmd_mgr *cmd_mgr) +{ + cmd_mgr->max_queue_sz = RWNX_CMD_MAX_QUEUED; + INIT_LIST_HEAD(&cmd_mgr->cmds); + cmd_mgr->state = RWNX_CMD_MGR_STATE_INITED; + spin_lock_init(&cmd_mgr->lock); + spin_lock_init(&cmd_mgr->cb_lock); + cmd_mgr->queue = &cmd_mgr_queue; + cmd_mgr->print = &cmd_mgr_print; + cmd_mgr->drain = &cmd_mgr_drain; + cmd_mgr->llind = NULL;//&cmd_mgr_llind; + cmd_mgr->msgind = &cmd_mgr_msgind; + + #if 0 + INIT_WORK(&cmd_mgr->cmdWork, cmd_mgr_task_process); + cmd_mgr->cmd_wq = create_singlethread_workqueue("cmd_wq"); + if (!cmd_mgr->cmd_wq) { + txrx_err("insufficient memory to create cmd workqueue.\n"); + return; + } + #endif +} + +void rwnx_cmd_mgr_deinit(struct rwnx_cmd_mgr *cmd_mgr) +{ + cmd_mgr->print(cmd_mgr); + cmd_mgr->drain(cmd_mgr); + cmd_mgr->print(cmd_mgr); + memset(cmd_mgr, 0, sizeof(*cmd_mgr)); +} + +void aicwf_set_cmd_tx(void *dev, struct lmac_msg *msg, uint len) +{ + struct aic_usb_dev *usbdev = (struct aic_usb_dev *)dev; + struct aicwf_bus *bus = usbdev->bus_if; + u8 *buffer = bus->cmd_buf; + u16 index = 0; + + memset(buffer, 0, CMD_BUF_MAX); + buffer[0] = (len+4) & 0x00ff; + buffer[1] = ((len+4) >> 8) &0x0f; + buffer[2] = 0x11; + buffer[3] = 0x0; + index += 4; + //there is a dummy word + index += 4; + + //make sure little endian + put_u16(&buffer[index], msg->id); + index += 2; + put_u16(&buffer[index], msg->dest_id); + index += 2; + put_u16(&buffer[index], msg->src_id); + index += 2; + put_u16(&buffer[index], msg->param_len); + index += 2; + memcpy(&buffer[index], (u8 *)msg->param, msg->param_len); + + aicwf_bus_txmsg(bus, buffer, len + 8); +} + +static inline void *rwnx_msg_zalloc(lmac_msg_id_t const id, + lmac_task_id_t const dest_id, + lmac_task_id_t const src_id, + uint16_t const param_len) 
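+/* allocates a zeroed lmac_msg with param_len bytes of payload and returns a pointer to its param area; rwnx_send_msg() later recovers the full message with container_of() */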
+{ + struct lmac_msg *msg; + gfp_t flags; + + if (in_softirq()) + flags = GFP_ATOMIC; + else + flags = GFP_KERNEL; + + msg = (struct lmac_msg *)kzalloc(sizeof(struct lmac_msg) + param_len, + flags); + if (msg == NULL) { + printk(KERN_CRIT "%s: msg allocation failed\n", __func__); + return NULL; + } + msg->id = id; + msg->dest_id = dest_id; + msg->src_id = src_id; + msg->param_len = param_len; + + return msg->param; +} + +static void rwnx_msg_free(struct lmac_msg *msg, const void *msg_params) +{ + kfree(msg); +} + + +static int rwnx_send_msg(struct aic_usb_dev *usbdev, const void *msg_params, + int reqcfm, lmac_msg_id_t reqid, void *cfm) +{ + struct lmac_msg *msg; + struct rwnx_cmd *cmd; + bool nonblock; + int ret = 0; + + msg = container_of((void *)msg_params, struct lmac_msg, param); + if(usbdev->bus_if->state == BUS_DOWN_ST) { + rwnx_msg_free(msg, msg_params); + printk("bus is down\n"); + return 0; + } + + nonblock = 0; + cmd = kzalloc(sizeof(struct rwnx_cmd), nonblock ? GFP_ATOMIC : GFP_KERNEL); + cmd->result = -EINTR; + cmd->id = msg->id; + cmd->reqid = reqid; + cmd->a2e_msg = msg; + cmd->e2a_msg = cfm; + if (nonblock) + cmd->flags = RWNX_CMD_FLAG_NONBLOCK; + if (reqcfm) + cmd->flags |= RWNX_CMD_FLAG_REQ_CFM; + + if(reqcfm) { + cmd->flags &= ~RWNX_CMD_FLAG_WAIT_ACK; // we don't need ack any more + ret = usbdev->cmd_mgr.queue(&usbdev->cmd_mgr,cmd); + } else { + aicwf_set_cmd_tx((void *)(usbdev), cmd->a2e_msg, sizeof(struct lmac_msg) + cmd->a2e_msg->param_len); + } + + if(!reqcfm) + kfree(cmd); + + return ret; +} + +int rwnx_send_dbg_mem_mask_write_req(struct aic_usb_dev *usbdev, u32 mem_addr, + u32 mem_mask, u32 mem_data) +{ + struct dbg_mem_mask_write_req *mem_mask_write_req; + + /* Build the DBG_MEM_MASK_WRITE_REQ message */ + mem_mask_write_req = rwnx_msg_zalloc(DBG_MEM_MASK_WRITE_REQ, TASK_DBG, DRV_TASK_ID, + sizeof(struct dbg_mem_mask_write_req)); + if (!mem_mask_write_req) + return -ENOMEM; + + /* Set parameters for the DBG_MEM_MASK_WRITE_REQ message */ + mem_mask_write_req->memaddr = mem_addr; + mem_mask_write_req->memmask = mem_mask; + mem_mask_write_req->memdata = mem_data; + + /* Send the DBG_MEM_MASK_WRITE_REQ message to LMAC FW */ + return rwnx_send_msg(usbdev, mem_mask_write_req, 1, DBG_MEM_MASK_WRITE_CFM, NULL); +} + + + +int rwnx_send_dbg_mem_block_write_req(struct aic_usb_dev *usbdev, u32 mem_addr, + u32 mem_size, u32 *mem_data) +{ + struct dbg_mem_block_write_req *mem_blk_write_req; + + /* Build the DBG_MEM_BLOCK_WRITE_REQ message */ + mem_blk_write_req = rwnx_msg_zalloc(DBG_MEM_BLOCK_WRITE_REQ, TASK_DBG, DRV_TASK_ID, + sizeof(struct dbg_mem_block_write_req)); + if (!mem_blk_write_req) + return -ENOMEM; + + /* Set parameters for the DBG_MEM_BLOCK_WRITE_REQ message */ + mem_blk_write_req->memaddr = mem_addr; + mem_blk_write_req->memsize = mem_size; + memcpy(mem_blk_write_req->memdata, mem_data, mem_size); + + /* Send the DBG_MEM_BLOCK_WRITE_REQ message to LMAC FW */ + return rwnx_send_msg(usbdev, mem_blk_write_req, 1, DBG_MEM_BLOCK_WRITE_CFM, NULL); +} + + +int rwnx_send_dbg_mem_read_req(struct aic_usb_dev *usbdev, u32 mem_addr, + struct dbg_mem_read_cfm *cfm) +{ + struct dbg_mem_read_req *mem_read_req; + + + /* Build the DBG_MEM_READ_REQ message */ + mem_read_req = rwnx_msg_zalloc(DBG_MEM_READ_REQ, TASK_DBG, DRV_TASK_ID, + sizeof(struct dbg_mem_read_req)); + if (!mem_read_req) + return -ENOMEM; + + /* Set parameters for the DBG_MEM_READ_REQ message */ + mem_read_req->memaddr = mem_addr; + + /* Send the DBG_MEM_READ_REQ message to LMAC FW */ + return 
rwnx_send_msg(usbdev, mem_read_req, 1, DBG_MEM_READ_CFM, cfm); +} + + +int rwnx_send_dbg_mem_write_req(struct aic_usb_dev *usbdev, u32 mem_addr, u32 mem_data) +{ + struct dbg_mem_write_req *mem_write_req; + + //printk("%s mem_addr:%x mem_data:%x\r\n", __func__, mem_addr, mem_data); + + /* Build the DBG_MEM_WRITE_REQ message */ + mem_write_req = rwnx_msg_zalloc(DBG_MEM_WRITE_REQ, TASK_DBG, DRV_TASK_ID, + sizeof(struct dbg_mem_write_req)); + if (!mem_write_req) + return -ENOMEM; + + /* Set parameters for the DBG_MEM_WRITE_REQ message */ + mem_write_req->memaddr = mem_addr; + mem_write_req->memdata = mem_data; + + /* Send the DBG_MEM_WRITE_REQ message to LMAC FW */ + return rwnx_send_msg(usbdev, mem_write_req, 1, DBG_MEM_WRITE_CFM, NULL); +} + +int rwnx_send_dbg_start_app_req(struct aic_usb_dev *usbdev, u32 boot_addr, + u32 boot_type) +{ + struct dbg_start_app_req *start_app_req; + + + /* Build the DBG_START_APP_REQ message */ + start_app_req = rwnx_msg_zalloc(DBG_START_APP_REQ, TASK_DBG, DRV_TASK_ID, + sizeof(struct dbg_start_app_req)); + if (!start_app_req) + return -ENOMEM; + + /* Set parameters for the DBG_START_APP_REQ message */ + start_app_req->bootaddr = boot_addr; + start_app_req->boottype = boot_type; + + /* Send the DBG_START_APP_REQ message to LMAC FW */ + return rwnx_send_msg(usbdev, start_app_req, 0, 0, NULL); +} + +static msg_cb_fct dbg_hdlrs[MSG_I(DBG_MAX)] = { +}; + +static msg_cb_fct *msg_hdlrs[] = { + [TASK_DBG] = dbg_hdlrs, +}; + +void rwnx_rx_handle_msg(struct aic_usb_dev *usbdev, struct ipc_e2a_msg *msg) +{ + usbdev->cmd_mgr.msgind(&usbdev->cmd_mgr, msg, + msg_hdlrs[MSG_T(msg->id)][MSG_I(msg->id)]); +} + + +int rwnx_send_reboot(struct aic_usb_dev *usbdev) +{ + int ret = 0; + u32 delay = 2 *1000; //1s + + printk("%s enter \r\n", __func__); + + ret = rwnx_send_dbg_start_app_req(usbdev, delay, HOST_START_APP_REBOOT); + return ret; +} + + + diff --git a/usr/src/AIC8800/drivers/aic8800/aic_load_fw/aicbluetooth_cmds.h b/usr/src/AIC8800/drivers/aic8800/aic_load_fw/aicbluetooth_cmds.h new file mode 100644 index 0000000..3b54bf9 --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic_load_fw/aicbluetooth_cmds.h @@ -0,0 +1,326 @@ +/** + ****************************************************************************** + * + * rwnx_cmds.h + * + * Copyright (C) RivieraWaves 2014-2019 + * + ****************************************************************************** + */ + +#ifndef _AICBLUETOOTH_CMDS_H_ +#define _AICBLUETOOTH_CMDS_H_ + +#include +#include +#include + +#define RWNX_80211_CMD_TIMEOUT_MS 2000//500//300 + +#define RWNX_CMD_FLAG_NONBLOCK BIT(0) +#define RWNX_CMD_FLAG_REQ_CFM BIT(1) +#define RWNX_CMD_FLAG_WAIT_PUSH BIT(2) +#define RWNX_CMD_FLAG_WAIT_ACK BIT(3) +#define RWNX_CMD_FLAG_WAIT_CFM BIT(4) +#define RWNX_CMD_FLAG_DONE BIT(5) +/* ATM IPC design makes it possible to get the CFM before the ACK, + * otherwise this could have simply been a state enum */ +#define RWNX_CMD_WAIT_COMPLETE(flags) \ + (!(flags & (RWNX_CMD_FLAG_WAIT_ACK | RWNX_CMD_FLAG_WAIT_CFM))) + +#define RWNX_CMD_MAX_QUEUED 8 + +//#include "ipc_shared.h" + +#define IPC_E2A_MSG_PARAM_SIZE 256 + +/// Message structure for MSGs from Emb to App +struct ipc_e2a_msg +{ + u16 id; ///< Message id. + u16 dummy_dest_id; + u16 dummy_src_id; + u16 param_len; ///< Parameter embedded struct length. + u32 pattern; ///< Used to stamp a valid MSG buffer + u32 param[IPC_E2A_MSG_PARAM_SIZE]; ///< Parameter embedded struct. Must be word-aligned. 
+}; + +typedef u16 lmac_msg_id_t; +typedef u16 lmac_task_id_t; + +struct lmac_msg +{ + lmac_msg_id_t id; ///< Message id. + lmac_task_id_t dest_id; ///< Destination kernel identifier. + lmac_task_id_t src_id; ///< Source kernel identifier. + u16 param_len; ///< Parameter embedded struct length. + u32 param[]; ///< Parameter embedded struct. Must be word-aligned. +}; + +struct aicbt_patch_info_t { + uint32_t info_len; +//base len start + uint32_t adid_addrinf; + uint32_t addr_adid; + uint32_t patch_addrinf; + uint32_t addr_patch; + uint32_t reset_addr; + uint32_t reset_val; + uint32_t adid_flag_addr; + uint32_t adid_flag; +//base len end +//ext patch nb + uint32_t ext_patch_nb_addr; + uint32_t ext_patch_nb; + uint32_t *ext_patch_param; + +}; + + +#define AICBT_PT_TAG "AICBT_PT_TAG" +#define AICBT_PT_INF 0x0 +#define AICBT_PT_TRAP 0x01 +#define AICBT_PT_B4 0x02 +#define AICBT_PT_BTMODE 0x03 +#define AICBT_PT_PWRON 0x04 +#define AICBT_PT_AF 0x05 + +#define rwnx_cmd_e2amsg ipc_e2a_msg +#define rwnx_cmd_a2emsg lmac_msg +#define RWNX_CMD_A2EMSG_LEN(m) (sizeof(struct lmac_msg) + m->param_len) +#define RWNX_CMD_E2AMSG_LEN_MAX (IPC_E2A_MSG_PARAM_SIZE * 4) + +static inline void put_u16(u8 *buf, u16 data) +{ + buf[0] = (u8)(data&0x00ff); + buf[1] = (u8)((data >> 8)&0x00ff); +} + +enum rwnx_cmd_mgr_state { + RWNX_CMD_MGR_STATE_DEINIT, + RWNX_CMD_MGR_STATE_INITED, + RWNX_CMD_MGR_STATE_CRASHED, +}; + +struct rwnx_cmd { + struct list_head list; + lmac_msg_id_t id; + lmac_msg_id_t reqid; + struct rwnx_cmd_a2emsg *a2e_msg; + char *e2a_msg; + u32 tkn; + u16 flags; + struct completion complete; + u32 result; +}; + +struct aic_sdio_dev; +struct aic_usb_dev; +struct rwnx_cmd; +typedef int (*msg_cb_fct)(struct rwnx_cmd *cmd, struct rwnx_cmd_e2amsg *msg); + +struct rwnx_cmd_mgr { + enum rwnx_cmd_mgr_state state; + spinlock_t lock; + u32 next_tkn; + u32 queue_sz; + u32 max_queue_sz; + spinlock_t cb_lock; + #if 0 + void *sdiodev; + #else + void *usbdev; + #endif + struct list_head cmds; + + int (*queue)(struct rwnx_cmd_mgr *, struct rwnx_cmd *); + int (*llind)(struct rwnx_cmd_mgr *, struct rwnx_cmd *); + int (*msgind)(struct rwnx_cmd_mgr *, struct rwnx_cmd_e2amsg *, msg_cb_fct); + void (*print)(struct rwnx_cmd_mgr *); + void (*drain)(struct rwnx_cmd_mgr *); + + struct work_struct cmdWork; + struct workqueue_struct *cmd_wq; +}; + + +#if 0 +#define WAKE_CMD_WORK(cmd_mgr) \ + do { \ + queue_work((cmd_mgr)->cmd_wq, &cmd_mgr->cmdWork); \ + } while (0) +#endif +void rwnx_cmd_mgr_init(struct rwnx_cmd_mgr *cmd_mgr); +void rwnx_cmd_mgr_deinit(struct rwnx_cmd_mgr *cmd_mgr); +int cmd_mgr_queue_force_defer(struct rwnx_cmd_mgr *cmd_mgr, struct rwnx_cmd *cmd); +void aicwf_set_cmd_tx(void *dev, struct lmac_msg *msg, uint len); + +enum +{ + TASK_NONE = (u8) -1, + + // MAC Management task. 
+ TASK_MM = 0, + // DEBUG task + TASK_DBG, + /// SCAN task + TASK_SCAN, + /// TDLS task + TASK_TDLS, + /// SCANU task + TASK_SCANU, + /// ME task + TASK_ME, + /// SM task + TASK_SM, + /// APM task + TASK_APM, + /// BAM task + TASK_BAM, + /// MESH task + TASK_MESH, + /// RXU task + TASK_RXU, + // This is used to define the last task that is running on the EMB processor + TASK_LAST_EMB = TASK_RXU, + + // nX API task + TASK_API, + TASK_MAX, +}; + +#define LMAC_FIRST_MSG(task) ((lmac_msg_id_t)((task) << 10)) +#define DRV_TASK_ID 100 +#define MSG_I(msg) ((msg) & ((1<<10)-1)) +#define MSG_T(msg) ((lmac_task_id_t)((msg) >> 10)) + + +enum dbg_msg_tag +{ + /// Memory read request + DBG_MEM_READ_REQ = LMAC_FIRST_MSG(TASK_DBG), + /// Memory read confirm + DBG_MEM_READ_CFM, + /// Memory write request + DBG_MEM_WRITE_REQ, + /// Memory write confirm + DBG_MEM_WRITE_CFM, + /// Module filter request + DBG_SET_MOD_FILTER_REQ, + /// Module filter confirm + DBG_SET_MOD_FILTER_CFM, + /// Severity filter request + DBG_SET_SEV_FILTER_REQ, + /// Severity filter confirm + DBG_SET_SEV_FILTER_CFM, + /// LMAC/MAC HW fatal error indication + DBG_ERROR_IND, + /// Request to get system statistics + DBG_GET_SYS_STAT_REQ, + /// COnfirmation of system statistics + DBG_GET_SYS_STAT_CFM, + /// Memory block write request + DBG_MEM_BLOCK_WRITE_REQ, + /// Memory block write confirm + DBG_MEM_BLOCK_WRITE_CFM, + /// Start app request + DBG_START_APP_REQ, + /// Start app confirm + DBG_START_APP_CFM, + /// Start npc request + DBG_START_NPC_REQ, + /// Start npc confirm + DBG_START_NPC_CFM, + /// Memory mask write request + DBG_MEM_MASK_WRITE_REQ, + /// Memory mask write confirm + DBG_MEM_MASK_WRITE_CFM, + /// Max number of Debug messages + DBG_MAX, +}; + +struct dbg_mem_block_write_req +{ + u32 memaddr; + u32 memsize; + u32 memdata[1024 / sizeof(u32)]; +}; + +/// Structure containing the parameters of the @ref DBG_MEM_MASK_WRITE_REQ message. +struct dbg_mem_mask_write_req +{ + u32 memaddr; + u32 memmask; + u32 memdata; +}; + + +/// Structure containing the parameters of the @ref DBG_MEM_BLOCK_WRITE_CFM message. +struct dbg_mem_block_write_cfm +{ + u32 wstatus; +}; + +/// Structure containing the parameters of the @ref DBG_MEM_READ_REQ message. +struct dbg_mem_read_req +{ + u32 memaddr; +}; + +/// Structure containing the parameters of the @ref DBG_MEM_READ_CFM message. +struct dbg_mem_read_cfm +{ + u32 memaddr; + u32 memdata; +}; + +/// Structure containing the parameters of the @ref DBG_MEM_WRITE_REQ message. +struct dbg_mem_write_req +{ + u32 memaddr; + u32 memdata; +}; + +/// Structure containing the parameters of the @ref DBG_MEM_WRITE_CFM message. +struct dbg_mem_write_cfm +{ + u32 memaddr; + u32 memdata; +}; + +struct dbg_start_app_req +{ + u32 bootaddr; + u32 boottype; +}; + +/// Structure containing the parameters of the @ref DBG_START_APP_CFM message. 
+struct dbg_start_app_cfm +{ + u32 bootstatus; +}; + +enum { + HOST_START_APP_AUTO = 1, + HOST_START_APP_CUSTOM, + HOST_START_APP_REBOOT, +}; + +int rwnx_send_dbg_mem_mask_write_req(struct aic_usb_dev *usbdev, u32 mem_addr, + u32 mem_mask, u32 mem_data); + + +int rwnx_send_dbg_mem_block_write_req(struct aic_usb_dev *usbdev, u32 mem_addr, + u32 mem_size, u32 *mem_data); + +int rwnx_send_dbg_mem_write_req(struct aic_usb_dev *usbdev, u32 mem_addr, u32 mem_data); +int rwnx_send_dbg_mem_read_req(struct aic_usb_dev *usbdev, u32 mem_addr, struct dbg_mem_read_cfm *cfm); + +void rwnx_rx_handle_msg(struct aic_usb_dev *usbdev, struct ipc_e2a_msg *msg); + +int rwnx_send_dbg_start_app_req(struct aic_usb_dev *usbdev, u32 boot_addr, + u32 boot_type); + +int rwnx_send_reboot(struct aic_usb_dev *usbdev); + +#endif diff --git a/usr/src/AIC8800/drivers/aic8800/aic_load_fw/aicwf_debug.h b/usr/src/AIC8800/drivers/aic8800/aic_load_fw/aicwf_debug.h new file mode 100644 index 0000000..12fdca6 --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic_load_fw/aicwf_debug.h @@ -0,0 +1,52 @@ + + +#define RWNX_FN_ENTRY_STR ">>> %s()\n", __func__ + + + +/* message levels */ +#define LOGERROR 0x0001 +#define LOGINFO 0x0002 +#define LOGTRACE 0x0004 +#define LOGDEBUG 0x0008 +#define LOGDATA 0x0010 + +extern int aicwf_dbg_level; +void rwnx_data_dump(char* tag, void* data, unsigned long len); + +#define AICWF_LOG "AICWFDBG(" + +#define AICWFDBG(level, args, arg...) \ +do { \ + if (aicwf_dbg_level & level) { \ + printk(AICWF_LOG#level")\t" args, ##arg); \ + } \ +} while (0) + +#define RWNX_DBG(fmt, ...) \ +do { \ + if (aicwf_dbg_level & LOGTRACE) { \ + printk(AICWF_LOG"LOGTRACE)\t"fmt , ##__VA_ARGS__); \ + } \ +} while (0) + + + +#if 0 +#define RWNX_DBG(fmt, ...) \ + do { \ + if (aicwf_dbg_level & LOGTRACE) { \ + printk(AICWF_LOG"LOGTRACE"")\t" fmt, ##__VA_ARGS__); \ + } \ + } while (0) +#define AICWFDBG(args, level) \ +do { \ + if (aicwf_dbg_level & level) { \ + printk(AICWF_LOG"(%s)\t" ,#level); \ + printf args; \ + } \ +} while (0) +#endif + + + diff --git a/usr/src/AIC8800/drivers/aic8800/aic_load_fw/aicwf_rx_prealloc.c b/usr/src/AIC8800/drivers/aic8800/aic_load_fw/aicwf_rx_prealloc.c new file mode 100644 index 0000000..d29edfd --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic_load_fw/aicwf_rx_prealloc.c @@ -0,0 +1,126 @@ +#include +#include +#include +#include +#include +#include "aicwf_rx_prealloc.h" +#include "aicwf_debug.h" + +#ifdef CONFIG_PREALLOC_RX_SKB + +struct aicwf_rx_buff_list aic_rx_buff_list; + +int aic_rxbuff_num_max = 1000; +#ifdef CONFIG_PLATFORM_HI +int aic_rxbuff_size = (4 * 512) * 1; +#else +int aic_rxbuff_size = (4 * 512) * 10; +#endif + +int rx_buff_list_ava = 0; + +module_param(rx_buff_list_ava, int, 0660); + +int aicwf_rxbuff_size_get(void) +{ + return aic_rxbuff_size; +} + +struct rx_buff *aicwf_prealloc_rxbuff_alloc(spinlock_t *lock) +{ + unsigned long flags; + struct rx_buff *rxbuff = NULL; + + spin_lock_irqsave(lock, flags); + rx_buff_list_ava = atomic_read(&aic_rx_buff_list.rxbuff_list_len); + if(rx_buff_list_ava < 10){ + AICWFDBG(LOGERROR, "%s WARNING rxbuff is running out %d\r\n", __func__, + rx_buff_list_ava); + //msleep(10); + mdelay(10); + } + + if (list_empty(&aic_rx_buff_list.rxbuff_list)) { + spin_unlock_irqrestore(lock, flags); + AICWFDBG(LOGERROR, "%s %d, rxbuff list is empty\r\n", __func__, __LINE__); + return NULL; + } else { + rxbuff = list_first_entry(&aic_rx_buff_list.rxbuff_list, + struct rx_buff, queue); + list_del_init(&rxbuff->queue); + 
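+        /* buffer is now off the global free list; keep the length counter in step so the low-availability warning above stays accurate */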
atomic_dec(&aic_rx_buff_list.rxbuff_list_len); + } + spin_unlock_irqrestore(lock, flags); + //printk("len:%d\n", aic_rx_buff_list.rxbuff_list_len); + memset(rxbuff->data, 0, aic_rxbuff_size); + rxbuff->len = 0; + rxbuff->start = NULL; + rxbuff->read = NULL; + rxbuff->end = NULL; + + return rxbuff; +} + +void aicwf_prealloc_rxbuff_free(struct rx_buff *rxbuff, spinlock_t *lock) +{ + unsigned long flags; + + spin_lock_irqsave(lock, flags); + list_add_tail(&rxbuff->queue, &aic_rx_buff_list.rxbuff_list); + atomic_inc(&aic_rx_buff_list.rxbuff_list_len); + rx_buff_list_ava = atomic_read(&aic_rx_buff_list.rxbuff_list_len); + spin_unlock_irqrestore(lock, flags); +} + +int aicwf_prealloc_init(void) +{ + struct rx_buff *rxbuff; + int i = 0; + + AICWFDBG(LOGINFO, "%s enter\n", __func__); + INIT_LIST_HEAD(&aic_rx_buff_list.rxbuff_list); + + for (i = 0 ; i < aic_rxbuff_num_max ; i++) { + rxbuff = kzalloc(sizeof(struct rx_buff), GFP_KERNEL); + if (rxbuff) { + rxbuff->data = kzalloc(aic_rxbuff_size, GFP_KERNEL); + if (rxbuff->data == NULL) { + AICWFDBG(LOGERROR, "failed to alloc rxbuff data\n"); + kfree(rxbuff); + continue; + } + rxbuff->len = 0; + rxbuff->start = NULL; + rxbuff->read = NULL; + rxbuff->end = NULL; + list_add_tail(&rxbuff->queue, &aic_rx_buff_list.rxbuff_list); + atomic_inc(&aic_rx_buff_list.rxbuff_list_len); + rx_buff_list_ava = atomic_read(&aic_rx_buff_list.rxbuff_list_len); + } + } + + AICWFDBG(LOGINFO, "pre alloc rxbuff list len: %d\n", (int)atomic_read(&aic_rx_buff_list.rxbuff_list_len)); + return 0; +} + +void aicwf_prealloc_exit(void) +{ + struct rx_buff *rxbuff; + struct rx_buff *pos; + + AICWFDBG(LOGINFO, "%s enter\n", __func__); + + AICWFDBG(LOGINFO, "free pre alloc rxbuff list %d\n", (int)atomic_read(&aic_rx_buff_list.rxbuff_list_len)); + list_for_each_entry_safe(rxbuff, pos, &aic_rx_buff_list.rxbuff_list, queue) { + list_del_init(&rxbuff->queue); + kfree(rxbuff->data); + kfree(rxbuff); + } +} + +EXPORT_SYMBOL(aicwf_rxbuff_size_get); +EXPORT_SYMBOL(aicwf_prealloc_rxbuff_alloc); +EXPORT_SYMBOL(aicwf_prealloc_rxbuff_free); + +#endif + diff --git a/usr/src/AIC8800/drivers/aic8800/aic_load_fw/aicwf_rx_prealloc.h b/usr/src/AIC8800/drivers/aic8800/aic_load_fw/aicwf_rx_prealloc.h new file mode 100644 index 0000000..b2fafc1 --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic_load_fw/aicwf_rx_prealloc.h @@ -0,0 +1,26 @@ +#ifndef _AICWF_RX_PREALLOC_H_ +#define _AICWF_RX_PREALLOC_H_ + +#ifdef CONFIG_PREALLOC_RX_SKB + +struct rx_buff { + struct list_head queue; + unsigned char *data; + u32 len; + uint8_t *start; + uint8_t *end; + uint8_t *read; +}; + +struct aicwf_rx_buff_list { + struct list_head rxbuff_list; + atomic_t rxbuff_list_len; +}; + +struct rx_buff *aicwf_prealloc_rxbuff_alloc(spinlock_t *lock); +void aicwf_prealloc_rxbuff_free(struct rx_buff *rxbuff, spinlock_t *lock); +int aicwf_prealloc_init(void); +void aicwf_prealloc_exit(void); +int aicwf_rxbuff_size_get(void); +#endif +#endif /* _AICWF_RX_PREALLOC_H_ */ \ No newline at end of file diff --git a/usr/src/AIC8800/drivers/aic8800/aic_load_fw/aicwf_txq_prealloc.c b/usr/src/AIC8800/drivers/aic8800/aic_load_fw/aicwf_txq_prealloc.c new file mode 100644 index 0000000..cca0659 --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic_load_fw/aicwf_txq_prealloc.c @@ -0,0 +1,61 @@ +#include +#include "aicwf_debug.h" + +struct prealloc_txq{ + int prealloced; + void *txq; + size_t size; +}; + +struct prealloc_txq prealloc_txq; +#define MAX_TXQ_SIZE 100 * 1024 + +void *aicwf_prealloc_txq_alloc(size_t size) +{ + + BUG_ON(size > 
MAX_TXQ_SIZE); + + //check prealloc_txq.size + if((int)prealloc_txq.size != (int)size) + { + AICWFDBG(LOGINFO, "%s size is diff will to be kzalloc \r\n", __func__); + + if(prealloc_txq.txq != NULL) + { + AICWFDBG(LOGINFO, "%s txq to kfree \r\n", __func__); + kfree(prealloc_txq.txq); + prealloc_txq.txq = NULL; + } + + prealloc_txq.size = size; + prealloc_txq.prealloced = 0; + } + + //check prealloc or not + if(!prealloc_txq.prealloced) + { + prealloc_txq.txq = kzalloc(size, GFP_KERNEL); + if(!prealloc_txq.txq){ + AICWFDBG(LOGERROR, "%s txq kzalloc fail \r\n", __func__); + }else{ + AICWFDBG(LOGINFO, "%s txq kzalloc successful \r\n", __func__); + prealloc_txq.prealloced = 1; + } + }else{ + AICWFDBG(LOGINFO, "%s txq not need to kzalloc \r\n", __func__); + } + + return prealloc_txq.txq; +} +void aicwf_prealloc_txq_free(void) +{ + if(prealloc_txq.txq != NULL) + { + AICWFDBG(LOGINFO, "%s txq to kfree \r\n", __func__); + kfree(prealloc_txq.txq); + prealloc_txq.txq = NULL; + } +} + +EXPORT_SYMBOL(aicwf_prealloc_txq_alloc); + diff --git a/usr/src/AIC8800/drivers/aic8800/aic_load_fw/aicwf_txq_prealloc.h b/usr/src/AIC8800/drivers/aic8800/aic_load_fw/aicwf_txq_prealloc.h new file mode 100644 index 0000000..ce4ee07 --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic_load_fw/aicwf_txq_prealloc.h @@ -0,0 +1,4 @@ + + +void aicwf_prealloc_txq_free(void); + diff --git a/usr/src/AIC8800/drivers/aic8800/aic_load_fw/aicwf_usb.c b/usr/src/AIC8800/drivers/aic8800/aic_load_fw/aicwf_usb.c new file mode 100755 index 0000000..b4ed26e --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic_load_fw/aicwf_usb.c @@ -0,0 +1,1881 @@ +/** + * aicwf_usb.c + * + * USB function declarations + * + * Copyright (C) AICSemi 2018-2020 + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include "aic_txrxif.h" +#include "aicwf_usb.h" +#include "aicbluetooth.h" +#include "aicwf_debug.h" +#include "aic_compat_8800d80.h" +#include "aic_compat_8800d80x2.h" + +#define JUMP_TABLE_BASE 0x161928 +#define JUMP_TABLE_OFFSET(i) ((u32)(JUMP_TABLE_BASE+(i)*4)) +extern int adap_test; +extern int testmode; +extern unsigned char paringid[100]; +extern int ble_scan_wakeup_reboot_time; +extern uint32_t ad_data_filter_mask; +extern uint32_t gpio_num;//default select gpiob2 for fw_wakeup_host +extern uint32_t gpio_dft_lvl;//0:defalut pull down, 1:default pull up +u8 chip_id = 0; +u8 chip_sub_id = 0; +u8 chip_mcu_id = 0; +int fw_loaded = 0; + +void aicwf_usb_tx_flowctrl(struct aic_usb_dev *usb_dev, bool state) +{ +} + +static struct aicwf_usb_buf *aicwf_usb_tx_dequeue(struct aic_usb_dev *usb_dev, + struct list_head *q, int *counter, spinlock_t *qlock) +{ + unsigned long flags; + struct aicwf_usb_buf *usb_buf; + + spin_lock_irqsave(qlock, flags); + if (list_empty(q)) { + usb_buf = NULL; + } else { + usb_buf = list_first_entry(q, struct aicwf_usb_buf, list); + list_del_init(&usb_buf->list); + if (counter) + (*counter)--; + } + spin_unlock_irqrestore(qlock, flags); + return usb_buf; +} + +static void aicwf_usb_tx_queue(struct aic_usb_dev *usb_dev, + struct list_head *q, struct aicwf_usb_buf *usb_buf, int *counter, + spinlock_t *qlock) +{ + unsigned long flags; + + spin_lock_irqsave(qlock, flags); + list_add_tail(&usb_buf->list, q); + (*counter)++; + spin_unlock_irqrestore(qlock, flags); +} + +static struct aicwf_usb_buf *aicwf_usb_rx_buf_get(struct aic_usb_dev *usb_dev) +{ + unsigned long flags; + struct aicwf_usb_buf *usb_buf; + + 
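+    /* take one pre-allocated buffer off the rx free list under rx_free_lock; NULL if the list is empty */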
spin_lock_irqsave(&usb_dev->rx_free_lock, flags); + if (list_empty(&usb_dev->rx_free_list)) { + usb_buf = NULL; + } else { + usb_buf = list_first_entry(&usb_dev->rx_free_list, struct aicwf_usb_buf, list); + list_del_init(&usb_buf->list); + } + spin_unlock_irqrestore(&usb_dev->rx_free_lock, flags); + return usb_buf; +} + +static void aicwf_usb_rx_buf_put(struct aic_usb_dev *usb_dev, struct aicwf_usb_buf *usb_buf) +{ + unsigned long flags; + + spin_lock_irqsave(&usb_dev->rx_free_lock, flags); + list_add_tail(&usb_buf->list, &usb_dev->rx_free_list); + spin_unlock_irqrestore(&usb_dev->rx_free_lock, flags); +} + +static void aicwf_usb_tx_complete(struct urb *urb) +{ + unsigned long flags; + struct aicwf_usb_buf *usb_buf = (struct aicwf_usb_buf *) urb->context; + struct aic_usb_dev *usb_dev = usb_buf->usbdev; + struct sk_buff *skb; + + if (usb_buf->cfm == false) { + skb = usb_buf->skb; + dev_kfree_skb_any(skb); + } + #if !defined CONFIG_USB_NO_TRANS_DMA_MAP + else { + u8 *buf; + buf = (u8 *)usb_buf->skb; + kfree(buf); + } + #endif + usb_buf->skb = NULL; + + aicwf_usb_tx_queue(usb_dev, &usb_dev->tx_free_list, usb_buf, + &usb_dev->tx_free_count, &usb_dev->tx_free_lock); + + spin_lock_irqsave(&usb_dev->tx_flow_lock, flags); + if (usb_dev->tx_free_count > AICWF_USB_TX_HIGH_WATER) { + if (usb_dev->tbusy) { + usb_dev->tbusy = false; + aicwf_usb_tx_flowctrl(usb_dev, false); + } + } + spin_unlock_irqrestore(&usb_dev->tx_flow_lock, flags); + } + +static void aicwf_usb_rx_complete(struct urb *urb) +{ + struct aicwf_usb_buf *usb_buf = (struct aicwf_usb_buf *) urb->context; + struct aic_usb_dev *usb_dev = usb_buf->usbdev; + struct aicwf_rx_priv* rx_priv = usb_dev->rx_priv; + struct sk_buff *skb = NULL; + unsigned long flags = 0; + + skb = usb_buf->skb; + usb_buf->skb = NULL; + + if (urb->actual_length > urb->transfer_buffer_length) { + aicwf_dev_skb_free(skb); + aicwf_usb_rx_buf_put(usb_dev, usb_buf); + schedule_work(&usb_dev->rx_urb_work); + return; + } + + if (urb->status != 0 || !urb->actual_length) { + aicwf_dev_skb_free(skb); + aicwf_usb_rx_buf_put(usb_dev, usb_buf); + schedule_work(&usb_dev->rx_urb_work); + return; + } + + if (usb_dev->state == USB_UP_ST) { + skb_put(skb, urb->actual_length); + + spin_lock_irqsave(&rx_priv->rxqlock, flags); + if(!aicwf_rxframe_enqueue(usb_dev->dev, &rx_priv->rxq, skb)){ + spin_unlock_irqrestore(&rx_priv->rxqlock, flags); + usb_err("rx_priv->rxq is over flow!!!\n"); + aicwf_dev_skb_free(skb); + return; + } + spin_unlock_irqrestore(&rx_priv->rxqlock, flags); + atomic_inc(&rx_priv->rx_cnt); + complete(&rx_priv->usbdev->bus_if->busrx_trgg); + aicwf_usb_rx_buf_put(usb_dev, usb_buf); + + schedule_work(&usb_dev->rx_urb_work); + } else { + aicwf_dev_skb_free(skb); + aicwf_usb_rx_buf_put(usb_dev, usb_buf); + } +} + +static int aicwf_usb_submit_rx_urb(struct aic_usb_dev *usb_dev, + struct aicwf_usb_buf *usb_buf) +{ + struct sk_buff *skb; + int ret; + + if (!usb_buf || !usb_dev) + return -1; + + if (usb_dev->state != USB_UP_ST) { + usb_err("usb state is not up!\n"); + aicwf_usb_rx_buf_put(usb_dev, usb_buf); + return -1; + } + + skb = __dev_alloc_skb(AICWF_USB_MAX_PKT_SIZE, GFP_KERNEL); + if (!skb) { + aicwf_usb_rx_buf_put(usb_dev, usb_buf); + return -1; + } + + usb_buf->skb = skb; + + usb_fill_bulk_urb(usb_buf->urb, + usb_dev->udev, + usb_dev->bulk_in_pipe, + skb->data, skb_tailroom(skb), aicwf_usb_rx_complete, usb_buf); + + usb_buf->usbdev = usb_dev; + + usb_anchor_urb(usb_buf->urb, &usb_dev->rx_submitted); + + ret = usb_submit_urb(usb_buf->urb, GFP_ATOMIC); + if (ret) { + 
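+        /* submit failed: unanchor the urb, drop the skb, return the buffer to the free list and back off before the next attempt */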
usb_err("usb submit rx urb fail:%d\n", ret); + usb_unanchor_urb(usb_buf->urb); + aicwf_dev_skb_free(usb_buf->skb); + usb_buf->skb = NULL; + aicwf_usb_rx_buf_put(usb_dev, usb_buf); + msleep(100); + return ret; + } + return 0; +} + +static void aicwf_usb_rx_submit_all_urb(struct aic_usb_dev *usb_dev) +{ + struct aicwf_usb_buf *usb_buf; + + if (usb_dev->state != USB_UP_ST) { + usb_err("bus is not up=%d\n", usb_dev->state); + return; + } + if (usb_dev->app_cmp) { + printk("app_cmp\r\n"); + return; +} + + while((usb_buf = aicwf_usb_rx_buf_get(usb_dev)) != NULL) { + if (aicwf_usb_submit_rx_urb(usb_dev, usb_buf)) { + usb_err("usb rx refill fail\n"); + if (usb_dev->state != USB_UP_ST){ + return; + } + break; + } + } +} + +static void aicwf_usb_rx_prepare(struct aic_usb_dev *usb_dev) +{ + aicwf_usb_rx_submit_all_urb(usb_dev); +} + +static void aicwf_usb_tx_prepare(struct aic_usb_dev *usb_dev) +{ + struct aicwf_usb_buf *usb_buf; + + while(!list_empty(&usb_dev->tx_post_list)){ + usb_buf = aicwf_usb_tx_dequeue(usb_dev, &usb_dev->tx_post_list, + &usb_dev->tx_post_count, &usb_dev->tx_post_lock); + if(usb_buf->skb) { + dev_kfree_skb(usb_buf->skb); + usb_buf->skb = NULL; + } + aicwf_usb_tx_queue(usb_dev, &usb_dev->tx_free_list, usb_buf, + &usb_dev->tx_free_count, &usb_dev->tx_free_lock); + } +} +static void aicwf_usb_tx_process(struct aic_usb_dev *usb_dev) +{ + struct aicwf_usb_buf *usb_buf; + int ret = 0; + u8* data = NULL; + + while(!list_empty(&usb_dev->tx_post_list)) { + if (usb_dev->state != USB_UP_ST) { + usb_err("usb state is not up!\n"); + return; + } + + usb_buf = aicwf_usb_tx_dequeue(usb_dev, &usb_dev->tx_post_list, + &usb_dev->tx_post_count, &usb_dev->tx_post_lock); + if(!usb_buf) { + usb_err("can not get usb_buf from tx_post_list!\n"); + return; + } + data = usb_buf->skb->data; + + ret = usb_submit_urb(usb_buf->urb, GFP_ATOMIC); + if (ret) { + usb_err("aicwf_usb_bus_tx usb_submit_urb FAILED\n"); + goto fail; + } + + continue; +fail: + dev_kfree_skb(usb_buf->skb); + usb_buf->skb = NULL; + aicwf_usb_tx_queue(usb_dev, &usb_dev->tx_free_list, usb_buf, + &usb_dev->tx_free_count, &usb_dev->tx_free_lock); + } +} + +static inline void aic_thread_wait_stop(void) +{ +#if 1// PLATFORM_LINUX + #if 0 + while (!kthread_should_stop()) + rtw_msleep_os(10); + #else + set_current_state(TASK_INTERRUPTIBLE); + while (!kthread_should_stop()) { + schedule(); + set_current_state(TASK_INTERRUPTIBLE); + } + __set_current_state(TASK_RUNNING); + #endif +#endif +} + +int usb_bustx_thread(void *data) +{ + struct aicwf_bus *bus = (struct aicwf_bus *)data; + struct aic_usb_dev *usbdev = bus->bus_priv.usb; + + while (1) { + /* + if(kthread_should_stop()) { + usb_err("usb bustx thread stop\n"); + break; + } + */ + + if (!wait_for_completion_interruptible(&bus->bustx_trgg)) { + if (usbdev->app_cmp == false){ + printk("usb bustx thread will to stop\n"); + //mdelay(100); + break; + //continue; + } + if (usbdev->tx_post_count > 0) + aicwf_usb_tx_process(usbdev); + } + } + + aic_thread_wait_stop(); + printk("usb bustx thread stop\n"); + + return 0; +} + +int usb_busrx_thread(void *data) +{ + struct aicwf_rx_priv *rx_priv = (struct aicwf_rx_priv *)data; + struct aicwf_bus *bus_if = rx_priv->usbdev->bus_if; + + allow_signal(SIGTERM); + while (1) { + #if 0 + if(kthread_should_stop()) { + usb_err("usb busrx thread stop\n"); + break; + } + #endif + if (!wait_for_completion_interruptible(&bus_if->busrx_trgg)) { + aicwf_process_rxframes(rx_priv); + } else { + break; + } + if (bus_if->state == BUS_DOWN_ST) { + printk("usb busrx thread 
will to stop\n"); + break; + } + + } + aic_thread_wait_stop(); + printk("usb busrx thread stop\n"); + + return 0; +} + +static void aicwf_usb_send_msg_complete(struct urb *urb) +{ + struct aic_usb_dev *usb_dev = (struct aic_usb_dev *) urb->context; + + usb_dev->msg_finished = true; + if (waitqueue_active(&usb_dev->msg_wait)) + wake_up(&usb_dev->msg_wait); +} + +static int aicwf_usb_bus_txmsg(struct device *dev, u8 *buf, u32 len) +{ + int ret = 0; + struct aicwf_bus *bus_if = dev_get_drvdata(dev); + struct aic_usb_dev *usb_dev = bus_if->bus_priv.usb; + + if (usb_dev->state != USB_UP_ST) + return -EIO; + + if (buf == NULL || len == 0 || usb_dev->msg_out_urb == NULL) + return -EINVAL; + + if (test_and_set_bit(0, &usb_dev->msg_busy)) { + usb_err("In a control frame option, can't tx!\n"); + return -EIO; + } + + usb_dev->msg_finished = false; + +#ifdef CONFIG_USB_MSG_EP + if (usb_dev->msg_out_pipe && usb_dev->use_msg_ep == 1) { + usb_fill_bulk_urb(usb_dev->msg_out_urb, + usb_dev->udev, + usb_dev->msg_out_pipe, + buf, len, (usb_complete_t) aicwf_usb_send_msg_complete, usb_dev); + } else { + usb_fill_bulk_urb(usb_dev->msg_out_urb, + usb_dev->udev, + usb_dev->bulk_out_pipe, + buf, len, (usb_complete_t) aicwf_usb_send_msg_complete, usb_dev); + } +#else + usb_fill_bulk_urb(usb_dev->msg_out_urb, + usb_dev->udev, + usb_dev->bulk_out_pipe, + buf, len, (usb_complete_t) aicwf_usb_send_msg_complete, usb_dev); +#endif + #if defined CONFIG_USB_NO_TRANS_DMA_MAP + usb_dev->msg_out_urb->transfer_dma = usb_dev->cmd_dma_trans_addr; + usb_dev->msg_out_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; + #endif + usb_dev->msg_out_urb->transfer_flags |= URB_ZERO_PACKET; + + ret = usb_submit_urb(usb_dev->msg_out_urb, GFP_ATOMIC); + if (ret) { + usb_err("usb_submit_urb failed %d\n", ret); + goto exit; + } + + ret = wait_event_timeout(usb_dev->msg_wait, + usb_dev->msg_finished, msecs_to_jiffies(CMD_TX_TIMEOUT)); + if (!ret) { + if (usb_dev->msg_out_urb) + usb_kill_urb(usb_dev->msg_out_urb); + usb_err("Txmsg wait timed out\n"); + ret = -EIO; + goto exit; + } + + if (usb_dev->msg_finished == false) { + usb_err("Txmsg timed out\n"); + ret = -ETIMEDOUT; + goto exit; + } +exit: + clear_bit(0, &usb_dev->msg_busy); + return ret; +} + + +static void aicwf_usb_free_urb(struct list_head *q, spinlock_t *qlock) +{ + struct aicwf_usb_buf *usb_buf, *tmp; + unsigned long flags; + + spin_lock_irqsave(qlock, flags); + list_for_each_entry_safe(usb_buf, tmp, q, list) { + spin_unlock_irqrestore(qlock, flags); + if (!usb_buf->urb) { + usb_err("bad usb_buf\n"); + spin_lock_irqsave(qlock, flags); + break; + } + usb_free_urb(usb_buf->urb); + list_del_init(&usb_buf->list); + spin_lock_irqsave(qlock, flags); + } + spin_unlock_irqrestore(qlock, flags); +} + +static int aicwf_usb_alloc_rx_urb(struct aic_usb_dev *usb_dev) +{ + int i; + + for (i = 0; i < AICWF_USB_RX_URBS; i++) { + struct aicwf_usb_buf *usb_buf = &usb_dev->usb_rx_buf[i]; + + usb_buf->usbdev = usb_dev; + usb_buf->urb = usb_alloc_urb(0, GFP_KERNEL); + if (!usb_buf->urb) { + usb_err("could not allocate rx data urb\n"); + goto err; + } + #if defined CONFIG_USB_NO_TRANS_DMA_MAP + // dma buf unused + usb_buf->data_buf = NULL; + usb_buf->data_dma_trans_addr = 0x0; + #endif + list_add_tail(&usb_buf->list, &usb_dev->rx_free_list); + } + return 0; + +err: + aicwf_usb_free_urb(&usb_dev->rx_free_list, &usb_dev->rx_free_lock); + return -ENOMEM; +} + +static int aicwf_usb_alloc_tx_urb(struct aic_usb_dev *usb_dev) +{ + int i; + + for (i = 0; i < AICWF_USB_TX_URBS; i++) { + struct aicwf_usb_buf 
*usb_buf = &usb_dev->usb_tx_buf[i]; + + usb_buf->usbdev = usb_dev; + usb_buf->urb = usb_alloc_urb(0, GFP_KERNEL); + if (!usb_buf->urb) { + usb_err("could not allocate tx data urb\n"); + goto err; + } + #if defined CONFIG_USB_NO_TRANS_DMA_MAP + // dma buf unused + usb_buf->data_buf = NULL; + usb_buf->data_dma_trans_addr = 0x0; + #endif + list_add_tail(&usb_buf->list, &usb_dev->tx_free_list); + (usb_dev->tx_free_count)++; + } + return 0; + +err: + aicwf_usb_free_urb(&usb_dev->tx_free_list, &usb_dev->tx_free_lock); + return -ENOMEM; +} + + +static void aicwf_usb_state_change(struct aic_usb_dev *usb_dev, int state) +{ + int old_state; + + if (usb_dev->state == state) + return; + + old_state = usb_dev->state; + usb_dev->state = state; + + if (state == USB_DOWN_ST) { + usb_dev->bus_if->state = BUS_DOWN_ST; + } + if (state == USB_UP_ST) { + usb_dev->bus_if->state = BUS_UP_ST; + } +} + +static int aicwf_usb_bus_txdata(struct device *dev, struct sk_buff *skb) +{ + u8 *buf = NULL; + u16 buf_len = 0; + struct aicwf_usb_buf *usb_buf; + int ret = 0; + unsigned long flags; + struct aicwf_bus *bus_if = dev_get_drvdata(dev); + struct aic_usb_dev *usb_dev = bus_if->bus_priv.usb; + bool need_cfm = false; + + if (usb_dev->state != USB_UP_ST) { + usb_err("usb state is not up!\n"); + dev_kfree_skb_any(skb); + return -EIO; + } + + usb_buf = aicwf_usb_tx_dequeue(usb_dev, &usb_dev->tx_free_list, + &usb_dev->tx_free_count, &usb_dev->tx_free_lock); + if (!usb_buf) { + usb_err("free:%d, post:%d\n", usb_dev->tx_free_count, usb_dev->tx_post_count); + dev_kfree_skb_any(skb); + ret = -ENOMEM; + goto flow_ctrl; + } + + usb_buf->usbdev = usb_dev; + if (need_cfm) + usb_buf->cfm = true; + else + usb_buf->cfm = false; + usb_fill_bulk_urb(usb_buf->urb, usb_dev->udev, usb_dev->bulk_out_pipe, + buf, buf_len, aicwf_usb_tx_complete, usb_buf); + #if defined CONFIG_USB_NO_TRANS_DMA_MAP + usb_buf->urb->transfer_dma = usb_buf->data_dma_trans_addr; + usb_buf->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; + #endif + usb_buf->urb->transfer_flags |= URB_ZERO_PACKET; + + aicwf_usb_tx_queue(usb_dev, &usb_dev->tx_post_list, usb_buf, + &usb_dev->tx_post_count, &usb_dev->tx_post_lock); + complete(&bus_if->bustx_trgg); + ret = 0; + + flow_ctrl: + spin_lock_irqsave(&usb_dev->tx_flow_lock, flags); + if (usb_dev->tx_free_count < AICWF_USB_TX_LOW_WATER) { + usb_dev->tbusy = true; + aicwf_usb_tx_flowctrl(usb_dev, true); + } + spin_unlock_irqrestore(&usb_dev->tx_flow_lock, flags); + + return ret; +} + +static int aicwf_usb_bus_start(struct device *dev) +{ + struct aicwf_bus *bus_if = dev_get_drvdata(dev); + struct aic_usb_dev *usb_dev = bus_if->bus_priv.usb; + + if (usb_dev->state == USB_UP_ST) + return 0; + + aicwf_usb_state_change(usb_dev, USB_UP_ST); + aicwf_usb_rx_prepare(usb_dev); + aicwf_usb_tx_prepare(usb_dev); + return 0; +} + +static void aicwf_usb_cancel_all_urbs(struct aic_usb_dev *usb_dev) +{ + struct aicwf_usb_buf *usb_buf, *tmp; + unsigned long flags; + + if (usb_dev->msg_out_urb) + usb_kill_urb(usb_dev->msg_out_urb); + + spin_lock_irqsave(&usb_dev->tx_post_lock, flags); + list_for_each_entry_safe(usb_buf, tmp, &usb_dev->tx_post_list, list) { + spin_unlock_irqrestore(&usb_dev->tx_post_lock, flags); + if (!usb_buf->urb) { + usb_err("bad usb_buf\n"); + spin_lock_irqsave(&usb_dev->tx_post_lock, flags); + break; + } + usb_kill_urb(usb_buf->urb); + spin_lock_irqsave(&usb_dev->tx_post_lock, flags); + } + spin_unlock_irqrestore(&usb_dev->tx_post_lock, flags); + + usb_kill_anchored_urbs(&usb_dev->rx_submitted); +} + +static void 
aicwf_usb_bus_stop(struct device *dev) +{ + struct aicwf_bus *bus_if = dev_get_drvdata(dev); + struct aic_usb_dev *usb_dev = bus_if->bus_priv.usb; + + if (usb_dev == NULL) + return; + + if (usb_dev->state == USB_DOWN_ST) + return; + + aicwf_usb_state_change(usb_dev, USB_DOWN_ST); + aicwf_usb_cancel_all_urbs(usb_dev); +} + +static void aicwf_usb_deinit(struct aic_usb_dev *usbdev) +{ + cancel_work_sync(&usbdev->rx_urb_work); + aicwf_usb_free_urb(&usbdev->rx_free_list, &usbdev->rx_free_lock); + aicwf_usb_free_urb(&usbdev->tx_free_list, &usbdev->tx_free_lock); + aicwf_usb_free_urb(&usbdev->tx_post_list, &usbdev->tx_post_lock); + usb_free_urb(usbdev->msg_out_urb); +} + +static void aicwf_usb_rx_urb_work(struct work_struct *work) +{ + struct aic_usb_dev *usb_dev = container_of(work, struct aic_usb_dev, rx_urb_work); + + aicwf_usb_rx_submit_all_urb(usb_dev); +} + +static int aicwf_usb_init(struct aic_usb_dev *usb_dev) +{ + int ret = 0; + + usb_dev->tbusy = false; + usb_dev->app_cmp = false; + usb_dev->state = USB_DOWN_ST; + + init_waitqueue_head(&usb_dev->msg_wait); + init_usb_anchor(&usb_dev->rx_submitted); + + spin_lock_init(&usb_dev->tx_free_lock); + spin_lock_init(&usb_dev->tx_post_lock); + spin_lock_init(&usb_dev->rx_free_lock); + spin_lock_init(&usb_dev->tx_flow_lock); + + INIT_LIST_HEAD(&usb_dev->rx_free_list); + INIT_LIST_HEAD(&usb_dev->tx_free_list); + INIT_LIST_HEAD(&usb_dev->tx_post_list); + + usb_dev->tx_free_count = 0; + usb_dev->tx_post_count = 0; + + ret = aicwf_usb_alloc_rx_urb(usb_dev); + if (ret) { + goto error; + } + ret = aicwf_usb_alloc_tx_urb(usb_dev); + if (ret) { + goto error; + } + + + usb_dev->msg_out_urb = usb_alloc_urb(0, GFP_ATOMIC); + if (!usb_dev->msg_out_urb) { + usb_err("usb_alloc_urb (msg out) failed\n"); + ret = ENOMEM; + goto error; + } + + INIT_WORK(&usb_dev->rx_urb_work, aicwf_usb_rx_urb_work); + + return ret; + error: + usb_err("failed!\n"); + aicwf_usb_deinit(usb_dev); + return ret; +} + +static int aicwf_parse_usb(struct aic_usb_dev *usb_dev, struct usb_interface *interface, int pid) +{ + struct usb_interface_descriptor *interface_desc; + struct usb_host_interface *host_interface; + struct usb_endpoint_descriptor *endpoint; + struct usb_device *usb = usb_dev->udev; + int i, endpoints; + u8 endpoint_num; + int ret = 0; + + usb_dev->bulk_in_pipe = 0; + usb_dev->bulk_out_pipe = 0; +#ifdef CONFIG_USB_MSG_EP + usb_dev->msg_out_pipe = 0; +#endif + + + host_interface = &interface->altsetting[0]; + interface_desc = &host_interface->desc; + endpoints = interface_desc->bNumEndpoints; + + /* Check device configuration */ + if (usb->descriptor.bNumConfigurations != 1) { + usb_err("Number of configurations: %d not supported\n", + usb->descriptor.bNumConfigurations); + ret = -ENODEV; + goto exit; + } + + /* Check deviceclass */ + if (usb->descriptor.bDeviceClass != 0x00 && usb->descriptor.bDeviceClass != 239) { + usb_err("DeviceClass %d not supported\n", + usb->descriptor.bDeviceClass); + ret = -ENODEV; + goto exit; + } + + /* Check interface number */ + if (usb->actconfig->desc.bNumInterfaces != 1 && usb->actconfig->desc.bNumInterfaces != 3) { + usb_err("Number of interfaces: %d not supported\n", + usb->actconfig->desc.bNumInterfaces); + ret = -ENODEV; + goto exit; + } + + if ((interface_desc->bInterfaceClass != USB_CLASS_VENDOR_SPEC) || + (interface_desc->bInterfaceSubClass != 0xff) || + (interface_desc->bInterfaceProtocol != 0xff)) { + usb_err("non WLAN interface %d: 0x%x:0x%x:0x%x\n", + interface_desc->bInterfaceNumber, interface_desc->bInterfaceClass, + 
interface_desc->bInterfaceSubClass, interface_desc->bInterfaceProtocol); + ret = -ENODEV; + goto exit; + } + + for (i = 0; i < endpoints; i++) { + endpoint = &host_interface->endpoint[i].desc; + endpoint_num = usb_endpoint_num(endpoint); + + if (usb_endpoint_dir_in(endpoint) && + usb_endpoint_xfer_bulk(endpoint)) { + if (!usb_dev->bulk_in_pipe) { + usb_dev->bulk_in_pipe = usb_rcvbulkpipe(usb, endpoint_num); + } + } + + if (usb_endpoint_dir_out(endpoint) && + usb_endpoint_xfer_bulk(endpoint)) { + if (!usb_dev->bulk_out_pipe) + { + usb_dev->bulk_out_pipe = usb_sndbulkpipe(usb, endpoint_num); + } +#ifdef CONFIG_USB_MSG_EP + else if (!usb_dev->msg_out_pipe) { + usb_dev->msg_out_pipe = usb_sndbulkpipe(usb, endpoint_num); + } +#endif + } + } + + if (usb_dev->bulk_in_pipe == 0) { + usb_err("No RX (in) Bulk EP found\n"); + ret = -ENODEV; + goto exit; + } + if (usb_dev->bulk_out_pipe == 0) { + usb_err("No TX (out) Bulk EP found\n"); + ret = -ENODEV; + goto exit; + } + +#ifdef CONFIG_USB_MSG_EP + if ( usb_dev->msg_out_pipe != 0 && + (usb_dev->chipid == PRODUCT_ID_AIC8801 || + usb_dev->chipid == PRODUCT_ID_AIC8800D81|| + usb_dev->chipid == PRODUCT_ID_AIC8800D81X2 || + usb_dev->chipid == PRODUCT_ID_AIC8800D89X2)){ + printk("TX Msg Bulk EP found\n"); + usb_dev->use_msg_ep = 1; + }else{ + usb_dev->use_msg_ep = 0; + } + +#if 0 + if (usb_dev->msg_out_pipe == 0) { + usb_err("No TX Msg (out) Bulk EP found\n"); + usb_dev->use_msg_ep = 0; + }else{ + usb_dev->use_msg_ep = 1; + } +#endif +#endif + + printk("Aic %s speed USB device detected\n", + (usb->speed == USB_SPEED_SUPER) ? "super" : + (usb->speed == USB_SPEED_HIGH) ? "high" : + (usb->speed == USB_SPEED_FULL) ? "full" : + (usb->speed == USB_SPEED_LOW) ? "low" : "NG"); + exit: + return ret; +} + + + +static struct aicwf_bus_ops aicwf_usb_bus_ops = { + .start = aicwf_usb_bus_start, + .stop = aicwf_usb_bus_stop, + .txdata = aicwf_usb_bus_txdata, + .txmsg = aicwf_usb_bus_txmsg, +}; + +#if 0 +u32 patch_tbl[][2] = +{ +#if defined(CONFIG_RFTEST) + {JUMP_TABLE_OFFSET(28), 0x16b4c5}, // 161998 + {JUMP_TABLE_OFFSET(130), 0x16b379}, // 161B30 + {JUMP_TABLE_OFFSET(126), 0x16b155}, // 161B20 + {JUMP_TABLE_OFFSET(132), 0x16b38d}, // 161b38 + {JUMP_TABLE_OFFSET(34), 0x16b515}, // 1619b0 +#elif defined(CONFIG_PLATFORM_UBUNTU) + {JUMP_TABLE_OFFSET(28), 0x16b5a5}, // 161998 + {JUMP_TABLE_OFFSET(130), 0x16b459}, // 161B30 + {JUMP_TABLE_OFFSET(126), 0x16b235}, // 161B20 + {JUMP_TABLE_OFFSET(132), 0x16b46d}, // 161b38 + {JUMP_TABLE_OFFSET(34), 0x16b5f5}, // 1619b0 +#else + {JUMP_TABLE_OFFSET(28), 0x16b231}, // 161998 + {JUMP_TABLE_OFFSET(130), 0x16b0e5}, // 161B30 + {JUMP_TABLE_OFFSET(126), 0x16aec1}, // 161B20 + {JUMP_TABLE_OFFSET(132), 0x16b0f9}, // 161b38 + {JUMP_TABLE_OFFSET(34), 0x16b281}, // 1619b0 +#endif +}; +#endif + +#if 0 +u32 patch_tbl_rf[][2] = +{ + {JUMP_TABLE_OFFSET(28), 0x16b4c5}, // 161998 + {JUMP_TABLE_OFFSET(130), 0x16b379}, // 161B30 + {JUMP_TABLE_OFFSET(126), 0x16b155}, // 161B20 + {JUMP_TABLE_OFFSET(132), 0x16b38d}, // 161b38 + {JUMP_TABLE_OFFSET(34), 0x16b515}, // 1619b0 +}; +#endif +#if 0 +u32 patch_tbl[18][2] = +{ + {0x0044, 0x00000002}, //hosttype + {0x0048, 0x00000060}, +#ifdef CONFIG_USB_BT + {0x004c, 0x00000040}, +#else + {0x004c, 0x00000046}, +#endif + {0x0050, 0x00000000}, //ipc base + {0x0054, 0x001a0000}, //buf base + {0x0058, 0x001a0140}, //desc base + {0x005c, 0x00000ee0}, //desc size + {0x0060, 0x001a1020}, //pkt base +#ifdef CONFIG_USB_BT + {0x0064, 0x0001EFE0}, //pkt size +#else + {0x0064, 0x000207e0}, //pkt size +#endif + {0x0068, 
0x00000008}, + {0x006c, 0x00000040}, + {0x0070, 0x00000040}, + {0x0074, 0x00000020}, + {0x0078, 0x00000000}, + {0x007c, 0x00000040}, + {0x0080, 0x00190000}, + {0x0084, 0x0000fc00},//63kB + {0x0088, 0x0019fc00} +}; +#endif + +u32 adaptivity_patch_tbl[][2] = { + {0x0004, 0x0000320A}, //linkloss_thd + {0x0094, 0x00000000}, //ac_param_conf + {0x00F8, 0x00010138}, //tx_adaptivity_en +}; + +u32 patch_tbl[][2] ={ +{0x0044, 0x00000002}, //hosttype +{0x0048, 0x00000060}, +#if 1//def CONFIG_USB_BT +#ifdef CONFIG_USB_MSG_EP +{0x004c, 0x00040050}, +#else +{0x004c, 0x00000052}, +#endif +#else +{0x004c, 0x00000046}, +#endif +{0x0050, 0x00000000}, //ipc base +{0x0054, 0x00190000}, //buf base +{0x0058, 0x00190140}, //desc base +{0x005c, 0x00000ee0}, //desc size +{0x0060, 0x00191020}, //pkt base +#if 1//def CONFIG_USB_BT +{0x0064, 0x0002EFE0}, //pkt size +#else +{0x0064, 0x000207e0}, //pkt size +#endif +{0x0068, 0x00000008}, //tx BK A-MPDU 8 +{0x006c, 0x00000040}, //tx BE A-MPDU 40 +{0x0070, 0x00000040}, //tx VI A-MPDU 40 +{0x0074, 0x00000020}, //tx VO A-MPDU 20 +{0x0078, 0x00000000}, //bcn queue +{0x007c, 0x00000020}, //rx A-MPDU 20 +{0x0080, 0x001d0000}, +{0x0084, 0x0000fc00},//63kB +{0x0088, 0x001dfc00}, +{0x00A8, 0x8D080103},//dm 8D08 +{0x00D0, 0x00010103},//aon sram +#ifdef CONFIG_USB_MSG_EP +{0x00D4, 0x0404087C}, +#else +{0x00D4, 0x0000087C}, +#endif +{0x00D8, 0x001C0000},//bt base +{0x00DC, 0x00008000},//bt size +{0x00E0, 0x04020A08}, +{0x00E4, 0x00000001}, +#ifdef CONFIG_USB_MSG_EP +{0x00FC, 0x00000302},//rx msg fc pkt cnt +#endif +{0x0100, 0x0000000F}, //usb_reboot_additional_delay +#if !defined(CONFIG_LINK_DET_5G) +{0x0104, 0x00000000}, //link_det_5g +#endif +#ifdef CONFIG_USB_SUSPEND_REBOOT_TIME +{0x0110, 0x03e80001}//reboot time when usb suspend,0001 enables reboot on suspend, default 0x3e8 = 1000ms reboot +#else +#ifdef CONFIG_SUPPORT_USB_SUSP +//USB SUSP keep working 0x0100:enable 0x0000:disable +{0x0110, 0x00000100} +#endif +#endif +}; + + +#if 0 +u32 patch_tbl[][2] = +{ +#ifdef CONFIG_PLATFORM_UBUNTU + {JUMP_TABLE_OFFSET(28), 0x16b5a5}, // 161998 + {JUMP_TABLE_OFFSET(130), 0x16b459}, // 161B30 + {JUMP_TABLE_OFFSET(126), 0x16b235}, // 161B20 + {JUMP_TABLE_OFFSET(132), 0x16b46d}, // 161b38 + {JUMP_TABLE_OFFSET(34), 0x16b5f5}, // 1619b0 +#else + {JUMP_TABLE_OFFSET(28), 0x16b231}, // 161998 + {JUMP_TABLE_OFFSET(130), 0x16b0e5}, // 161B30 + {JUMP_TABLE_OFFSET(126), 0x16aec1}, // 161B20 + {JUMP_TABLE_OFFSET(132), 0x16b0f9}, // 161b38 + {JUMP_TABLE_OFFSET(34), 0x16b281}, // 1619b0 +#endif +}; +#endif + +u32 syscfg_tbl_pmic_u02[][2] = { + {0x40040000, 0x00001AC8}, // 1) fix panic + {0x40040084, 0x00011580}, + {0x40040080, 0x00000001}, + {0x40100058, 0x00000000}, +}; + +u32 syscfg_tbl_u04[][2] = { + {0x40040000, 0x0000042C}, // protect usb replenish rxq / flush rxq, skip flush rxq before start_app + {0x40040004, 0x0000DD44}, + {0x40040008, 0x00000448}, + {0x4004000C, 0x0000044C}, + {0x0019B800, 0xB9F0F19B}, + {0x0019B804, 0x0019B81D}, + {0x0019B808, 0xBF00FA79}, + {0x0019B80C, 0xF007BF00}, + {0x0019B810, 0x4605B672}, // code + {0x0019B814, 0x21E0F04F}, + {0x0019B818, 0xBE0BF664}, + {0x0019B81C, 0xF665B510}, + {0x0019B820, 0x4804FC9D}, + {0x0019B824, 0xFA9EF66C}, + {0x0019B828, 0xFCA8F665}, + {0x0019B82C, 0x4010E8BD}, + {0x0019B830, 0xBAC6F66C}, + {0x0019B834, 0x0019A0C4}, + {0x40040084, 0x0019B800}, // out base + {0x40040080, 0x0000000F}, + {0x40100058, 0x00000000}, +}; + +u32 syscfg_tbl[][2] = { + {0x40500014, 0x00000101}, // 1) + {0x40500018, 0x0000010d}, // 2)//bt only:10d ,bt combo and bt only 
sw:109 + {0x40500004, 0x00000010}, // 3) the order should not be changed + #if 1//CONFIG_PMIC_SETTING + {0x50000000, 0x03220204}, // 2) pmic interface init + {0x50019150, 0x00000002}, + {0x50017008, 0x00000000}, // 4) stop wdg + #endif /* CONFIG_PMIC_SETTING */ +}; + +u32 sys_reboot_tbl[][2] = { + {0x50017000, 0x0001ffff}, + {0x50017008, 0x00000002}, +}; + +#if 0 +u32 bt_config_tbl[][2] = +{ + {0x0016f000, 0xe0250793}, + {0x40080000, 0x0008bd64}, + #if 0 + {0x0016f004, 0xe0052b00}, + {0x40080004, 0x000b5570}, + {0x0016f008, 0xe7b12b00}, + {0x40080008, 0x000b5748}, + {0x0016f00c, 0x001ad00e}, + {0x4008000c, 0x000ac8f8}, + #endif + {0x40080084, 0x0016f000}, // out + {0x40080080, 0x00000001}, // en + {0x40100058, 0x00000000}, // bypass +}; +#endif + +u32 rf_tbl_masked[][3] = { + {0x40344058, 0x00800000, 0x00000000},// pll trx +}; + +static int system_config_8800(struct aic_usb_dev *usb_dev){ + int syscfg_num; + int ret, cnt; + const u32 mem_addr = 0x40500000; + struct dbg_mem_read_cfm rd_mem_addr_cfm; + ret = rwnx_send_dbg_mem_read_req(usb_dev, mem_addr, &rd_mem_addr_cfm); + if (ret) { + printk("%x rd fail: %d\n", mem_addr, ret); + return ret; + } + chip_id =(u8)(rd_mem_addr_cfm.memdata >> 16); + //printk("%x=%x\n", rd_mem_addr_cfm.memaddr, rd_mem_addr_cfm.memdata); + ret = rwnx_send_dbg_mem_read_req(usb_dev, 0x00000004, &rd_mem_addr_cfm); + if (ret) { + printk("[0x00000004] rd fail: %d\n", ret); + return ret; + } + chip_sub_id = (u8)(rd_mem_addr_cfm.memdata >> 4); + //printk("%x=%x\n", rd_mem_addr_cfm.memaddr, rd_mem_addr_cfm.memdata); + printk("chip_id=%x, chip_sub_id=%x\n", chip_id, chip_sub_id); + if (chip_id == CHIP_REV_U02) { + syscfg_num = sizeof(syscfg_tbl_pmic_u02) / sizeof(u32) / 2; + for (cnt = 0; cnt < syscfg_num; cnt++) { + ret = rwnx_send_dbg_mem_write_req(usb_dev, syscfg_tbl_pmic_u02[cnt][0], syscfg_tbl_pmic_u02[cnt][1]); + if (ret) { + printk("%x write fail: %d\n", syscfg_tbl_pmic_u02[cnt][0], ret); + return ret; + } + } + } + if ((chip_id == CHIP_REV_U03) && (chip_sub_id == CHIP_SUB_REV_U04)) { + syscfg_num = sizeof(syscfg_tbl_u04) / sizeof(u32) / 2; + printk("cfg u04\n"); + for (cnt = 0; cnt < syscfg_num; cnt++) { + ret = rwnx_send_dbg_mem_write_req(usb_dev, syscfg_tbl_u04[cnt][0], syscfg_tbl_u04[cnt][1]); + if (ret) { + printk("%x write fail: %d\n", syscfg_tbl_u04[cnt][0], ret); + return ret; + } + } + } + syscfg_num = sizeof(syscfg_tbl) / sizeof(u32) / 2; + for (cnt = 0; cnt < syscfg_num; cnt++) { + ret = rwnx_send_dbg_mem_write_req(usb_dev, syscfg_tbl[cnt][0], syscfg_tbl[cnt][1]); + if (ret) { + printk("%x write fail: %d\n", syscfg_tbl[cnt][0], ret); + return ret; + } + } + return 0; + +} + +static int system_config(struct aic_usb_dev *usb_dev) +{ + if(usb_dev->chipid == PRODUCT_ID_AIC8800){ + return system_config_8800(usb_dev); + }else if(usb_dev->chipid == PRODUCT_ID_AIC8800D80){ + return system_config_8800d80(usb_dev); + }else if(usb_dev->chipid == PRODUCT_ID_AIC8800D80X2){ + return system_config_8800d80x2(usb_dev); + }else{ + return -1; + } +} + + +static int system_reboot(struct aic_usb_dev *usb_dev) +{ + int syscfg_num; + int ret, cnt; + + syscfg_num = sizeof(sys_reboot_tbl) / sizeof(u32) / 2; + for (cnt = 0; cnt < syscfg_num; cnt++) { + ret = rwnx_send_dbg_mem_write_req(usb_dev, sys_reboot_tbl[cnt][0], sys_reboot_tbl[cnt][1]); + if (ret) { + printk("%x write fail: %d\n", sys_reboot_tbl[cnt][0], ret); + return ret; + } + } + return 0; +} + +static int rf_config(struct aic_usb_dev *usb_dev) +{ + int ret; + ret = rwnx_send_dbg_mem_mask_write_req(usb_dev, + 
rf_tbl_masked[0][0], rf_tbl_masked[0][1], rf_tbl_masked[0][2]); + if (ret) { + printk("rf config %x write fail: %d\n", rf_tbl_masked[0][0], ret); + } + + return ret; +} + +static int patch_config(struct aic_usb_dev *usb_dev) +{ + + int ret = 0; + int cnt = 0; + int patch_num = 0; + uint32_t start_addr = 0x1e6000; + u32 patch_addr = start_addr; + u32 config_base; + struct dbg_mem_read_cfm rd_patch_addr_cfm; + const u32 rd_patch_addr = RAM_FW_ADDR + 0x0180; + int tmp_cnt = 0; + int adap_patch_num = 0; + + printk("%s enter \r\n", __func__); + + //if (testmode) { + patch_num = sizeof(patch_tbl)/4; + + printk("Read FW mem: %08x\n", rd_patch_addr); + if ((ret = rwnx_send_dbg_mem_read_req(usb_dev, rd_patch_addr, &rd_patch_addr_cfm))) { + printk("patch rd fail\n"); + } + + printk("%x=%x\n", rd_patch_addr_cfm.memaddr, rd_patch_addr_cfm.memdata); + config_base = rd_patch_addr_cfm.memdata; + + //if (testmode == FW_NORMAL_MODE) { + if((ret = rwnx_send_dbg_mem_write_req(usb_dev, 0x1e5318, patch_addr))) { + printk("%x write fail\n", 0x1e5318); + } + + if(adap_test){ + printk("%s for adaptivity test \r\n", __func__); + adap_patch_num = sizeof(adaptivity_patch_tbl)/4; + if((ret = rwnx_send_dbg_mem_write_req(usb_dev, 0x1e531c, patch_num + adap_patch_num))) { + printk("%x write fail\n", 0x1e531c); + } + }else{ + if((ret = rwnx_send_dbg_mem_write_req(usb_dev, 0x1e531c, patch_num))) { + printk("%x write fail\n", 0x1e531c); + } + } + //}else if(testmode == FW_TEST_MODE){//for old rf fw + // if((ret = rwnx_send_dbg_mem_write_req(usb_dev, 0x1e4d78, patch_addr))) { + // printk("%x write fail\n", 0x1e4d80); + // } + // if((ret = rwnx_send_dbg_mem_write_req(usb_dev, 0x1e4d7C, patch_num))) { + // printk("%x write fail\n", 0x1e4d84); + // } + //} + + for(cnt = 0; cnt < patch_num/2; cnt+=1) + { + if((ret = rwnx_send_dbg_mem_write_req(usb_dev, start_addr+8*cnt, patch_tbl[cnt][0]+config_base))) { + printk("%x write fail\n", start_addr+8*cnt); + } + + if((ret = rwnx_send_dbg_mem_write_req(usb_dev, start_addr+8*cnt+4, patch_tbl[cnt][1]))) { + printk("%x write fail\n", start_addr+8*cnt+4); + } + + } + + tmp_cnt = cnt; + + if(adap_test){ + for(cnt = 0; cnt < adap_patch_num/2; cnt+=1) + { + if((ret = rwnx_send_dbg_mem_write_req(usb_dev, start_addr+8*(cnt+tmp_cnt), adaptivity_patch_tbl[cnt][0]+config_base))) { + printk("%x write fail\n", start_addr+8*cnt); + } + + if((ret = rwnx_send_dbg_mem_write_req(usb_dev, start_addr+8*(cnt+tmp_cnt)+4, adaptivity_patch_tbl[cnt][1]))) { + printk("%x write fail\n", start_addr+8*cnt+4); + } + } + } + + return ret; +#if 0 + } else { + patch_num = sizeof(patch_tbl) / sizeof(u32) / 2; + + for(cnt = 0; cnt < patch_num; cnt++) { + ret = rwnx_send_dbg_mem_write_req(usb_dev, patch_tbl[cnt][0], patch_tbl[cnt][1]); + if(ret) { + printk("%x write fail: %d\n", patch_tbl[cnt][0], ret); + break; + } + } + return ret; + } +#endif +} + +#if 0 +static int bt_config(struct aic_usb_dev *usb_dev) +{ + int trap_num = sizeof(bt_config_tbl) / sizeof(u32) / 2; + int ret, cnt; + + printk("%s enter \r\n", __func__); + for(cnt = 0; cnt < trap_num; cnt++) { + ret = rwnx_send_dbg_mem_write_req(usb_dev, bt_config_tbl[cnt][0], bt_config_tbl[cnt][1]); + if(ret) { + printk("%x write fail: %d\n", bt_config_tbl[cnt][0], ret); + break; + } + } + return ret; +} +#endif + +static int get_paring_ids(char* c_paringids, int* i_paringids){ + int i = 0; + int len = strlen(c_paringids); + int paring_id_num = 0; + char temp_paring_id[5]; + +//get paring_id_num + for(i = 4; i < len; i += 5){ + if(c_paringids[i] == ','){ + 
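                        /* Editorial note: a separator found at the expected offset
                         * (index 4, 9, 14, ...) counts one more 4-character pairing
                         * ID, so the module parameter is expected to look like
                         * "XXXX,YYYY,...". Each token is converted further below by
                         * rwnx_atoli(); the numeric base is handled there. */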
paring_id_num++; + }else if(c_paringids[i] == 0x00){ + break; + }else{ + paring_id_num = 0; + break; + } + } + + if(paring_id_num == 0 && len == 0){ + printk("%s paring_id_num:%d \r\n", __func__, paring_id_num); + return 0; + }else{ + paring_id_num++; + } + +//parsing paring_id + for(i = 0; i < paring_id_num; i++){ + memset(temp_paring_id, 0, 5); + temp_paring_id[0] = c_paringids[(i * 5)]; + temp_paring_id[1] = c_paringids[(i * 5) + 1]; + temp_paring_id[2] = c_paringids[(i * 5) + 2]; + temp_paring_id[3] = c_paringids[(i * 5) + 3]; + i_paringids[i] = rwnx_atoli(temp_paring_id); + } + + return paring_id_num; +} + +static int aicloadfw_chipmatch(struct aic_usb_dev *usb_dev, u16 vid, u16 pid){ + + if(pid == USB_DEVICE_ID_AIC){ + usb_dev->chipid = PRODUCT_ID_AIC8800; + AICWFDBG(LOGINFO, "%s USE AIC8800\r\n", __func__); + return 0; + }else if(pid == USB_DEVICE_ID_AIC_8801){ + usb_dev->chipid = PRODUCT_ID_AIC8801; + AICWFDBG(LOGINFO, "%s USE AIC8801\r\n", __func__); + return 0; + }else if(pid == USB_DEVICE_ID_AIC_8800D80){ + usb_dev->chipid = PRODUCT_ID_AIC8800D80; + AICWFDBG(LOGINFO, "%s USE AIC8800D80\r\n", __func__); + return 0; + }else if(pid == USB_DEVICE_ID_AIC_8800D81){ + usb_dev->chipid = PRODUCT_ID_AIC8800D81; + AICWFDBG(LOGINFO, "%s USE AIC8800D81\r\n", __func__); + return 0; + }else if(pid == USB_DEVICE_ID_AIC_8800D40){ + usb_dev->chipid = PRODUCT_ID_AIC8800D80; + AICWFDBG(LOGINFO, "%s USE AIC8800D40\r\n", __func__); + return 0; + }else if(pid == USB_DEVICE_ID_AIC_8800D41){ + usb_dev->chipid = PRODUCT_ID_AIC8800D81; + AICWFDBG(LOGINFO, "%s USE AIC8800D41\r\n", __func__); + return 0; + }else if(pid == USB_DEVICE_ID_AIC_8800D80X2 && vid == USB_VENDOR_ID_AIC_V2){ + usb_dev->chipid = PRODUCT_ID_AIC8800D80X2; + AICWFDBG(LOGINFO, "%s USE AIC8800D80X2\r\n", __func__); + return 0; + }else if(pid == USB_DEVICE_ID_AIC_8800D81X2 && vid == USB_VENDOR_ID_AIC_V2){ + usb_dev->chipid = PRODUCT_ID_AIC8800D81X2; + AICWFDBG(LOGINFO, "%s USE AIC8800D81X2\r\n", __func__); + return 0; + }else if(pid == USB_DEVICE_ID_AIC_8800D89X2 && vid == USB_VENDOR_ID_AIC_V2){ + usb_dev->chipid = PRODUCT_ID_AIC8800D89X2; + AICWFDBG(LOGINFO, "%s USE AIC8800D89X2\r\n", __func__); + return 0; + }else if(pid == USB_DEVICE_ID_AIC_8800D40X2 && vid == USB_VENDOR_ID_AIC_V2){ + usb_dev->chipid = PRODUCT_ID_AIC8800D80X2; + AICWFDBG(LOGINFO, "%s USE AIC8800D40X2\r\n", __func__); + return 0; + }else{ + return -1; + } +} + +int aicfw_download_fw_8800(struct aic_usb_dev *usb_dev){ + //uint32_t paring_id = 0; + uint32_t* paring_ids; + int paring_id_num = 0; + int i = 0; + const u32 fw_addr = RAM_FW_ADDR; + +#ifdef CONFIG_M2D_OTA_AUTO_SUPPORT + if(testmode == FW_M2D_OTA_MODE){ + rwnx_plat_m2d_flash_ota_android(usb_dev,FW_M2D_OTA_NAME); + }else if(testmode == FW_NORMAL_MODE) { + rwnx_plat_m2d_flash_ota_check(usb_dev,FW_M2D_OTA_NAME); + } +#endif + if(testmode == FW_TEST_MODE){ + if (rwnx_plat_bin_fw_upload_android(usb_dev, RAM_FW_ADDR, FW_RF_BASE_NAME)) { + return -1; + } + + if (chip_id == CHIP_REV_U03) { + if(rwnx_plat_bin_fw_upload_android(usb_dev, FW_RAM_ADID_BASE_ADDR, FW_ADID_BASE_NAME_U03)) { + return -1;; + } + + if(rwnx_plat_bin_fw_upload_android(usb_dev, FW_RAM_PATCH_BASE_ADDR_U03, FW_PATCH_BASE_NAME_U03)) { + return -1;; + } + } else { + if(rwnx_plat_bin_fw_upload_android(usb_dev, FW_RAM_ADID_BASE_ADDR, FW_ADID_BASE_NAME)) { + return -1;; + } + + if(rwnx_plat_bin_fw_upload_android(usb_dev, FW_RAM_PATCH_BASE_ADDR, FW_PATCH_BASE_NAME)) { + return -1;; + } + } + +#if 0 + if(rwnx_plat_bin_fw_upload_android(usb_dev, 
FW_RAM_ADID_BASE_ADDR, FW_RF_ADID_BASE_NAME)) { + goto out_free_bus; + } + + if(rwnx_plat_bin_fw_upload_android(usb_dev, FW_RAM_PATCH_BASE_ADDR, FW_RF_PATCH_BASE_NAME)) { + goto out_free_bus; + } +#endif + } else if(testmode == FW_BLE_SCAN_WAKEUP_MODE){ +#if 0 + paring_id = rwnx_atoli(paringid); + rwnx_plat_bin_fw_upload_android(usb_dev, RAM_FW_BLE_SCAN_WAKEUP_ADDR, FW_BLE_SCAN_WAKEUP_NAME); + rwnx_send_dbg_mem_write_req(usb_dev, 0x15FF00, 0x53454C42);//magic_num + rwnx_send_dbg_mem_write_req(usb_dev, 0x15FF04, ble_scan_wakeup_reboot_time);//reboot time + rwnx_send_dbg_mem_write_req(usb_dev, 0x15FF08, paring_id); + rwnx_send_dbg_mem_write_req(usb_dev, 0x15FF0c, paring_id); + rwnx_send_dbg_start_app_req(usb_dev, RAM_FW_BLE_SCAN_WAKEUP_ADDR, HOST_START_APP_AUTO); +#endif +#if 1 + paring_ids = (uint32_t*)kmalloc(sizeof(uint32_t) * 8, GFP_KERNEL); + rwnx_plat_bin_fw_upload_android(usb_dev, RAM_FW_BLE_SCAN_WAKEUP_ADDR, FW_BLE_SCAN_WAKEUP_NAME); + rwnx_send_dbg_mem_write_req(usb_dev, 0x15FF00, 0x53454C42);//magic_num + rwnx_send_dbg_mem_write_req(usb_dev, 0x15FF04, ble_scan_wakeup_reboot_time);//reboot time + paring_id_num = get_paring_ids(paringid, paring_ids); + for(i = 0; i < paring_id_num; i++){ + printk("paring_ids[%d]:0x%X \r\n", i, paring_ids[i]); + rwnx_send_dbg_mem_write_req(usb_dev, 0x15FF08 + (4 * i), paring_ids[i]); + } + rwnx_send_dbg_start_app_req(usb_dev, RAM_FW_BLE_SCAN_WAKEUP_ADDR, HOST_START_APP_AUTO); + kfree(paring_ids); +#endif + return -1; + + } else if(testmode == FW_BLE_SCAN_AD_FILTER_MODE){ +/* + data and ad_data_filter_mask instructions for use + ex. + data[18] = {0x46,0x00,0x00,0xff,0xff,0xff,0xff,0xff,0xff,0x30,0xff,0xff,0xff,0x43,0x52,0x45,0x4c,0x42}; + mask = 1100 0000 0111 1111 1100 0000 0000 0000 = 0xc07fc000 + + data = 0x46,0x00,0x00,0xff,0xff,0xff,0xff,0xff,0xff,0x30,0xff,0xff,0xff,0x43,0x52,0x45,0x4c,0x42 + mask = 1 1 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 0 0...... fill 0 + + data & mask = "0x46 0x00" 0x00 0x00 0x00 0x00 0x00 0x00 0x00 "0x30 0xff 0xff 0x43 0x52 0x45 0x4c 0x42" + using data & mask value condition to wakeup host_wake_bt gpio +*/ + struct ble_wakeup_param_t* wakeup_param = (struct ble_wakeup_param_t*)kmalloc(sizeof(struct ble_wakeup_param_t), GFP_KERNEL); + uint32_t *write_blocks = (uint32_t *)wakeup_param; + + printk("%s ble scan wakeup \r\n", __func__); + + memset(wakeup_param, 0, sizeof(struct ble_wakeup_param_t)); + rwnx_plat_bin_fw_upload_android(usb_dev, RAM_FW_BLE_SCAN_WAKEUP_ADDR, FW_BLE_SCAN_AD_FILTER_NAME); + wakeup_param->magic_num = 0x53454C42;//magic_num + wakeup_param->delay_scan_to = 1000;//delay start scan time(ms) + wakeup_param->reboot_to = ble_scan_wakeup_reboot_time;//reboot time + /******************************************************************/ + ///gpio_trigger_idx : 0 if wakeup_param->gpio_dft_lvl[0]=0xfe,this idx will be invalid. + wakeup_param->gpio_num[0] = gpio_num;////default select gpiob2 for fw_wakeup_host + wakeup_param->gpio_dft_lvl[0] = gpio_dft_lvl;////0:defalut pull down, 1:default pull up + ///gpio_trigger_idx : 1 if wakeup_param->gpio_dft_lvl[1]=0xfe,this idx will be invalid. 
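        /*
         * Editorial sketch (not part of the driver): one plausible reading of the
         * ad_data/ad_data_mask pairing described in the comment block above. Bit i
         * of ad_data_mask, counted from the MSB, selects whether byte i of the
         * received AD payload has to equal ad_data[i]:
         *
         *     match = true;
         *     for (i = 0; i < ad_len - 1; i++)
         *         if (((ad_data_mask >> (31 - i)) & 1) && adv[i] != ad_data[i])
         *             match = false;
         *
         * For example, ad_len = 12 with mask 0xffe00000 selects the first 11 bytes,
         * matching the "YK2BA_TEST3" name filter configured below. The real check is
         * performed by the firmware; this is only an illustration.
         */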
+ wakeup_param->gpio_num[1] = 3;////default select gpiob2 for fw_wakeup_host + wakeup_param->gpio_dft_lvl[1] = 1;////0:defalut pull down, 1:default pull up + /********************************************************************/ + /********************************************************************/ + //MAX_AD_FILTER_NUM=5 :num 0 + { + const uint8_t data[11] = {0x59,0x4B,0x32,0x42,0x41,0x5F,0x54,0x45,0x53,0x54,0x33}; + wakeup_param->ad_filter[0].ad_len = 12; + wakeup_param->ad_filter[0].ad_type = 0x09; + memcpy(wakeup_param->ad_filter[0].ad_data, data,wakeup_param->ad_filter[0].ad_len-1);// 1111 1111 1110 0000 0000 0000 0000 0000 //0xffe00000 + wakeup_param->ad_filter[0].ad_data_mask = 0xffe00000; + wakeup_param->ad_filter[0].ad_role = ROLE_COMBO|(COMBO_0<<4); + wakeup_param->ad_filter[0].gpio_trigger_idx = TG_IDX_0;//0: match for wakeup_param->gpio_num[0] 1: match for wakeup_param->gpio_num[1] + /********************************************************************/ + /*enable white list addr for paired remote ble addr. if all 0: all addr will use ad filter + wl_addr have value xx: only remote addr in white_list_addr will use ad filter + ********************************************************************/ + wakeup_param->ad_filter[0].wl_addr.addr[0] = 0; + wakeup_param->ad_filter[0].wl_addr.addr[1] = 0; + wakeup_param->ad_filter[0].wl_addr.addr[2] = 0; + wakeup_param->ad_filter[0].wl_addr.addr[3] = 0; + wakeup_param->ad_filter[0].wl_addr.addr[4] = 0; + wakeup_param->ad_filter[0].wl_addr.addr[5] = 0; + } + /********************************************************************/ + //MAX_AD_FILTER_NUM=5 :num 1 + { + const uint8_t data[2] = {0x12,0x18}; + wakeup_param->ad_filter[1].ad_len = 3; + wakeup_param->ad_filter[1].ad_type = 0x3; + memcpy(wakeup_param->ad_filter[1].ad_data, data,wakeup_param->ad_filter[1].ad_len-1);// 1100 0000 0000 0000 0000 0000 0000 0000 //0xc0000000 + wakeup_param->ad_filter[1].ad_data_mask = 0xc0000000; + wakeup_param->ad_filter[1].ad_role = ROLE_COMBO|(COMBO_0<<4); + wakeup_param->ad_filter[1].gpio_trigger_idx = TG_IDX_0;//0: match for wakeup_param->gpio_num[0] 1: match for wakeup_param->gpio_num[1] + /********************************************************************/ + /*enable white list addr for paired remote ble addr. 
if all 0: all addr will use ad filter + wl_addr have value xx: only remote addr in white_list_addr will use ad filter + ********************************************************************/ + wakeup_param->ad_filter[1].wl_addr.addr[0] = 0; + wakeup_param->ad_filter[1].wl_addr.addr[1] = 0; + wakeup_param->ad_filter[1].wl_addr.addr[2] = 0; + wakeup_param->ad_filter[1].wl_addr.addr[3] = 0; + wakeup_param->ad_filter[1].wl_addr.addr[4] = 0; + wakeup_param->ad_filter[1].wl_addr.addr[5] = 0; + } + /********************************************************************/ + //MAX_AD_FILTER_NUM=5 :num 2 + { + //const uint8_t data[11] = {0x59,0x4B,0x32,0x42,0x41,0x5F,0x54,0x45,0x53,0x54,0x33}; + wakeup_param->ad_filter[2].ad_len = 0; + wakeup_param->ad_filter[2].ad_type = 0; + //memcpy(wakeup_param->ad_filter[2].ad_data, data,wakeup_param->ad_filter[2].ad_len-1);// 1100 0000 0111 1111 1100 0000 0000 0000 //0xc07fc000 + wakeup_param->ad_filter[2].ad_data_mask = 0; + wakeup_param->ad_filter[2].ad_role = ROLE_ONLY; + wakeup_param->ad_filter[2].gpio_trigger_idx = TG_IDX_0;//0: match for wakeup_param->gpio_num[0] 1: match for wakeup_param->gpio_num[1] + /********************************************************************/ + /*enable white list addr for paired remote ble addr. if all 0: all addr will use ad filter + wl_addr have value xx: only remote addr in white_list_addr will use ad filter + ********************************************************************/ + wakeup_param->ad_filter[2].wl_addr.addr[0] = 0; + wakeup_param->ad_filter[2].wl_addr.addr[1] = 0; + wakeup_param->ad_filter[2].wl_addr.addr[2] = 0; + wakeup_param->ad_filter[2].wl_addr.addr[3] = 0; + wakeup_param->ad_filter[2].wl_addr.addr[4] = 0; + wakeup_param->ad_filter[2].wl_addr.addr[5] = 0; + } + /********************************************************************/ + //MAX_AD_FILTER_NUM=5 :num 3 + { + //const uint8_t data[11] = {0x59,0x4B,0x32,0x42,0x41,0x5F,0x54,0x45,0x53,0x54,0x33}; + wakeup_param->ad_filter[3].ad_len = 0; + wakeup_param->ad_filter[3].ad_type = 0; + //memcpy(wakeup_param->ad_filter[2].ad_data, data,wakeup_param->ad_filter[2].ad_len-1);// 1100 0000 0111 1111 1100 0000 0000 0000 //0xc07fc000 + wakeup_param->ad_filter[3].ad_data_mask = 0; + wakeup_param->ad_filter[3].ad_role = ROLE_COMBO|(COMBO_1<<4); + wakeup_param->ad_filter[3].gpio_trigger_idx = TG_IDX_0;//0: match for wakeup_param->gpio_num[0] 1: match for wakeup_param->gpio_num[1] + /********************************************************************/ + /*enable white list addr for paired remote ble addr. 
if all 0: all addr will use ad filter + wl_addr have value xx: only remote addr in white_list_addr will use ad filter + ********************************************************************/ + wakeup_param->ad_filter[3].wl_addr.addr[0] = 0; + wakeup_param->ad_filter[3].wl_addr.addr[1] = 0; + wakeup_param->ad_filter[3].wl_addr.addr[2] = 0; + wakeup_param->ad_filter[3].wl_addr.addr[3] = 0; + wakeup_param->ad_filter[3].wl_addr.addr[4] = 0; + wakeup_param->ad_filter[3].wl_addr.addr[5] = 0; + } + /********************************************************************/ + //MAX_AD_FILTER_NUM=5 :num 4 + { + //const uint8_t data[11] = {0x59,0x4B,0x32,0x42,0x41,0x5F,0x54,0x45,0x53,0x54,0x33}; + wakeup_param->ad_filter[4].ad_len = 0; + wakeup_param->ad_filter[4].ad_type = 0x09; + //memcpy(wakeup_param->ad_filter[4].ad_data, data,wakeup_param->ad_filter[4].ad_len-1);// 1111 1111 1110 0000 0000 0000 0000 0000 //0xffe00000 + wakeup_param->ad_filter[4].ad_data_mask = 0xffe00000; + wakeup_param->ad_filter[4].ad_role = ROLE_COMBO|(COMBO_1<<4); + wakeup_param->ad_filter[4].gpio_trigger_idx = TG_IDX_0|TG_IDX_1;//0: match for wakeup_param->gpio_num[0] 1: match for wakeup_param->gpio_num[1] + /********************************************************************/ + /*enable white list addr for paired remote ble addr. if all 0: all addr will use ad filter + wl_addr have value xx: only remote addr in white_list_addr will use ad filter + ********************************************************************/ + wakeup_param->ad_filter[4].wl_addr.addr[0] = 0; + wakeup_param->ad_filter[4].wl_addr.addr[1] = 0; + wakeup_param->ad_filter[4].wl_addr.addr[2] = 0; + wakeup_param->ad_filter[4].wl_addr.addr[3] = 0; + wakeup_param->ad_filter[4].wl_addr.addr[4] = 0; + wakeup_param->ad_filter[4].wl_addr.addr[5] = 0; + } + + for(i = 0; i < (sizeof(struct ble_wakeup_param_t)/4 +1); i++){ + printk("write_blocks[%d]:0x%08X \r\n", i, write_blocks[i]); + rwnx_send_dbg_mem_write_req(usb_dev, 0x15FE00 + (4 * i), write_blocks[i]); + } + rwnx_send_dbg_start_app_req(usb_dev, RAM_FW_BLE_SCAN_WAKEUP_ADDR, HOST_START_APP_AUTO); + kfree(wakeup_param); + + return -1; + }else { + if (rwnx_plat_bin_fw_upload_android(usb_dev, RAM_FW_ADDR, FW_BASE_NAME)) { + return -1;; + } + if (chip_id == CHIP_REV_U03) { + if(rwnx_plat_bin_fw_upload_android(usb_dev, FW_RAM_ADID_BASE_ADDR, FW_ADID_BASE_NAME_U03)) { + return -1;; + } + + if(rwnx_plat_bin_fw_upload_android(usb_dev, FW_RAM_PATCH_BASE_ADDR_U03, FW_PATCH_BASE_NAME_U03)) { + return -1;; + } + } else { + if(rwnx_plat_bin_fw_upload_android(usb_dev, FW_RAM_ADID_BASE_ADDR, FW_ADID_BASE_NAME)) { + return -1;; + } + + if(rwnx_plat_bin_fw_upload_android(usb_dev, FW_RAM_PATCH_BASE_ADDR, FW_PATCH_BASE_NAME)) { + return -1;; + } + } + } + if (chip_id == CHIP_REV_U03) { + if (rwnx_plat_bin_fw_patch_table_upload_android(usb_dev, FW_PATCH_TABLE_NAME_U03)) { + return -1;; + } + } else { + if (rwnx_plat_bin_fw_patch_table_upload_android(usb_dev, FW_PATCH_TABLE_NAME)) { + return -1;; + } + } + +#if 0 + if(testmode == FW_TEST_MODE){ + if(rwnx_plat_bin_fw_upload_android(usb_dev, FW_PATCH_TEST_BASE_ADDR, FW_PATCH_TEST_BASE_NAME)) { + goto out_free_bus; + } + } +#endif + + if (rwnx_plat_userconfig_upload_android(usb_dev, FW_USERCONFIG_NAME)){ + return -1; + } + + if (patch_config(usb_dev)) { + return -1;; + } + + if (rf_config(usb_dev)){ + return -1;; + } + if (rwnx_send_dbg_start_app_req(usb_dev, fw_addr, HOST_START_APP_AUTO)) { + return -1; + } + + return 0; +} + + +int aicfw_download_fw(struct aic_usb_dev *usb_dev) +{ + 
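    /*
     * Dispatch the download by the chip id resolved in aicloadfw_chipmatch():
     * AIC8800 uses aicfw_download_fw_8800() above (main firmware, the adid and
     * patch images for the detected chip rev, the patch table, aic_userconfig,
     * patch_config(), rf_config(), then start_app), while the 8800D80 and
     * 8800D80X2 families use their own routines defined elsewhere in the package.
     */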
if(usb_dev->chipid == PRODUCT_ID_AIC8800){ + return aicfw_download_fw_8800(usb_dev); + }else if(usb_dev->chipid == PRODUCT_ID_AIC8800D80){ + return aicfw_download_fw_8800d80(usb_dev); + }else if(usb_dev->chipid == PRODUCT_ID_AIC8800D80X2){ + return aicfw_download_fw_8800d80x2(usb_dev); + }else{ + return -1; + } +} + + +static int aicwf_usb_probe(struct usb_interface *intf, const struct usb_device_id *id) +{ + int ret = 0; + struct usb_device *usb = interface_to_usbdev(intf); + struct aicwf_bus *bus_if ; + struct device *dev = NULL; + struct aicwf_rx_priv* rx_priv = NULL; + struct aic_usb_dev *usb_dev = NULL; + + + AICWFDBG(LOGINFO, "%s vid:0x%X pid:0x%X icl:0x%X isc:0x%X ipr:0x%X \r\n", __func__, + id->idVendor, + id->idProduct, + id->bInterfaceClass, + id->bInterfaceSubClass, + id->bInterfaceProtocol); + + if(fw_loaded == 1 && + (id->idProduct == USB_DEVICE_ID_AIC_8801 || + id->idProduct == USB_DEVICE_ID_AIC_8800D81 || + id->idProduct == USB_DEVICE_ID_AIC_8800D41 || + id->idProduct == USB_DEVICE_ID_AIC_8800D81X2 || + id->idProduct == USB_DEVICE_ID_AIC_8800D89X2)){ + return -1; + } + + usb_dev = kzalloc(sizeof(struct aic_usb_dev), GFP_ATOMIC); + if (!usb_dev) { + return -ENOMEM; + } + + usb_dev->udev = usb; + usb_dev->dev = &usb->dev; + ret = aicloadfw_chipmatch(usb_dev, id->idVendor, id->idProduct); + if (ret) { + AICWFDBG(LOGERROR, "%s chip unsupport.\r\n", __func__); + goto out_free; + } + + usb_set_intfdata(intf, usb_dev); + + ret = aicwf_parse_usb(usb_dev, intf, id->idProduct); + if (ret) { + usb_err("aicwf_parse_usb err %d\n", ret); + goto out_free; + } + + ret = aicwf_usb_init(usb_dev); + if (ret) { + usb_err("aicwf_usb_init err %d\n", ret); + goto out_free; + } + + bus_if = kzalloc(sizeof(struct aicwf_bus), GFP_ATOMIC); + if (!bus_if) { + ret = -ENOMEM; + goto out_free_usb; + } + + dev = usb_dev->dev; + bus_if->dev = dev; + usb_dev->bus_if = bus_if; + bus_if->bus_priv.usb = usb_dev; + dev_set_drvdata(dev, bus_if); + + bus_if->ops = &aicwf_usb_bus_ops; + + rx_priv = aicwf_rx_init(usb_dev); + if(!rx_priv) { + txrx_err("rx init failed\n"); + ret = -1; + goto out_free_bus; + } + usb_dev->rx_priv = rx_priv; + + ret = aicwf_bus_init(0, dev); + if (ret < 0) { + usb_err("aicwf_bus_init err %d\n", ret); + goto out_free_bus; + } + + ret = aicwf_bus_start(bus_if); + if (ret < 0) { + usb_err("aicwf_bus_start err %d\n", ret); + goto out_free_bus; + } + + aic_bt_platform_init(usb_dev); + + if ((usb->speed != USB_SPEED_HIGH) && (usb->speed != USB_SPEED_SUPER)) { + printk("Aic USB device detected speed = %d\n", usb->speed); + system_reboot(usb_dev); + goto out_free_bus; + } + + if(fw_loaded == 0 && + (usb_dev->chipid == PRODUCT_ID_AIC8801 || + usb_dev->chipid == PRODUCT_ID_AIC8800D81|| + usb_dev->chipid == PRODUCT_ID_AIC8800D81X2 || + usb_dev->chipid == PRODUCT_ID_AIC8800D89X2)){ + rwnx_send_reboot(usb_dev); + goto out_free_bus; + } + + if (system_config(usb_dev)) { + goto out_free_bus; + } + + if (aicfw_download_fw(usb_dev)){ + goto out_free_bus; + } + + usb_dev->app_cmp = true; + fw_loaded = 1; + + return 0; + +out_free_bus: + aicwf_bus_deinit(dev); + aicwf_rx_deinit(usb_dev->rx_priv); + kfree(bus_if); +out_free_usb: + aicwf_usb_deinit(usb_dev); +out_free: + usb_err("failed with errno %d\n", ret); + kfree(usb_dev); + usb_set_intfdata(intf, NULL); + return ret; +} + +static void aicwf_usb_disconnect(struct usb_interface *intf) +{ + struct aic_usb_dev *usb_dev = + (struct aic_usb_dev *) usb_get_intfdata(intf); + + printk("%s enter \r\n", __func__); + + if (!usb_dev) + return; + + 
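    /* Undo what probe set up: shut down the bus, release the USB rx/tx resources,
     * free the rx private data, then the bus_if and usb_dev allocations. */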
aicwf_bus_deinit(usb_dev->dev); + aicwf_usb_deinit(usb_dev); + if (usb_dev->rx_priv) + aicwf_rx_deinit(usb_dev->rx_priv); + kfree(usb_dev->bus_if); + kfree(usb_dev); +} +#if 0 +static int aicwf_usb_suspend(struct usb_interface *intf, pm_message_t state) +{ + struct aic_usb_dev *usb_dev = + (struct aic_usb_dev *) usb_get_intfdata(intf); + + aicwf_usb_state_change(usb_dev, USB_SLEEP_ST); + aicwf_bus_stop(usb_dev->bus_if); + return 0; +} + +static int aicwf_usb_resume(struct usb_interface *intf) +{ + struct aic_usb_dev *usb_dev = + (struct aic_usb_dev *) usb_get_intfdata(intf); + + if (usb_dev->state == USB_UP_ST) + return 0; + + aicwf_bus_start(usb_dev->bus_if); + return 0; +} + +static int aicwf_usb_reset_resume(struct usb_interface *intf) +{ + return aicwf_usb_resume(intf); +} +#endif +static struct usb_device_id aicwf_usb_id_table[] = { + {USB_DEVICE(USB_VENDOR_ID_AIC, USB_DEVICE_ID_AIC)}, + {USB_DEVICE(USB_VENDOR_ID_AIC, USB_DEVICE_ID_AIC_8801)}, + {USB_DEVICE(USB_VENDOR_ID_AIC, USB_DEVICE_ID_AIC_8800D80)}, + {USB_DEVICE(USB_VENDOR_ID_AIC, USB_DEVICE_ID_AIC_8800D81)}, + {USB_DEVICE(USB_VENDOR_ID_AIC, USB_DEVICE_ID_AIC_8800D40)}, + {USB_DEVICE(USB_VENDOR_ID_AIC, USB_DEVICE_ID_AIC_8800D41)}, + {USB_DEVICE(USB_VENDOR_ID_AIC_V2, USB_DEVICE_ID_AIC_8800D80X2)}, + {USB_DEVICE(USB_VENDOR_ID_AIC_V2, USB_DEVICE_ID_AIC_8800D81X2)}, + {USB_DEVICE(USB_VENDOR_ID_AIC_V2, USB_DEVICE_ID_AIC_8800D89X2)}, + {USB_DEVICE(USB_VENDOR_ID_AIC_V2, USB_DEVICE_ID_AIC_8800D40X2)}, + {} +}; + +MODULE_DEVICE_TABLE(usb, aicwf_usb_id_table); + +static struct usb_driver aicwf_usbdrvr = { + .name = KBUILD_MODNAME, + .probe = aicwf_usb_probe, + .disconnect = aicwf_usb_disconnect, + .id_table = aicwf_usb_id_table, + //.suspend = aicwf_usb_suspend, + //.resume = aicwf_usb_resume, + //.reset_resume = aicwf_usb_reset_resume, + .supports_autosuspend = 0, + .disable_hub_initiated_lpm = 1, +}; + +void aicwf_usb_register(void) +{ + if (usb_register(&aicwf_usbdrvr) < 0) { + usb_err("usb_register failed\n"); + } +} + +void aicwf_usb_exit(void) +{ + usb_deregister(&aicwf_usbdrvr); +} diff --git a/usr/src/AIC8800/drivers/aic8800/aic_load_fw/aicwf_usb.h b/usr/src/AIC8800/drivers/aic8800/aic_load_fw/aicwf_usb.h new file mode 100644 index 0000000..f31c6cd --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic_load_fw/aicwf_usb.h @@ -0,0 +1,239 @@ +/** + * aicwf_usb.h + * + * USB function declarations + * + * Copyright (C) AICSemi 2018-2020 + */ + +#ifndef _AICWF_USB_H_ +#define _AICWF_USB_H_ + +#include +#include +#include +#include +#include + +#include "aicbluetooth_cmds.h" + +#ifdef AICWF_USB_SUPPORT + +/* USB Device ID */ +#define USB_VENDOR_ID_AIC 0xA69C +#define USB_VENDOR_ID_AIC_V2 0x368B +#define USB_DEVICE_ID_AIC 0x8800 +#define USB_DEVICE_ID_AIC_8801 0x8801 + +#define CHIP_REV_U01 0x1 +#define CHIP_REV_U02 0x3 +#define CHIP_REV_U03 0x7 +#define CHIP_REV_U04 0xf +#define CHIP_REV_U05 0x1f +#define CHIP_SUB_REV_U04 0x20 + +enum AICWF_IC{ + PRODUCT_ID_AIC8800 = 0, + PRODUCT_ID_AIC8801, + PRODUCT_ID_AIC8800DC, + PRODUCT_ID_AIC8800DW, + PRODUCT_ID_AIC8800D80, + PRODUCT_ID_AIC8800D81, + PRODUCT_ID_AIC8800D80X2, + PRODUCT_ID_AIC8800D81X2, + PRODUCT_ID_AIC8800D89X2, + PRODUCT_ID_AIC8800D40X2, +}; + + +#define AICWF_USB_RX_URBS (20) +#define AICWF_USB_TX_URBS (100) +#define AICWF_USB_TX_LOW_WATER (AICWF_USB_TX_URBS/4) +#define AICWF_USB_TX_HIGH_WATER (AICWF_USB_TX_LOW_WATER*3) +#define AICWF_USB_MAX_PKT_SIZE (2048) + +#ifdef CONFIG_RFTEST +#define FW_RF_PATCH_BASE_NAME "fw_patch.bin" +#define FW_RF_ADID_BASE_NAME "fw_adid.bin" +#define 
FW_RF_BASE_NAME "fmacfw_rf.bin" +#define FW_PATCH_BASE_NAME "fw_patch.bin" +#define FW_ADID_BASE_NAME "fw_adid.bin" +#define FW_BASE_NAME "fmacfw.bin" +#define FW_BLE_SCAN_WAKEUP_NAME "fw_ble_scan.bin" +#define FW_BLE_SCAN_AD_FILTER_NAME "fw_ble_scan_ad_filter.bin" + +#define FW_PATCH_BASE_NAME_PC "fw_patch.bin" +#define FW_ADID_BASE_NAME_PC "fw_adid.bin" +#define FW_BASE_NAME_PC "fmacfw.bin" +#define FW_RF_PATCH_BASE_NAME_PC "fw_patch.bin" +#define FW_RF_ADID_BASE_NAME_PC "fw_adid.bin" +#define FW_RF_BASE_NAME_PC "fmacfw_rf.bin" +#define FW_PATCH_TEST_BASE_NAME "fw_patch_test.bin" +#define FW_PATCH_BASE_NAME_U03 "fw_patch_u03.bin" +#define FW_ADID_BASE_NAME_U03 "fw_adid_u03.bin" + +#else +#define FW_PATCH_BASE_NAME "fw_patch.bin" +#define FW_ADID_BASE_NAME "fw_adid.bin" +#define FW_BASE_NAME "fmacfw.bin" +#endif + +#define FW_PATCH_TABLE_NAME "fw_patch_table.bin" +#define FW_PATCH_TABLE_NAME_U03 "fw_patch_table_u03.bin" +#define FW_USERCONFIG_NAME "aic_userconfig.txt" +#define FW_M2D_OTA_NAME "m2d_ota.bin" + + /*8800 use 0x100000, 8800D80 use 0x160000*/ +#define RAM_FW_BLE_SCAN_WAKEUP_ADDR_8800D80 0x00160000 +#define RAM_FW_BLE_SCAN_WAKEUP_ADDR 0x00100000 +#define RAM_FW_ADDR 0x00110000 +#define FW_RAM_ADID_BASE_ADDR 0x00161928 +#define FW_RAM_PATCH_BASE_ADDR 0x00100000 +#define FW_RAM_PATCH_BASE_ADDR_U03 0x00100000 +#define FW_PATCH_TEST_BASE_ADDR 0x00100000 +#define RAM_FW_BLE_WAKEUP_OUT_ADDR 0x0015f000 + +enum { + FW_NORMAL_MODE, + FW_TEST_MODE, + FW_BLE_SCAN_WAKEUP_MODE, + FW_M2D_OTA_MODE, + FW_DPDCALIB_MODE, + FW_BLE_SCAN_AD_FILTER_MODE, +}; + + +#if 0 +#define FW_NAME2 FW_BASE_NAME".bin" +#define FW_PATCH_BIN_NAME FW_PATCH_BASE_NAME".bin" +#define FW_ADID_BIN_NAME FW_ADID_BASE_NAME".bin" +#endif + +typedef enum { + USB_TYPE_DATA = 0X00, + USB_TYPE_CFG = 0X10, + USB_TYPE_CFG_CMD_RSP = 0X11, + USB_TYPE_CFG_DATA_CFM = 0X12 +} usb_type; + +enum aicwf_usb_state { + USB_DOWN_ST, + USB_UP_ST, + USB_SLEEP_ST +}; + +#define MAX_AD_FILTER_NUM 5// Max AD Filter num +#define MAX_GPIO_TRIGGER_NUM 2// Max user config num of gpio +#define MAX_ROLE_COMNO_IDX_NUM 2// Max num of ad role type combo,form( enum gpio_combo_idx) + + +#define AD_ROLE_FLAG 0x0f +#define ROLE_COMBO_IDX_FLAG 0xf0 + +enum ad_role_type { + ROLE_ONLY,// ROLE_ONLY will trigger wake up immediately. + ROLE_COMBO,//ROLE_COMBO will trigger When all the conditions (ad_role == ROLE_COMBO,and ad_filter is matching)are met. 
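    /* Editorial note: ad_role packs two nibbles - the low nibble (AD_ROLE_FLAG)
     * carries the role type from this enum, the high nibble (ROLE_COMBO_IDX_FLAG)
     * carries the combo group index, e.g. ROLE_COMBO | (COMBO_1 << 4) as used in
     * aicfw_download_fw_8800(). */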
+}; + +enum gpio_combo_idx { + COMBO_0, + COMBO_1, +}; + +enum gpio_trigger_bit { + TG_IDX_0 = (1<<0), + TG_IDX_1 = (1<<1), +}; + +/* BD Address */ +typedef struct { + uint8_t addr[6]; +}bdaddr_t; + +struct wakeup_ad_data_filter { + uint32_t ad_data_mask; + uint8_t gpio_trigger_idx; + uint8_t ad_role;//from enum ad_role_type + uint8_t ad_len; + uint8_t ad_type; + uint8_t ad_data[31]; + bdaddr_t wl_addr; +}; + +struct ble_wakeup_param_t { + uint32_t magic_num;// "BLES" = 0x53454C42 + uint32_t delay_scan_to;// timeout for start scan in ms + uint32_t reboot_to;// timeout for reboot in ms + uint32_t gpio_num[MAX_GPIO_TRIGGER_NUM]; + uint32_t gpio_dft_lvl[MAX_GPIO_TRIGGER_NUM]; + struct wakeup_ad_data_filter ad_filter[MAX_AD_FILTER_NUM]; +}; + +struct aicwf_usb_buf { + struct list_head list; + struct aic_usb_dev *usbdev; + struct urb *urb; + struct sk_buff *skb; + #ifdef CONFIG_USB_NO_TRANS_DMA_MAP + u8 *data_buf; + dma_addr_t data_dma_trans_addr; + #endif + bool cfm; +}; + +struct aic_usb_dev { + struct rwnx_cmd_mgr cmd_mgr; + struct aicwf_bus *bus_if; + struct usb_device *udev; + struct device *dev; + struct aicwf_rx_priv* rx_priv; + enum aicwf_usb_state state; + + struct usb_anchor rx_submitted; + struct work_struct rx_urb_work; + + spinlock_t rx_free_lock; + spinlock_t tx_free_lock; + spinlock_t tx_post_lock; + spinlock_t tx_flow_lock; + + struct list_head rx_free_list; + struct list_head tx_free_list; + struct list_head tx_post_list; + + uint bulk_in_pipe; + uint bulk_out_pipe; +#ifdef CONFIG_USB_MSG_EP + uint msg_out_pipe; + uint use_msg_ep; +#endif + + int tx_free_count; + int tx_post_count; + + struct aicwf_usb_buf usb_tx_buf[AICWF_USB_TX_URBS]; + struct aicwf_usb_buf usb_rx_buf[AICWF_USB_RX_URBS]; + + int msg_finished; + wait_queue_head_t msg_wait; + ulong msg_busy; + struct urb *msg_out_urb; + #ifdef CONFIG_USB_NO_TRANS_DMA_MAP + dma_addr_t cmd_dma_trans_addr; + #endif + + u16 chipid; + bool tbusy; + bool app_cmp; + u32 fw_version_uint; +}; + +extern void aicwf_usb_exit(void); +extern void aicwf_usb_register(void); +extern void aicwf_usb_tx_flowctrl(struct aic_usb_dev *usb_dev, bool state); +int usb_bustx_thread(void *data); +int usb_busrx_thread(void *data); +int aicwf_process_rxframes(struct aicwf_rx_priv *rx_priv); + +#endif /* AICWF_USB_SUPPORT */ +#endif /* _AICWF_USB_H_ */ diff --git a/usr/src/AIC8800/drivers/aic8800/aic_load_fw/md5.c b/usr/src/AIC8800/drivers/aic8800/aic_load_fw/md5.c new file mode 100755 index 0000000..3d7b653 --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic_load_fw/md5.c @@ -0,0 +1,161 @@ +#include +#include "md5.h" + +unsigned char PADDING[]={0x80,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, + 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, + 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, + 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}; + +void MD5Init(MD5_CTX *context) +{ + context->count[0] = 0; + context->count[1] = 0; + context->state[0] = 0x67452301; + context->state[1] = 0xEFCDAB89; + context->state[2] = 0x98BADCFE; + context->state[3] = 0x10325476; +} +void MD5Update(MD5_CTX *context,unsigned char *input,unsigned int inputlen) +{ + unsigned int i = 0,index = 0,partlen = 0; + index = (context->count[0] >> 3) & 0x3F; + partlen = 64 - index; + context->count[0] += inputlen << 3; + if(context->count[0] < (inputlen << 3)) + context->count[1]++; + context->count[1] += inputlen >> 29; + + if(inputlen >= partlen) + { + memcpy(&context->buffer[index],input,partlen); + MD5Transform(context->state,context->buffer); + for(i = partlen;i+64 <= inputlen;i+=64) + MD5Transform(context->state,&input[i]); + index = 0; 
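            /* All full 64-byte blocks consumed; any remaining tail bytes fall
             * through to the buffering memcpy() below and are kept for the next
             * MD5Update()/MD5Final() call. */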
+ } + else + { + i = 0; + } + memcpy(&context->buffer[index],&input[i],inputlen-i); +} +void MD5Final(MD5_CTX *context,unsigned char digest[16]) +{ + unsigned int index = 0,padlen = 0; + unsigned char bits[8]; + index = (context->count[0] >> 3) & 0x3F; + padlen = (index < 56)?(56-index):(120-index); + MD5Encode(bits,context->count,8); + MD5Update(context,PADDING,padlen); + MD5Update(context,bits,8); + MD5Encode(digest,context->state,16); +} +void MD5Encode(unsigned char *output,unsigned int *input,unsigned int len) +{ + unsigned int i = 0,j = 0; + while(j < len) + { + output[j] = input[i] & 0xFF; + output[j+1] = (input[i] >> 8) & 0xFF; + output[j+2] = (input[i] >> 16) & 0xFF; + output[j+3] = (input[i] >> 24) & 0xFF; + i++; + j+=4; + } +} +void MD5Decode(unsigned int *output,unsigned char *input,unsigned int len) +{ + unsigned int i = 0,j = 0; + while(j < len) + { + output[i] = (input[j]) | + (input[j+1] << 8) | + (input[j+2] << 16) | + (input[j+3] << 24); + i++; + j+=4; + } +} +void MD5Transform(unsigned int state[4],unsigned char block[64]) +{ + unsigned int a = state[0]; + unsigned int b = state[1]; + unsigned int c = state[2]; + unsigned int d = state[3]; + unsigned int x[64]; + MD5Decode(x,block,64); + FF(a, b, c, d, x[ 0], 7, 0xd76aa478); /* 1 */ + FF(d, a, b, c, x[ 1], 12, 0xe8c7b756); /* 2 */ + FF(c, d, a, b, x[ 2], 17, 0x242070db); /* 3 */ + FF(b, c, d, a, x[ 3], 22, 0xc1bdceee); /* 4 */ + FF(a, b, c, d, x[ 4], 7, 0xf57c0faf); /* 5 */ + FF(d, a, b, c, x[ 5], 12, 0x4787c62a); /* 6 */ + FF(c, d, a, b, x[ 6], 17, 0xa8304613); /* 7 */ + FF(b, c, d, a, x[ 7], 22, 0xfd469501); /* 8 */ + FF(a, b, c, d, x[ 8], 7, 0x698098d8); /* 9 */ + FF(d, a, b, c, x[ 9], 12, 0x8b44f7af); /* 10 */ + FF(c, d, a, b, x[10], 17, 0xffff5bb1); /* 11 */ + FF(b, c, d, a, x[11], 22, 0x895cd7be); /* 12 */ + FF(a, b, c, d, x[12], 7, 0x6b901122); /* 13 */ + FF(d, a, b, c, x[13], 12, 0xfd987193); /* 14 */ + FF(c, d, a, b, x[14], 17, 0xa679438e); /* 15 */ + FF(b, c, d, a, x[15], 22, 0x49b40821); /* 16 */ + + /* Round 2 */ + GG(a, b, c, d, x[ 1], 5, 0xf61e2562); /* 17 */ + GG(d, a, b, c, x[ 6], 9, 0xc040b340); /* 18 */ + GG(c, d, a, b, x[11], 14, 0x265e5a51); /* 19 */ + GG(b, c, d, a, x[ 0], 20, 0xe9b6c7aa); /* 20 */ + GG(a, b, c, d, x[ 5], 5, 0xd62f105d); /* 21 */ + GG(d, a, b, c, x[10], 9, 0x2441453); /* 22 */ + GG(c, d, a, b, x[15], 14, 0xd8a1e681); /* 23 */ + GG(b, c, d, a, x[ 4], 20, 0xe7d3fbc8); /* 24 */ + GG(a, b, c, d, x[ 9], 5, 0x21e1cde6); /* 25 */ + GG(d, a, b, c, x[14], 9, 0xc33707d6); /* 26 */ + GG(c, d, a, b, x[ 3], 14, 0xf4d50d87); /* 27 */ + GG(b, c, d, a, x[ 8], 20, 0x455a14ed); /* 28 */ + GG(a, b, c, d, x[13], 5, 0xa9e3e905); /* 29 */ + GG(d, a, b, c, x[ 2], 9, 0xfcefa3f8); /* 30 */ + GG(c, d, a, b, x[ 7], 14, 0x676f02d9); /* 31 */ + GG(b, c, d, a, x[12], 20, 0x8d2a4c8a); /* 32 */ + + /* Round 3 */ + HH(a, b, c, d, x[ 5], 4, 0xfffa3942); /* 33 */ + HH(d, a, b, c, x[ 8], 11, 0x8771f681); /* 34 */ + HH(c, d, a, b, x[11], 16, 0x6d9d6122); /* 35 */ + HH(b, c, d, a, x[14], 23, 0xfde5380c); /* 36 */ + HH(a, b, c, d, x[ 1], 4, 0xa4beea44); /* 37 */ + HH(d, a, b, c, x[ 4], 11, 0x4bdecfa9); /* 38 */ + HH(c, d, a, b, x[ 7], 16, 0xf6bb4b60); /* 39 */ + HH(b, c, d, a, x[10], 23, 0xbebfbc70); /* 40 */ + HH(a, b, c, d, x[13], 4, 0x289b7ec6); /* 41 */ + HH(d, a, b, c, x[ 0], 11, 0xeaa127fa); /* 42 */ + HH(c, d, a, b, x[ 3], 16, 0xd4ef3085); /* 43 */ + HH(b, c, d, a, x[ 6], 23, 0x4881d05); /* 44 */ + HH(a, b, c, d, x[ 9], 4, 0xd9d4d039); /* 45 */ + HH(d, a, b, c, x[12], 11, 0xe6db99e5); /* 46 */ + HH(c, d, a, b, 
x[15], 16, 0x1fa27cf8); /* 47 */ + HH(b, c, d, a, x[ 2], 23, 0xc4ac5665); /* 48 */ + + /* Round 4 */ + II(a, b, c, d, x[ 0], 6, 0xf4292244); /* 49 */ + II(d, a, b, c, x[ 7], 10, 0x432aff97); /* 50 */ + II(c, d, a, b, x[14], 15, 0xab9423a7); /* 51 */ + II(b, c, d, a, x[ 5], 21, 0xfc93a039); /* 52 */ + II(a, b, c, d, x[12], 6, 0x655b59c3); /* 53 */ + II(d, a, b, c, x[ 3], 10, 0x8f0ccc92); /* 54 */ + II(c, d, a, b, x[10], 15, 0xffeff47d); /* 55 */ + II(b, c, d, a, x[ 1], 21, 0x85845dd1); /* 56 */ + II(a, b, c, d, x[ 8], 6, 0x6fa87e4f); /* 57 */ + II(d, a, b, c, x[15], 10, 0xfe2ce6e0); /* 58 */ + II(c, d, a, b, x[ 6], 15, 0xa3014314); /* 59 */ + II(b, c, d, a, x[13], 21, 0x4e0811a1); /* 60 */ + II(a, b, c, d, x[ 4], 6, 0xf7537e82); /* 61 */ + II(d, a, b, c, x[11], 10, 0xbd3af235); /* 62 */ + II(c, d, a, b, x[ 2], 15, 0x2ad7d2bb); /* 63 */ + II(b, c, d, a, x[ 9], 21, 0xeb86d391); /* 64 */ + state[0] += a; + state[1] += b; + state[2] += c; + state[3] += d; +} diff --git a/usr/src/AIC8800/drivers/aic8800/aic_load_fw/md5.h b/usr/src/AIC8800/drivers/aic8800/aic_load_fw/md5.h new file mode 100755 index 0000000..6ed5c0f --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic_load_fw/md5.h @@ -0,0 +1,48 @@ +#ifndef MD5_H +#define MD5_H + +typedef struct +{ + unsigned int count[2]; + unsigned int state[4]; + unsigned char buffer[64]; +}MD5_CTX; + + +#define F(x,y,z) ((x & y) | (~x & z)) +#define G(x,y,z) ((x & z) | (y & ~z)) +#define H(x,y,z) (x^y^z) +#define I(x,y,z) (y ^ (x | ~z)) +#define ROTATE_LEFT(x,n) ((x << n) | (x >> (32-n))) +#define FF(a,b,c,d,x,s,ac) \ + { \ + a += F(b,c,d) + x + ac; \ + a = ROTATE_LEFT(a,s); \ + a += b; \ + } +#define GG(a,b,c,d,x,s,ac) \ + { \ + a += G(b,c,d) + x + ac; \ + a = ROTATE_LEFT(a,s); \ + a += b; \ + } +#define HH(a,b,c,d,x,s,ac) \ + { \ + a += H(b,c,d) + x + ac; \ + a = ROTATE_LEFT(a,s); \ + a += b; \ + } +#define II(a,b,c,d,x,s,ac) \ + { \ + a += I(b,c,d) + x + ac; \ + a = ROTATE_LEFT(a,s); \ + a += b; \ + } +void MD5Init(MD5_CTX *context); +void MD5Update(MD5_CTX *context,unsigned char *input,unsigned int inputlen); +void MD5Final(MD5_CTX *context,unsigned char digest[16]); +void MD5Transform(unsigned int state[4],unsigned char block[64]); +void MD5Encode(unsigned char *output,unsigned int *input,unsigned int len); +void MD5Decode(unsigned int *output,unsigned char *input,unsigned int len); + +#endif diff --git a/usr/src/AIC8800/drivers/aic8800/aic_load_fw/rwnx_version_gen.h b/usr/src/AIC8800/drivers/aic8800/aic_load_fw/rwnx_version_gen.h new file mode 100755 index 0000000..efdc3b4 --- /dev/null +++ b/usr/src/AIC8800/drivers/aic8800/aic_load_fw/rwnx_version_gen.h @@ -0,0 +1,4 @@ +#define RWNX_VERS_REV "1a4b0054d2M (master)" +#define RWNX_VERS_MOD "6.4.3.0" +#define RWNX_VERS_BANNER "rwnx v6.4.3.0 - 1a4b0054d2M (master)" +#define RELEASE_DATE "2025_0605_3d4cc869" diff --git a/usr/src/AIC8800/fw/aic8800D80/aic_userconfig_8800d80.txt b/usr/src/AIC8800/fw/aic8800D80/aic_userconfig_8800d80.txt new file mode 100644 index 0000000..553fe60 --- /dev/null +++ b/usr/src/AIC8800/fw/aic8800D80/aic_userconfig_8800d80.txt @@ -0,0 +1,110 @@ +# AIC USERCONFIG 2022/0803/1707 + +# txpwr_lvl +enable=1 +lvl_11b_11ag_1m_2g4=18 +lvl_11b_11ag_2m_2g4=18 +lvl_11b_11ag_5m5_2g4=18 +lvl_11b_11ag_11m_2g4=18 +lvl_11b_11ag_6m_2g4=18 +lvl_11b_11ag_9m_2g4=18 +lvl_11b_11ag_12m_2g4=18 +lvl_11b_11ag_18m_2g4=18 +lvl_11b_11ag_24m_2g4=16 +lvl_11b_11ag_36m_2g4=16 +lvl_11b_11ag_48m_2g4=15 +lvl_11b_11ag_54m_2g4=15 +lvl_11n_11ac_mcs0_2g4=18 +lvl_11n_11ac_mcs1_2g4=18 +lvl_11n_11ac_mcs2_2g4=18 
+lvl_11n_11ac_mcs3_2g4=18 +lvl_11n_11ac_mcs4_2g4=16 +lvl_11n_11ac_mcs5_2g4=16 +lvl_11n_11ac_mcs6_2g4=15 +lvl_11n_11ac_mcs7_2g4=15 +lvl_11n_11ac_mcs8_2g4=14 +lvl_11n_11ac_mcs9_2g4=14 +lvl_11ax_mcs0_2g4=18 +lvl_11ax_mcs1_2g4=18 +lvl_11ax_mcs2_2g4=18 +lvl_11ax_mcs3_2g4=18 +lvl_11ax_mcs4_2g4=16 +lvl_11ax_mcs5_2g4=16 +lvl_11ax_mcs6_2g4=15 +lvl_11ax_mcs7_2g4=15 +lvl_11ax_mcs8_2g4=14 +lvl_11ax_mcs9_2g4=14 +lvl_11ax_mcs10_2g4=13 +lvl_11ax_mcs11_2g4=13 +lvl_11a_6m_5g=18 +lvl_11a_9m_5g=18 +lvl_11a_12m_5g=18 +lvl_11a_18m_5g=18 +lvl_11a_24m_5g=16 +lvl_11a_36m_5g=16 +lvl_11a_48m_5g=15 +lvl_11a_54m_5g=15 +lvl_11n_11ac_mcs0_5g=18 +lvl_11n_11ac_mcs1_5g=18 +lvl_11n_11ac_mcs2_5g=18 +lvl_11n_11ac_mcs3_5g=18 +lvl_11n_11ac_mcs4_5g=16 +lvl_11n_11ac_mcs5_5g=16 +lvl_11n_11ac_mcs6_5g=15 +lvl_11n_11ac_mcs7_5g=15 +lvl_11n_11ac_mcs8_5g=14 +lvl_11n_11ac_mcs9_5g=14 +lvl_11ax_mcs0_5g=18 +lvl_11ax_mcs1_5g=18 +lvl_11ax_mcs2_5g=18 +lvl_11ax_mcs3_5g=18 +lvl_11ax_mcs4_5g=16 +lvl_11ax_mcs5_5g=16 +lvl_11ax_mcs6_5g=14 +lvl_11ax_mcs7_5g=14 +lvl_11ax_mcs8_5g=13 +lvl_11ax_mcs9_5g=13 +lvl_11ax_mcs10_5g=12 +lvl_11ax_mcs11_5g=12 + +# txpwr_loss +loss_enable=0 +loss_value=2 + +# txpwr_ofst +ofst_enable=0 +ofst_2g4_11b_chan_1_4=0 +ofst_2g4_11b_chan_5_9=0 +ofst_2g4_11b_chan_10_13=0 +ofst_2g4_ofdm_highrate_chan_1_4=0 +ofst_2g4_ofdm_highrate_chan_5_9=0 +ofst_2g4_ofdm_highrate_chan_10_13=0 +ofst_2g4_ofdm_lowrate_chan_1_4=0 +ofst_2g4_ofdm_lowrate_chan_5_9=0 +ofst_2g4_ofdm_lowrate_chan_10_13=0 +ofst_5g_ofdm_lowrate_chan_42=0 +ofst_5g_ofdm_lowrate_chan_58=0 +ofst_5g_ofdm_lowrate_chan_106=0 +ofst_5g_ofdm_lowrate_chan_122=0 +ofst_5g_ofdm_lowrate_chan_138=0 +ofst_5g_ofdm_lowrate_chan_155=0 +ofst_5g_ofdm_highrate_chan_42=0 +ofst_5g_ofdm_highrate_chan_58=0 +ofst_5g_ofdm_highrate_chan_106=0 +ofst_5g_ofdm_highrate_chan_122=0 +ofst_5g_ofdm_highrate_chan_138=0 +ofst_5g_ofdm_highrate_chan_155=0 +ofst_5g_ofdm_midrate_chan_42=0 +ofst_5g_ofdm_midrate_chan_58=0 +ofst_5g_ofdm_midrate_chan_106=0 +ofst_5g_ofdm_midrate_chan_122=0 +ofst_5g_ofdm_midrate_chan_138=0 +ofst_5g_ofdm_midrate_chan_155=0 + +# xtal cap +xtal_enable=0 +xtal_cap=24 +xtal_cap_fine=31 + + + diff --git a/usr/src/AIC8800/fw/aic8800D80/calibmode_8800d80.bin b/usr/src/AIC8800/fw/aic8800D80/calibmode_8800d80.bin new file mode 100644 index 0000000..74a298d Binary files /dev/null and b/usr/src/AIC8800/fw/aic8800D80/calibmode_8800d80.bin differ diff --git a/usr/src/AIC8800/fw/aic8800D80/fmacfw_8800d80_u02.bin b/usr/src/AIC8800/fw/aic8800D80/fmacfw_8800d80_u02.bin new file mode 100644 index 0000000..7d86974 Binary files /dev/null and b/usr/src/AIC8800/fw/aic8800D80/fmacfw_8800d80_u02.bin differ diff --git a/usr/src/AIC8800/fw/aic8800D80/fmacfw_8800d80_u02_ipc.bin b/usr/src/AIC8800/fw/aic8800D80/fmacfw_8800d80_u02_ipc.bin new file mode 100644 index 0000000..4b03cd3 Binary files /dev/null and b/usr/src/AIC8800/fw/aic8800D80/fmacfw_8800d80_u02_ipc.bin differ diff --git a/usr/src/AIC8800/fw/aic8800D80/fw_adid_8800d80_u02.bin b/usr/src/AIC8800/fw/aic8800D80/fw_adid_8800d80_u02.bin new file mode 100644 index 0000000..ab3f057 Binary files /dev/null and b/usr/src/AIC8800/fw/aic8800D80/fw_adid_8800d80_u02.bin differ diff --git a/usr/src/AIC8800/fw/aic8800D80/fw_ble_scan_ad_filter.bin b/usr/src/AIC8800/fw/aic8800D80/fw_ble_scan_ad_filter.bin new file mode 100644 index 0000000..ca17f67 Binary files /dev/null and b/usr/src/AIC8800/fw/aic8800D80/fw_ble_scan_ad_filter.bin differ diff --git a/usr/src/AIC8800/fw/aic8800D80/fw_patch_8800d80_u02.bin b/usr/src/AIC8800/fw/aic8800D80/fw_patch_8800d80_u02.bin new file 
mode 100644 index 0000000..b3234d9 Binary files /dev/null and b/usr/src/AIC8800/fw/aic8800D80/fw_patch_8800d80_u02.bin differ diff --git a/usr/src/AIC8800/fw/aic8800D80/fw_patch_table_8800d80_u02.bin b/usr/src/AIC8800/fw/aic8800D80/fw_patch_table_8800d80_u02.bin new file mode 100644 index 0000000..4907160 Binary files /dev/null and b/usr/src/AIC8800/fw/aic8800D80/fw_patch_table_8800d80_u02.bin differ diff --git a/usr/src/AIC8800/fw/aic8800D80/lmacfw_rf_8800d80_u02.bin b/usr/src/AIC8800/fw/aic8800D80/lmacfw_rf_8800d80_u02.bin new file mode 100644 index 0000000..07716a6 Binary files /dev/null and b/usr/src/AIC8800/fw/aic8800D80/lmacfw_rf_8800d80_u02.bin differ diff --git a/usr/src/AIC8800/release_note.txt b/usr/src/AIC8800/release_note.txt new file mode 100644 index 0000000..a00ac84 --- /dev/null +++ b/usr/src/AIC8800/release_note.txt @@ -0,0 +1,1145 @@ +update_date:2022_1219_1126 +info: + 1. [8800d/bt] fix assert. + 2. [8800dc/wifi] fix assert for enable wifi after enabling bt for a while. + 3. [8800d/wifi] promote wifi performance in bad env. + +commit-id: ef581944f4f08156a4c60e6d7c713ec8fb45accb + +Test comment: + + +8800D + USB fw version: + fmacfw.bin: di Dec 14 2022 11:08:36 - g84013959 + fmacfw_rf.bin: di Oct 11 2022 11:23:34 - gb01a3750 + fw_patch_table: Dec 16 2022 14:18:58 - git 88a3221 + fw_patch_table_u03.bin: Dec 16 2022 14:23:04 - git bc3db5c + SDIO fw version: + fmacfw.bin: di Dec 14 2022 11:13:31 - g84013959 + fmacfw_rf.bin: di Oct 11 2022 11:30:28 - gb01a3750 + fw_patch_table.bin: Dec 16 2022 14:18:58 - git 88a3221 + fw_patch_table_u03.bin: Dec 16 2022 14:23:04 - git bc3db5c + +8800DC + fw version: + fmacfw_patch_8800dc.bin: 2ed779f13cca75c2a5ba65c848915b32 + fmacfw_rf_patch_8800dc.bin: ac6f68116a6b78a13dbd9020f307d510 + lmacfw_rf_8800dc.bin: ti Oct 21 2022 15:29:11 - gdcee3e4 d9a3b3e1552d8d36bfa7757df9f70f01 + fw_patch_8800dc.bin: Oct 25 2022 15:15:00 - git 130d430 d7375f040db0acab3eb2e0890f37d86b + fw_patch_table_8800dc.bin: Oct 25 2022 15:15:00 - git 130d430 f2bd4f13a67a32b8f454c6aab7c251f0 + fw_u02 version: + fmacfw_patch_8800dc_u02.bin: 5539cd428bec04132ce246ca6ae2a0de + lmacfw_rf_8800dc.bin: ti Oct 21 2022 15:29:11 - gdcee3e4 d9a3b3e1552d8d36bfa7757df9f70f01 + fw_patch_8800dc_u02.bin: Nov 16 2022 10:54:24 - git c81f3df 225ee3459c5663f0b767c6e7c7a8327f + fw_patch_table_8800dc_u02.bin: Nov 16 2022 10:54:24 - git c81f3df a507e5293c35cbaf304b53ec610a4fe7 +============================================================= + + +update_date:2022_0416_1710 +info: + 1. [USB/driver] Optimizing aic8800_fdrv and aic_load_fw tx/rx thread to stop when driver deinit. + 2. [SDIO/patch USB/patch] fix wifi gain table, adaptivity pta of sco and afh assert. + +commit-id: 76586809b4e6f883305416d04206cadffbe50595 + +Test comment: + + +USB fw version: + fmacfw.bin: di Apr 16 2022 17:03:03 - g2fdc0afd + fmacfw_rf.bin: zh Mar 16 2022 14:32:00 - g094d53efm + fw_patch_table: Apr 16 2022 14:09:06 - git 1286399 + fw_patch_table_u03.bin: Apr 16 2022 14:21:14 - git ee35bea +SDIO fw version: + fmacfw.bin: di Apr 16 2022 17:05:37 - g2fdc0afd + fmacfw_rf.bin: di Jan 29 2022 12:20:08 - ga0734829 + fw_patch_table.bin: Apr 16 2022 14:09:06 - git 1286399 + fw_patch_table_u03.bin: Apr 16 2022 14:21:14 - git ee35bea +============================================================= + + +update_date:2022_0411_2122 +info: + 1. [USB/driver] fix aic_load_fw build error. + 2. [USB/driver] modify m2d loading process. 
+ +commit-id: b39dfdd0c6c45ccf65794732a88301eddd40c884 + +Test comment: + + +USB fw version: + fmacfw.bin: zh Apr 06 2022 19:20:28 - g90d9767e + fmacfw_rf.bin: zh Mar 16 2022 14:32:00 - g094d53efm + fw_patch_table: Apr 07 2022 17:58:46 - git fc23fc9 + fw_patch_table_u03.bin: Apr 06 2022 12:29:05 - git cdb0db6 +SDIO fw version: + fmacfw.bin: di Apr 07 2022 16:48:50 - g785b8061 + fmacfw_rf.bin: di Jan 29 2022 12:20:08 - ga0734829 + fw_patch_table.bin: Apr 07 2022 17:58:46 - git fc23fc9 + fw_patch_table_u03.bin: Apr 06 2022 12:29:05 - git cdb0db6 +============================================================= + +update_date:2022_0409_2307 +info: + 1. [SDIO/driver] fix aic8800_btlpm kernel crash when rmmod driver. + 2. [USB/driver] fix build error rxdesc_tag *rxdesc. + 3. [SDIO/driver USB/driver] add CONFIG_WPA3_FOR_OLD_KERNEL. + 4. [USB/fw] update m2d_ota.bin. + 5. [SDIO/patch USB/patch] update bt patch. + 6. [USB/fw] add fmacfw_m2d.bin + +commit-id: e86071484d92aa2201d39954d4962b6a1ba387a0 + +Test comment: + + +USB fw version: + fmacfw.bin: zh Apr 06 2022 19:20:28 - g90d9767e + fmacfw_rf.bin: zh Mar 16 2022 14:32:00 - g094d53efm + fw_patch_table: Apr 07 2022 17:58:46 - git fc23fc9 + fw_patch_table_u03.bin: Apr 06 2022 12:29:05 - git cdb0db6 +SDIO fw version: + fmacfw.bin: di Apr 07 2022 16:48:50 - g785b8061 + fmacfw_rf.bin: di Jan 29 2022 12:20:08 - ga0734829 + fw_patch_table.bin: Apr 07 2022 17:58:46 - git fc23fc9 + fw_patch_table_u03.bin: Apr 06 2022 12:29:05 - git cdb0db6 +============================================================= + + + +update_date:2022_0407_1739 +info: + 1. [SDIO/fw] update wifi fw to enable gpio wakeup. + +commit-id: e1a52a5405cce6adcec7eb5dca28b7bf9ac2829f + +Test comment: + + +USB fw version: + fmacfw.bin: zh Apr 06 2022 19:20:28 - g90d9767e + fmacfw_rf.bin: zh Mar 16 2022 14:32:00 - g094d53efm + fw_patch_table_u03.bin: Apr 06 2022 12:29:05 - git cdb0db6 +SDIO fw version: + di Apr 07 2022 16:48:50 - g785b8061 + fmacfw_rf.bin: di Jan 29 2022 12:20:08 - ga0734829 + fw_patch_table.bin: Apr 01 2022 20:05:14 - git 6273991 + fw_patch_table_u03.bin: Apr 06 2022 12:29:05 - git cdb0db6 +============================================================= + + + +update_date:2022_0406_1956 +info: + 1. [SDIO/driver USB/driver] add setting xtal freq by aic_userconfig.txt. + 2. [USB/fw] read flash mac addr. + 3. [USB/libbt-vendor] fix compiler error. + +commit-id: 74c4d1f7813b25e41c2bbfef3419e841d2a3258a + +Test comment: + + +USB fw version: + fmacfw.bin: zh Apr 06 2022 19:20:28 - g90d9767e + fmacfw_rf.bin: zh Mar 16 2022 14:32:00 - g094d53efm + fw_patch_table_u03.bin: Apr 06 2022 12:29:05 - git cdb0db6 +SDIO fw version: + fmacfw.bin: di Jan 25 2022 10:23:44 - g9c46deec + fmacfw_rf.bin: di Jan 29 2022 12:20:08 - ga0734829 + fw_patch_table.bin: Apr 01 2022 20:05:14 - git 6273991 + fw_patch_table_u03.bin: Apr 01 2022 20:02:43 - git b3cbc78 +============================================================= + + +update_date:2022_0402_1014 +info: + 1. [bt/patch] update bt patch, add fw assert report support for USB and fix adv len overflow, ble reconnect, dut mode. 
+ +commit-id: 23769f445b7414aa83bbe3844c421c6a86b469fa + +Test comment: + + +USB fw version: + fmacfw.bin: bo Mar 14 2022 11:43:55 - g5a39adc1 + fmacfw_rf.bin: zh Mar 16 2022 14:32:00 - g094d53efm + fw_patch_table_u03.bin: Apr 01 2022 20:02:43 - git b3cbc78 +SDIO fw version: + fmacfw.bin: di Jan 25 2022 10:23:44 - g9c46deec + fmacfw_rf.bin: di Jan 29 2022 12:20:08 - ga0734829 + fw_patch_table.bin: Apr 01 2022 20:05:14 - git 6273991 + fw_patch_table_u03.bin: Apr 01 2022 20:02:43 - git b3cbc78 +============================================================= + + +update_date:2022_0401_1825 +info: + 1. [SDIO/driver] add CONFIG_SUPPORT_REALTIME_CHANGE_MAC is default y. + 2. [SDIO/driver USB/driver] fix adaptivity patch_tbl in aic_load_fw and aic8800_bsp. + 3. [SDIO/driver USB/driver] add CONFIG_SET_VENDOR_EXTENSION_IE. + 4. [SDIO/driver USB/driver] fix workaround for txq->idx == TXQ_INACTIVE BUG_ON(). + 5. [SDIO/patch] add rk3566 Android11 porting patch. + +commit-id: + +Test comment: + + +USB fw version: + fmacfw.bin: bo Mar 14 2022 11:43:55 - g5a39adc1 + fmacfw_rf.bin: zh Mar 16 2022 14:32:00 - g094d53efm + fw_patch_table_u03.bin: Mar 16 2022 16:25:53 - git ecdf1b1 +SDIO fw version: + fmacfw.bin: di Jan 25 2022 10:23:44 - g9c46deec + fmacfw_rf.bin: di Jan 29 2022 12:20:08 - ga0734829 + fw_patch_table.bin: Mar 16 2022 16:25:56 - git 4e1c0fc + fw_patch_table_u03.bin: Mar 16 2022 16:25:53 - git ecdf1b1 +============================================================= + +update_date:2022_0328_1623 +info: + 1. [USB/driver] add CONFIG_SUPPORT_REALTIME_CHANGE_MAC is default y + +commit-id: 8caff4ac6e513fa2ad4bb13b96894c0493cd29b1 + +Test comment: + + +USB fw version: + fmacfw.bin: bo Mar 14 2022 11:43:55 - g5a39adc1 + fmacfw_rf.bin: zh Mar 16 2022 14:32:00 - g094d53efm + fw_patch_table_u03.bin: Mar 16 2022 16:25:53 - git ecdf1b1 +SDIO fw version: + fmacfw.bin: di Jan 25 2022 10:23:44 - g9c46deec + fmacfw_rf.bin: di Jan 29 2022 12:20:08 - ga0734829 + fw_patch_table.bin: Mar 16 2022 16:25:56 - git 4e1c0fc + fw_patch_table_u03.bin: Mar 16 2022 16:25:53 - git ecdf1b1 +============================================================= + +update_date:2022_0325_1830 +info: + 1. [USB/driver] add adaptivity test in aic_load_fw + 2. [USB/driver] add CONFIG_STA_SCAN_WHEN_P2P_WORKING in aic8800_fdrv for customer to chose. + +commit-id: 8485d3fdc2d42b11a1f814ef89b7c33ed1eb449f + +Test comment: + + +USB fw version: + fmacfw.bin: bo Mar 14 2022 11:43:55 - g5a39adc1 + fmacfw_rf.bin: zh Mar 16 2022 14:32:00 - g094d53efm + fw_patch_table_u03.bin: Mar 16 2022 16:25:53 - git ecdf1b1 +SDIO fw version: + fmacfw.bin: di Jan 25 2022 10:23:44 - g9c46deec + fmacfw_rf.bin: di Jan 29 2022 12:20:08 - ga0734829 + fw_patch_table.bin: Mar 16 2022 16:25:56 - git 4e1c0fc + fw_patch_table_u03.bin: Mar 16 2022 16:25:53 - git ecdf1b1 +============================================================= + + +update_date:2022_0322_1720 +info: + 1. [USB/driver]fix aicwf_usb_tx_flowctrl and other fc. + 2. [USB/driver]add tasklet config for tx(default n). 
+ +commit-id: + +Test comment: + + +USB fw version: + fmacfw.bin: bo Mar 14 2022 11:43:55 - g5a39adc1 + fmacfw_rf.bin: zh Mar 16 2022 14:32:00 - g094d53efm + fw_patch_table_u03.bin: Mar 16 2022 16:25:53 - git ecdf1b1 +SDIO fw version: + fmacfw.bin: di Jan 25 2022 10:23:44 - g9c46deec + fmacfw_rf.bin: di Jan 29 2022 12:20:08 - ga0734829 + fw_patch_table.bin: Mar 16 2022 16:25:56 - git 4e1c0fc + fw_patch_table_u03.bin: Mar 16 2022 16:25:53 - git ecdf1b1 +============================================================= + +update_date:2022_0321_1008 +info: + 1. [USB/fw]update msg_ep fmacfw_rf.bin. + 2. [USB/patch] update bt patch, disable le ch_up_proc, modify le itf ver, add lpm indicate. + +commit-id: 85ba6a1a319f4f824f91eeb11ba7337d1567b908 + +Test comment: + + +USB fw version: + fmacfw.bin: bo Mar 14 2022 11:43:55 - g5a39adc1 + fmacfw_rf.bin: zh Mar 16 2022 14:32:00 - g094d53efm + fw_patch_table_u03.bin: Mar 16 2022 16:25:53 - git ecdf1b1 +SDIO fw version: + fmacfw.bin: di Jan 25 2022 10:23:44 - g9c46deec + fmacfw_rf.bin: di Jan 29 2022 12:20:08 - ga0734829 + fw_patch_table.bin: Mar 16 2022 16:25:56 - git 4e1c0fc + fw_patch_table_u03.bin: Mar 16 2022 16:25:53 - git ecdf1b1 +============================================================= + +update_date:2022_0315_0959 +info: + 1. [USB/fw]Set wifi rx msg pkt size to 1028. + 2. [USB/driver] fix can not create wlan0 in /proc/net/wireless. + +commit-id: 79939aaba6a8517b47108dc6f18a75b57bc63f00 + +Test comment: + 1. [USB] usb plug out/in. + 2. [USB] Wifi and Bt Enable/Disable. + 3. [USB] Wifi connect/disconnect to ap. + 4. [USB] Bt connect to device of headset, phone, and other. + 5. [USB] Bt RF testing. + +USB fw version: + fmacfw.bin: bo Mar 14 2022 11:43:55 - g5a39adc1 + fmacfw_rf.bin: zh Jan 12 2022 11:38:42 - g0a2dfff4 + fw_patch_table_u03.bin: Mar 08 2022 11:04:57 - git 4653c9b +SDIO fw version: + fmacfw.bin: di Jan 25 2022 10:23:44 - g9c46deec + fmacfw_rf.bin: di Jan 29 2022 12:20:08 - ga0734829 + fw_patch_table.bin: Mar 11 2022 21:00:58 - git 86d7a3c + fw_patch_table_u03.bin: Mar 12 2022 09:45:59 - git 8d08eeb +============================================================= + + +update_date:2022_0312_1017 +info: + 1. [SDIO/driver]add lpm_enable. + 2. [SDIO/patch]add lpm_enable. + + +commit-id: 929cbffc6ed48df8b38e3bf3f094694fcaa94ebc + +Test comment: + +USB fw version: + fmacfw.bin: bo Mar 09 2022 14:49:29 - g583f5d26 + fmacfw_rf.bin: zh Jan 12 2022 11:38:42 - g0a2dfff4 + fw_patch_table_u03.bin: Mar 08 2022 11:04:57 - git 4653c9b +SDIO fw version: + fmacfw.bin: di Jan 25 2022 10:23:44 - g9c46deec + fmacfw_rf.bin: di Jan 29 2022 12:20:08 - ga0734829 + fw_patch_table.bin: Mar 11 2022 21:00:58 - git 86d7a3c + fw_patch_table_u03.bin: Mar 12 2022 09:45:59 - git 8d08eeb +============================================================= + +update_date:2022_0310_1812 +info: + 1. [SDIO/libbt-vendor]fix crash when disable bt. + 2. [USB/driver] modify udelay(100) to mdelay(1) when wait cmd queue empty + 3. [USB/fw] add fmacfw_usbbt_txadaptivity.bin. 
+ +commit-id: 561b345b1db42bffbf480fe8864c650e2500f950 + +Test comment: + +USB fw version: + fmacfw.bin: bo Mar 09 2022 14:49:29 - g583f5d26 + fmacfw_rf.bin: zh Jan 12 2022 11:38:42 - g0a2dfff4 + fw_patch_table_u03.bin: Mar 08 2022 11:04:57 - git 4653c9b +SDIO fw version: + fmacfw.bin: di Jan 25 2022 10:23:44 - g9c46deec + fmacfw_rf.bin: di Jan 29 2022 12:20:08 - ga0734829 + fw_patch_table_u03.bin: Mar 08 2022 11:04:57 - git 4653c9b +============================================================= + +update_date:2022_0310_0952 +info: + 1. [USB]1.Separate wlan msg buffer from data buffer, enable MSG_EP. + +commit-id: 58cfa3efebd11f96c1ff33175645990c6f39c2a7 + +Test comment: + +USB fw version: + fmacfw.bin: bo Mar 09 2022 14:49:29 - g583f5d26 + fmacfw_rf.bin: zh Jan 12 2022 11:38:42 - g0a2dfff4 + fw_patch_table_u03.bin: Mar 08 2022 11:04:57 - git 4653c9b +SDIO fw version: + fmacfw.bin: di Jan 25 2022 10:23:44 - g9c46deec + fmacfw_rf.bin: di Jan 29 2022 12:20:08 - ga0734829 + fw_patch_table_u03.bin: Mar 08 2022 11:04:57 - git 4653c9b +============================================================= + + +update_date:2022_0308_1131 +info: + 1. [bt/patch] update bt patch, modify for acl duplicate and sniff err ,disable ble ch map up proc + +commit-id: dc53883505572640065827873218fa3d436d3500 + +Test comment: + +USB fw version: + fmacfw.bin: di Jan 24 2022 20:49:02 - g9c46deec + fmacfw_rf.bin: zh Jan 12 2022 11:38:42 - g0a2dfff4 + fw_patch_table_u03.bin: Mar 08 2022 11:04:57 - git 4653c9b +SDIO fw version: + fmacfw.bin: di Jan 25 2022 10:23:44 - g9c46deec + fmacfw_rf.bin: di Jan 29 2022 12:20:08 - ga0734829 + fw_patch_table_u03.bin: Mar 08 2022 11:04:57 - git 4653c9b +============================================================= + +update_date:2022_0304_1808 +info: + 1. [SDIO/driver] fix the problem that WiFi or Bluetooth cannot be enabled repeatedly for Rockchip platform. + +commit-id: 01943c1cdbbd090682f75be124b63d1c10be106d + +Test comment: + +USB fw version: + fmacfw.bin: di Jan 24 2022 20:49:02 - g9c46deec + fmacfw_rf.bin: zh Jan 12 2022 11:38:42 - g0a2dfff4 + fw_patch_table_u03.bin: Feb 26 2022 11:11:56 - git 7a4d47f +SDIO fw version: + fmacfw.bin: di Jan 25 2022 10:23:44 - g9c46deec + fmacfw_rf.bin: di Jan 29 2022 12:20:08 - ga0734829 + fw_patch_table_u03.bin: Feb 26 2022 11:11:56 - git 7a4d47f +============================================================= + + +update_date:2022_0303_1053 +info: + 1. [USB/driver] add CONFIG_USB_MSG_EP in aic_load_fw Makefile.(defualt n) + 2. [USB/driver] add CONFIG_SUPPORT_REALTIME_CHANGE_MAC in aic8800_fdrv Makefile. +commit-id: 63df4a58c966ddaa2ce3d6c470ada844e1201162 + +Test comment: + +USB fw version: + fmacfw.bin: di Jan 24 2022 20:49:02 - g9c46deec + fmacfw_rf.bin: zh Jan 12 2022 11:38:42 - g0a2dfff4 + fw_patch_table_u03.bin: Feb 26 2022 11:11:56 - git 7a4d47f +SDIO fw version: + fmacfw.bin: di Jan 25 2022 10:23:44 - g9c46deec + fmacfw_rf.bin: di Jan 29 2022 12:20:08 - ga0734829 + fw_patch_table_u03.bin: Feb 26 2022 11:11:56 - git 7a4d47f +============================================================= + +update_date:2022_0302_1428 +info: + 1. [SDIO/driver] add CONFIG_GPIO_WAKEUP in Makefile. 
+commit-id: 6e31feb491e13079215cf80d005c9d1d881a10f3
+
+Test comment:
+
+USB fw version:
+    fmacfw.bin: di Jan 24 2022 20:49:02 - g9c46deec
+    fmacfw_rf.bin: zh Jan 12 2022 11:38:42 - g0a2dfff4
+    fw_patch_table_u03.bin: Feb 26 2022 11:11:56 - git 7a4d47f
+SDIO fw version:
+    fmacfw.bin: di Jan 25 2022 10:23:44 - g9c46deec
+    fmacfw_rf.bin: di Jan 29 2022 12:20:08 - ga0734829
+    fw_patch_table_u03.bin: Feb 26 2022 11:11:56 - git 7a4d47f
+=============================================================
+
+update_date:2022_0228_1730
+info:
+    1. [SDIO/USB/bt_patch] update bt patch for U02&&U03, modify for pta and afh, remote feats.
+    2. [SDIO/driver] add CONFIG_COEX in Makefile.
+    3. [SDIO/libbt-vendor] modify rfkill path.
+commit-id: 7bdc2f2151dc6a7ea5ddd7dadc8ce34f0e6edf0b
+
+Test comment:
+
+USB fw version:
+    fmacfw.bin: di Jan 24 2022 20:49:02 - g9c46deec
+    fmacfw_rf.bin: zh Jan 12 2022 11:38:42 - g0a2dfff4
+    fw_patch_table_u03.bin: Feb 26 2022 11:11:56 - git 7a4d47f
+SDIO fw version:
+    fmacfw.bin: di Jan 25 2022 10:23:44 - g9c46deec
+    fmacfw_rf.bin: di Jan 29 2022 12:20:08 - ga0734829
+    fw_patch_table_u03.bin: Feb 26 2022 11:11:56 - git 7a4d47f
+=============================================================
+
+
+update_date:2022_0224_1614
+info:
+    1. [SDIO/USB/bt_patch] update bt patch for u02&&u03: for afh err, le con timeout, rf test, and tx pwr rd/wr.
+commit-id: 2a3085ef7848fe6fb80ecc3a938894e5179b2404
+
+Test comment:
+
+USB fw version:
+    fmacfw.bin: di Jan 24 2022 20:49:02 - g9c46deec
+    fmacfw_rf.bin: zh Jan 12 2022 11:38:42 - g0a2dfff4
+    fw_patch_table_u03.bin: Feb 24 2022 15:09:19 - git 6ffede3
+SDIO fw version:
+    fmacfw.bin: di Jan 25 2022 10:23:44 - g9c46deec
+    fmacfw_rf.bin: di Jan 29 2022 12:20:08 - ga0734829
+    fw_patch_table_u03.bin: Feb 24 2022 15:09:19 - git 6ffede3
+=============================================================
+
+update_date:2022_0223_1539
+info:
+    1. [SDIO/driver] modify rftest_result len to 9.
+    2. [tool] update rftest tool.
+commit-id: 38f007cea3f548502c0be625f689ffa75a34fbb7
+
+Test comment:
+
+USB fw version:
+    fmacfw.bin: di Jan 24 2022 20:49:02 - g9c46deec
+    fmacfw_rf.bin: zh Jan 12 2022 11:38:42 - g0a2dfff4
+    fw_patch_table_u03.bin: Feb 16 2022 15:15:22 - git bf2497c
+SDIO fw version:
+    fmacfw.bin: di Jan 25 2022 10:23:44 - g9c46deec
+    fmacfw_rf.bin: di Jan 29 2022 12:20:08 - ga0734829
+    fw_patch_table_u03.bin: Feb 16 2022 15:15:22 - git bf2497c
+=============================================================
+
+
+update_date:2022_0222_1647
+info:
+    1. [USB/driver] fix failure to parse a minus sign in aic_userconfig.txt.
+
+commit-id: d45cf6e082e023f6d9237c6d797ff844b22d9776
+
+Test comment:
+
+USB fw version:
+    fmacfw.bin: di Jan 24 2022 20:49:02 - g9c46deec
+    fmacfw_rf.bin: zh Jan 12 2022 11:38:42 - g0a2dfff4
+    fw_patch_table_u03.bin: Feb 16 2022 15:15:22 - git bf2497c
+SDIO fw version:
+    fmacfw.bin: di Jan 25 2022 10:23:44 - g9c46deec
+    fmacfw_rf.bin: di Jan 29 2022 12:20:08 - ga0734829
+    fw_patch_table_u03.bin: Feb 16 2022 15:15:22 - git bf2497c
+=============================================================
+
+update_date:2022_0218_1614
+info:
+    1. [SDIO/USB] add CREATE_TRACE_POINT in Makefile.
+
+commit-id: 523ac419970f431ad9b86f54ae6f4d6a8f83a2c8
+
+Test comment:
+
+USB fw version:
+    fmacfw.bin: di Jan 24 2022 20:49:02 - g9c46deec
+    fmacfw_rf.bin: zh Jan 12 2022 11:38:42 - g0a2dfff4
+    fw_patch_table_u03.bin: Feb 16 2022 15:15:22 - git bf2497c
+SDIO fw version:
+    fmacfw.bin: di Jan 25 2022 10:23:44 - g9c46deec
+    fmacfw_rf.bin: di Jan 29 2022 12:20:08 - ga0734829
+    fw_patch_table_u03.bin: Feb 16 2022 15:15:22 - git bf2497c
+=============================================================
+
+update_date:2022_0211_1530
+info:
+    1. [SDIO/driver] discard tdls feature for aic8800_fdrv.
+    2. [USB/driver] add wowlan config (default n).
+    3. [USB/driver] fix crash for tx flow control.
+    4. [USB/driver] fix crash for usb plug out.
+    5. [USB/SDIO] modify Makefile to build all drivers.
+
+commit-id: a33288d81da772672832c9ee991423f29b8f6da6
+
+Test comment:
+    1. [USB] usb plug out/in.
+
+USB fw version:
+    fmacfw.bin: di Jan 24 2022 20:49:02 - g9c46deec
+    fmacfw_rf.bin: zh Jan 12 2022 11:38:42 - g0a2dfff4
+    fw_patch_table_u03.bin: Jan 19 2022 20:55:29 - git 08d031b
+SDIO fw version:
+    fmacfw.bin: di Jan 25 2022 10:23:44 - g9c46deec
+    fmacfw_rf.bin: di Jan 29 2022 12:20:08 - ga0734829
+    fw_patch_table_u03.bin: Jan 19 2022 20:55:29 - git 08d031b
+=============================================================
+
+update_date:2022_0126_1649
+info:
+    1. [USB/driver] discard tdls feature for aic8800_fdrv.
+
+commit-id: 9ebba80cef9982bebb7679edb2ffd0c4a7b12af7
+
+Test comment:
+    1. [USB] Use a tdls-capable phone and run ping or iperf to the 8800.
+
+
+USB fw version:
+    fmacfw.bin: di Jan 24 2022 20:49:02 - g9c46deec
+    fmacfw_rf.bin: zh Jan 12 2022 11:38:42 - g0a2dfff4
+    fw_patch_table_u03.bin: Jan 19 2022 20:55:29 - git 08d031b
+SDIO fw version:
+    fmacfw.bin: di Jan 25 2022 10:23:44 - g9c46deec
+    fmacfw_rf.bin: di Jan 06 2022 09:53:42 - gcc1cd5dd
+    fw_patch_table_u03.bin: Jan 19 2022 20:55:29 - git 08d031b
+=============================================================
+
+update_date:2022_0125_1054
+info:
+    1. [USB/fw] fix failure to connect to an ap that is 11ax only.
+    2. [USB/driver] update htc handle.
+
+commit-id: 7b5aaf3e82f3d7906da3de6b2f9d231b668b0f02
+
+Test comment:
+    1. [USB] Try to connect to an ap that is 11ax only.
+
+
+USB fw version:
+    fmacfw.bin: di Jan 24 2022 20:49:02 - g9c46deec
+    fmacfw_rf.bin: zh Jan 12 2022 11:38:42 - g0a2dfff4
+    fw_patch_table_u03.bin: Jan 19 2022 20:55:29 - git 08d031b
+SDIO fw version:
+    fmacfw.bin: di Jan 25 2022 10:23:44 - g9c46deec
+    fmacfw_rf.bin: di Jan 06 2022 09:53:42 - gcc1cd5dd
+    fw_patch_table_u03.bin: Jan 19 2022 20:55:29 - git 08d031b
+=============================================================
+
+update_date:2022_0124_2322
+info:
+    1. [USB/driver] add MM_APM_STALOSS_IND handle.
+    2. [USB/driver] fix driver deadlock for ap mode.
+    3. [USB/driver] fix wifi unable to scan after a failed wpa3 connection.
+
+commit-id: b250aa23adf278086ce1917ea588a55460a5225e
+
+Test comment:
+    1. [USB] Try turning on the hotspot and connecting using a WiFi device.
+    2. [USB] Deliberately use the wrong password to connect to the WPA3 router, then try WiFi scanning.
+
+USB fw version:
+    fmacfw.bin: di Jan 18 2022 15:49:25 - ge44bcf97
+    fmacfw_rf.bin: zh Jan 12 2022 11:38:42 - g0a2dfff4
+    fw_patch_table_u03.bin: Jan 19 2022 20:55:29 - git 08d031b
+SDIO fw version:
+    fmacfw.bin: di Jan 18 2022 15:51:48 - ge44bcf97
+    fmacfw_rf.bin: di Jan 06 2022 09:53:42 - gcc1cd5dd
+    fw_patch_table_u03.bin: Jan 19 2022 20:55:29 - git 08d031b
+=============================================================
+
+update_date:2022_0122_1425
+info:
+    1. [USB/driver] fix failure to connect to wpa3 ap.
+    2. [USB/driver] fix null pointer for country code.
+
+commit-id: ad94518e3dd72d876c4171807987f604ffb8f058
+
+Test comment:
+    1. [USB] reconnect to wpa3 ap.
+    2. [USB] try to select an undefined country code.
+
+USB fw version:
+    fmacfw.bin: di Jan 18 2022 15:49:25 - ge44bcf97
+    fmacfw_rf.bin: zh Jan 12 2022 11:38:42 - g0a2dfff4
+    fw_patch_table_u03.bin: Jan 19 2022 20:55:29 - git 08d031b
+SDIO fw version:
+    fmacfw.bin: di Jan 18 2022 15:51:48 - ge44bcf97
+    fmacfw_rf.bin: di Jan 06 2022 09:53:42 - gcc1cd5dd
+    fw_patch_table_u03.bin: Jan 19 2022 20:55:29 - git 08d031b
+=============================================================
+
+update_date:2022_0120_1003
+info:
+    1. [USB/driver] fix failure to connect to wep ap.
+
+commit-id: 5f4030ddbe8f3b9dfc461d93b4ba0cb820d7e403
+
+Test comment:
+    1. [USB] reconnect to wep ap.
+
+USB fw version:
+    fmacfw.bin: di Jan 18 2022 15:49:25 - ge44bcf97
+    fmacfw_rf.bin: zh Jan 12 2022 11:38:42 - g0a2dfff4
+    fw_patch_table_u03.bin: Jan 19 2022 20:55:29 - git 08d031b
+SDIO fw version:
+    fmacfw.bin: di Jan 18 2022 15:51:48 - ge44bcf97
+    fmacfw_rf.bin: di Jan 06 2022 09:53:42 - gcc1cd5dd
+    fw_patch_table_u03.bin: Jan 19 2022 20:55:29 - git 08d031b
+=============================================================
+
+update_date:2022_0118_1819
+info:
+    1. [USB/fw patch] fix failure to scan aps after loading fw.
+
+commit-id: cf265d4fb9ce1e533a389bb92cc057199f5f69f6
+
+Test comment:
+    1. [USB] power the board on/off to check whether wifi can scan aps.
+
+USB fw version:
+    fmacfw.bin: di Jan 18 2022 15:49:25 - ge44bcf97
+    fmacfw_rf.bin: zh Jan 12 2022 11:38:42 - g0a2dfff4
+    fw_patch_table_u03.bin: Jan 17 2022 19:27:53 - git 673d32a
+SDIO fw version:
+    fmacfw.bin: di Jan 18 2022 15:51:48 - ge44bcf97
+    fmacfw_rf.bin: di Jan 06 2022 09:53:42 - gcc1cd5dd
+    fw_patch_table_u03.bin: Jan 17 2022 19:27:53 - git 673d32a
+=============================================================
+
+
+update_date:2022_0117_0955
+info:
+    1. [USB/driver] add br feature.
+    2. [USB/driver] fix null pointer in rwnx_close.
+    3. [USB/driver] fix fw crash probability for power on/off test.
+    4. [SDIO/driver] fix failure to load aic_userconfig.txt when the ic has no vendor_info written.
+
+commit-id: ec2f2bc043c9ff217639434fc755503d40829f7b
+
+Test comment:
+    1. [USB] test hotspot turn on/off.
+    2. [USB] power the board on/off to check whether wifi enables.
+    3. [SDIO] insmod wifi driver to check whether aic_userconfig.txt loads.
+
+USB fw version:
+    fmacfw.bin: li Jan 04 2022 19:18:29 - gcc1cd5dd
+    fmacfw_rf.bin: zh Jan 12 2022 11:38:42 - g0a2dfff4
+    fw_patch_table_u03.bin: Jan 12 2022 17:28:36 - git 14b7cd5
+SDIO fw version:
+    fmacfw.bin: di Dec 29 2021 14:59:47 - g2b0f7f21
+    fmacfw_rf.bin: di Jan 06 2022 09:53:42 - gcc1cd5dd
+    fw_patch_table_u03.bin: Jan 12 2022 17:28:36 - git 14b7cd5
+=============================================================
+
+update_date:2022_0112_1227
+info:
+    1. [USB/fw_rf] update fw_rf to get firmware version.
+    2. [USB/aic_btusb] comment out suspend set_bt_onoff().
+
+commit-id:
+
+USB fw version:
+    fmacfw.bin: li Jan 04 2022 19:18:29 - gcc1cd5dd
+    fmacfw_rf.bin: zh Jan 12 2022 11:38:42 - g0a2dfff4
+    fw_patch_table_u03.bin: Jan 06 2022 20:25:33 - git ee558e4
+SDIO fw version:
+    fmacfw.bin: di Dec 29 2021 14:59:47 - g2b0f7f21
+    fmacfw_rf.bin: di Jan 06 2022 09:53:42 - gcc1cd5dd
+    fw_patch_table_u03.bin: Jan 06 2022 20:25:33 - git ee558e4
+=============================================================
+
+update_date:2022_0108_1843
+info:
+    1. [USB/driver] fix usb hang on driver rmmod.
+    2. [SDIO/driver] add country setting.
+
+commit-id: 4b947bff6e3310a81d0d1d9058ffdd2c6628e3d6
+
+USB fw version:
+    fmacfw.bin: li Jan 04 2022 19:18:29 - gcc1cd5dd
+    fmacfw_rf.bin: zh Dec 15 2021 17:00:03 - gf6281463
+    fw_patch_table_u03.bin: Jan 06 2022 20:25:33 - git ee558e4
+SDIO fw version:
+    fmacfw.bin: di Dec 29 2021 14:59:47 - g2b0f7f21
+    fmacfw_rf.bin: di Jan 06 2022 09:53:42 - gcc1cd5dd
+    fw_patch_table_u03.bin: Jan 06 2022 20:25:33 - git ee558e4
+=============================================================
+
+update_date:2022_0107_1226
+info:
+    1. [USB/driver] Update regdb.c.
+
+commit-id: f28c6c237d68c56398de8e25945947f972e3111e
+
+USB fw version:
+    fmacfw.bin: li Jan 04 2022 19:18:29 - gcc1cd5dd
+    fmacfw_rf.bin: zh Dec 15 2021 17:00:03 - gf6281463
+    fw_patch_table_u03.bin: Jan 06 2022 20:25:33 - git ee558e4
+SDIO fw version:
+    fmacfw.bin: di Dec 29 2021 14:59:47 - g2b0f7f21
+    fmacfw_rf.bin: di Jan 06 2022 09:53:42 - gcc1cd5dd
+    fw_patch_table_u03.bin: Jan 06 2022 20:25:33 - git ee558e4
+=============================================================
+
+update_date:2022_0107_0938
+info:
+    1. [USB SDIO/bt patch] Fix txpwr-done timing that induced RF failures such as ACP fail.
+
+commit-id: b8a390164f58b741d6245d61fbe09ce053ee9809
+
+USB fw version:
+    fmacfw.bin: li Jan 04 2022 19:18:29 - gcc1cd5dd
+    fmacfw_rf.bin: zh Dec 15 2021 17:00:03 - gf6281463
+    fw_patch_table_u03.bin: Jan 06 2022 20:25:33 - git ee558e4
+SDIO fw version:
+    fmacfw.bin: di Dec 29 2021 14:59:47 - g2b0f7f21
+    fmacfw_rf.bin: di Jan 06 2022 09:53:42 - gcc1cd5dd
+    fw_patch_table_u03.bin: Jan 06 2022 20:25:33 - git ee558e4
+=============================================================
+
+update_date:2022_0106_1347
+info:
+    1. [SDIO/driver] aic8800_fdrv gets fw_path from aic8800_bsp.
+    2. [SDIO/driver] aic8800_bsp supports testmode.
+
+commit-id: ccce528499c7dbea9977e0531336be965eb4b50c
+
+USB fw version:
+    fmacfw.bin: li Jan 04 2022 19:18:29 - gcc1cd5dd
+    fmacfw_rf.bin: zh Dec 15 2021 17:00:03 - gf6281463
+    fw_patch_table_u03.bin: Jan 04 2022 20:08:29 - git 42982a2
+SDIO fw version:
+    fmacfw.bin: di Dec 29 2021 14:59:47 - g2b0f7f21
+    fmacfw_rf.bin: di Jan 06 2022 09:53:42 - gcc1cd5dd
+    fw_patch_table_u03.bin: Jan 04 2022 20:08:29 - git 42982a2
+=============================================================
+
+update_date:2022_0105_1102
+info:
+    1. [USB/driver] Switch country code for old kernels below 3.14.
+
+commit-id: 739943831abf0a172e427102cabe851c5094ad44
+
+USB fw version:
+    fmacfw.bin: li Jan 04 2022 19:18:29 - gcc1cd5dd
+    fmacfw_rf.bin: zh Dec 15 2021 17:00:03 - gf6281463
+    fw_patch_table_u03.bin: Jan 04 2022 20:08:29 - git 42982a2
+SDIO fw version:
+    fmacfw.bin: di Dec 29 2021 14:59:47 - g2b0f7f21
+    fmacfw_rf.bin: di Dec 15 2021 16:34:37 - ge0aaa5a3m
+    fw_patch_table_u03.bin: Jan 04 2022 20:08:29 - git 42982a2
+=============================================================
+
+update_date:2022_0105_0941
+info:
+    1. [USB/bt patch] Improve BT performance from 2M to 3M.
+
+commit-id: f958f6c66449e8d6957e513b3912d8d2d5be65b7
+
+USB fw version:
+    fmacfw.bin: li Jan 04 2022 19:18:29 - gcc1cd5dd
+    fmacfw_rf.bin: zh Dec 15 2021 17:00:03 - gf6281463
+    fw_patch_table_u03.bin: Jan 04 2022 20:08:29 - git 42982a2
+SDIO fw version:
+    fmacfw.bin: di Dec 29 2021 14:59:47 - g2b0f7f21
+    fmacfw_rf.bin: di Dec 15 2021 16:34:37 - ge0aaa5a3m
+    fw_patch_table_u03.bin: Jan 04 2022 20:08:29 - git 42982a2
+=============================================================
+
+update_date:2022_0104_1948
+info:
+    1. [USB/driver] fix bt unable to send hci commands on ubuntu.
+    2. [USB/driver] support kernel 5.11.
+commit-id: ba0f9e429721b851b203c662518b2ad0671ab493
+
+USB fw version:
+    fmacfw.bin: li Jan 04 2022 19:18:29 - gcc1cd5dd
+    fmacfw_rf.bin: zh Dec 15 2021 17:00:03 - gf6281463
+    fw_patch_table_u03.bin: Dec 31 2021 21:06:45 - git 8638e37
+SDIO fw version:
+    fmacfw.bin: di Dec 29 2021 14:59:47 - g2b0f7f21
+    fmacfw_rf.bin: di Dec 15 2021 16:34:37 - ge0aaa5a3m
+    fw_patch_table_u03.bin: Dec 31 2021 21:06:45 - git 8638e37
+=============================================================
+
+update_date:2022_0104_1026
+info:
+    1. [USB SDIO/bt patch] optimize pta.
+    2. [USB/driver] fix country code rule.
+commit-id: 40f8485dbf6ce5c14f026c1809db6718d2ca00c9
+
+USB fw version:
+    fmacfw.bin: di Dec 29 2021 15:01:04 - g2b0f7f21
+    fmacfw_rf.bin: zh Dec 15 2021 17:00:03 - gf6281463
+    fw_patch_table_u03.bin: Dec 31 2021 21:06:45 - git 8638e37
+SDIO fw version:
+    fmacfw.bin: di Dec 29 2021 14:59:47 - g2b0f7f21
+    fmacfw_rf.bin: di Dec 15 2021 16:34:37 - ge0aaa5a3m
+    fw_patch_table_u03.bin: Dec 31 2021 21:06:45 - git 8638e37
+=============================================================
+
+update_date:2021_1231_1103
+info:
+    1. [SDIO/driver] fix driver insmod crash when he_on is false.
+commit-id: 11b374e2b207828f5ad1f9b7a8b02ce0b555c629
+
+USB fw version:
+    fmacfw.bin: di Dec 29 2021 15:01:04 - g2b0f7f21
+    fmacfw_rf.bin: zh Dec 15 2021 17:00:03 - gf6281463
+    fw_patch_table_u03.bin: Dec 27 2021 19:19:33 - git 91e17e5
+SDIO fw version:
+    fmacfw.bin: di Dec 29 2021 14:59:47 - g2b0f7f21
+    fmacfw_rf.bin: di Dec 15 2021 16:34:37 - ge0aaa5a3m
+    fw_patch_table_u03.bin: Dec 27 2021 19:19:33 - git 91e17e5
+=============================================================
+
+update_date:2021_1231_1036
+info:
+    1. [USB/driver] add display of fw version.
+    2. [USB/driver] fix reordering of resent packets.
+commit-id: da2630d4244b3402ee633eeb42d9ac98b33cfbf8
+
+USB fw version:
+    fmacfw.bin: di Dec 29 2021 15:01:04 - g2b0f7f21
+    fmacfw_rf.bin: zh Dec 15 2021 17:00:03 - gf6281463
+    fw_patch_table_u03.bin: Dec 27 2021 19:19:33 - git 91e17e5
+SDIO fw version:
+    fmacfw.bin: di Dec 29 2021 14:59:47 - g2b0f7f21
+    fmacfw_rf.bin: di Dec 15 2021 16:34:37 - ge0aaa5a3m
+    fw_patch_table_u03.bin: Dec 27 2021 19:19:33 - git 91e17e5
+=============================================================
+
+update_date:2021_1228_1605
+info:
+    1. [USB/driver] modify reorder feature.
+commit-id: d534a5524836e64e53f9d8b932c03b6d92241307
+
+fw version:
+    fmacfw.bin: di Dec 25 2021 17:47:34 - g70393c8c
+    fmacfw_rf.bin: zh Dec 15 2021 17:00:03 - gf6281463
+    fw_patch_table_u03.bin: Dec 27 2021 19:19:33 - git 91e17e5
+=============================================================
+
+update_date:2021_1227_1931
+info:
+    1. [USB/bt patch] update bt patch for U03, remove read-before-write of the tx abort regs.
+commit-id: d886e4ac44b9e9589b41032207bc9a9505ea7304
+
+fw version:
+    fmacfw.bin: di Dec 25 2021 17:47:34 - g70393c8c
+    fmacfw_rf.bin: zh Dec 15 2021 17:00:03 - gf6281463
+    fw_patch_table_u03.bin: Dec 27 2021 19:19:33 - git 91e17e5
+=============================================================
+
+update_date:2021_1227_0947
+info:
+    1. [USB and SDIO/fw and bt patch] update fw and bt patch for bt-only coex during wifi tput.
+commit-id: bb0221de8daccfc1746d263612f2b5176d6b1c12
+
+fw version:
+    fmacfw.bin: di Dec 25 2021 17:47:34 - g70393c8c
+    fmacfw_rf.bin: zh Dec 15 2021 17:00:03 - gf6281463
+    fw_patch_table_u03.bin: Dec 24 2021 21:12:33 - git 3c11860
+=============================================================
+
+
+update_date:2021_1224_1908
+info:
+    1. [USB/patch] add Android7.1 patch for rk3229.
+commit-id: 7bee61ccc635c3374d5e37d0c347550efb2979b1
+
+fw version:
+    fmacfw.bin: di Dec 21 2021 21:16:47 - ge1c43efc
+    fmacfw_rf.bin: zh Dec 15 2021 17:00:03 - gf6281463
+    fw_patch_table_u03.bin: Dec 22 2021 11:15:25 - git 3ec66e3
+=============================================================
+
+update_date:2021_1224_1627
+info:
+    1. [SDIO/driver] support kernel 3.10.
+    2. [SDIO/driver] fix sdio error when tx aggr size is longer than 32768 bytes on old kernels.
+commit-id: c0d9c910c08b3467f9cd7deb762ed99753b086de
+
+fw version:
+    fmacfw.bin: di Dec 21 2021 21:16:47 - ge1c43efc
+    fmacfw_rf.bin: zh Dec 15 2021 17:00:03 - gf6281463
+    fw_patch_table_u03.bin: Dec 22 2021 11:15:25 - git 3ec66e3
+=============================================================
+
+
+update_date:2021_1222_1556
+info:
+    1. [USB/fw] update fmacfw.bin to fix usb communication for wifi and bt coex.
+    2. [USB/patch] update bt patch for U03, for coex, afh, and btmode info.
+commit-id: b0c30a0c0ae1282190818af483a09d2dd5b229b4
+
+fw version:
+    fmacfw.bin: di Dec 21 2021 21:16:47 - ge1c43efc
+    fmacfw_rf.bin: zh Dec 15 2021 17:00:03 - gf6281463
+    fw_patch_table_u03.bin: Dec 22 2021 11:15:25 - git 3ec66e3
+=============================================================
+
+update_date:2021_1221_1134
+info:
+    1. [USB/driver] support kernel 3.14.
+
+commit-id: 1bc82d65639c23ff2ab8eb327c6c570f04702230
+
+fw version:
+    fmacfw.bin: di Dec 15 2021 15:50:54 - ge0aaa5a3
+    fmacfw_rf.bin: zh Dec 15 2021 17:00:03 - gf6281463
+    fw_patch_table_u03.bin: Dec 17 2021 20:05:39 - git c09292a
+=============================================================
+
+update_date:2021_1217_2009
+info:
+    1. [USB/bt patch] update wifi bt coex.
+
+commit-id: 0d134f98568482ff2ec272333f50348d28faee8d
+
+fw version:
+    fmacfw.bin: di Dec 15 2021 15:50:54 - ge0aaa5a3
+    fmacfw_rf.bin: zh Dec 15 2021 17:00:03 - gf6281463
+    fw_patch_table_u03.bin: Dec 17 2021 20:05:39 - git c09292a
+=============================================================
+
+
+update_date:2021_1217_1504
+info:
+    1. [USB/driver] update reorder feature.
+
+commit-id: a9fa6d33a56a5a3ac446f3611fb5ec7bda5bc886
+
+fw version:
+    fmacfw.bin: di Dec 15 2021 15:50:54 - ge0aaa5a3
+    fmacfw_rf.bin: zh Dec 15 2021 17:00:03 - gf6281463
+    fw_patch_table_u03.bin: Dec 13 2021 20:30:59 - git 083f531-dirty
+=============================================================
+
+update_date:2021_1215_1727
+info:
+    1. [USB/driver] fix get_station.
+    2. [USB/driver] fix hidden ssid connection.
+    3. [USB/fw] fix hidden ssid connection.
+    4. [USB/fw_rf] update fmacfw_rf.bin.
+
+commit-id: 68b92524ae033ad3b98b32e7511e6cb92f677370
+
+fw version:
+    fmacfw.bin: di Dec 15 2021 15:50:54 - ge0aaa5a3
+    fmacfw_rf.bin: zh Dec 15 2021 17:00:03 - gf6281463
+    fw_patch_table_u03.bin: Dec 13 2021 20:30:59 - git 083f531-dirty
+=============================================================
+
+update_date:2021_1214_0956
+info:
+    1. [USB/bt patch] fix lag when playing music with iphone6.
+    2. [USB/driver] add bt rx test.
+    3. [USB/driver] fix miracast not handling sequence problems.
+commit-id: 6f95ce436916da227e4bf1ab6abf310c23ddb7a7
+
+fw version:
+    fmacfw.bin: di Dec 06 2021 12:24:15 - git 0d60f8f7
+    fw_patch_table_u03.bin: Dec 13 2021 20:30:59 - git 083f531-dirty
+=============================================================
+
+update_date:2021_1213_1121
+info:
+    1. [USB/bt patch] Optimize bt reconnect.
+commit-id: 8b422a09df61ee1d714fb7766aa9334d4dea4e7d
+
+fw version:
+    fmacfw.bin: di Dec 06 2021 12:24:15 - git 0d60f8f7
+    fw_patch_table_u03.bin: Dec 11 2021 14:36:22 - git 78bcd14-dirty
+=============================================================
+
+update_date:2021_1207_1043
+info:
+    1. [USB/fw] fix p2p NoA and roc cancel.
+    2. [USB/bt patch] fix bt coex, le connection, and sniffer mode connection.
+commit-id: 32b94c70dc87ede4859476200dff192c59991ad7
+
+fw version:
+    fmacfw.bin: di Dec 06 2021 12:24:15 - git 0d60f8f7
+    fw_patch_table_u03.bin: Dec 06 2021 21:25:20 - git 52bc883-dirty
+=============================================================
+
+update_date:2021_1203_1806
+info:
+    1. [USB/fw] fix watchdog for ic reboot.
+    2. [USB/driver] add CONFIG option for usb rx tasklet.
+commit-id: 6d9f92f9e3f01658909cb4f4358923fa4853af5c
+
+=============================================================
+
+update_date:2021_1202_2226
+info:
+    1. [USB/SDIO/bt patch] fix for ble remote reconnect.
+commit-id: 2ed74f33ed10269e4d87c294216e96eea82e8265
+
+=============================================================
+
+update_date:2021_1201_1629
+info:
+    1. [SDIO/driver] fix build error.
+    2. [SDIO/driver] porting to rk3288 Android10.
+    3. [SDIO/bt patch] update bt patch.
+commit-id: 13e50fcc8b7f8f4543b4f1ee0427abbf80204a03
+
+=============================================================
+
+update_date:2021_1128_2300
+info:
+    1. [USB/bt patch] update bt patch.
+commit-id: c19c68e143a63a4bf3df5d5d2f3748abb62dfb32
+
+=============================================================
+
+update_date:2021_1125_2140
+info:
+    1. [USB/bt patch] fix airpods reconnect.
+commit-id: f635978e2af20a2f59821579360c798aacef7310
+
+=============================================================
+update_date:2021_1125_1824
+info:
+    1. [USB/driver] add reading of ic vendor info to check whether 5g is supported.
+    2. [USB/driver] fix crash for sta and p2p concurrent.
+commit-id: 73b169651e675b5aa19cee56f5ae8d711003ad97
+
+=============================================================
+update_date:2021_1125_0009
+info:
+    1. [USB/driver] update align function for usb.
+    2. [USB/fw] fix p2p feature.
+    3. [USB/patch] fix airpods reconnect.
+commit-id: 96a36100997cef2644ad59b1218637235612a53c
+
+=============================================================
+update_date:2021_1122_1554
+info:
+    1. [USB/driver] fix aic_btusb for sco.
+    2. [USB/fw] fix ADDBA for fmacfw.bin.
+commit-id: 55cbf57ac6814e66638677f7795675927793b8f5
+
+=============================================================
+update_date:2021_1116_1044
+info:
+    1. [USB/driver] add country code setting for android private command.
+    2. [USB/fw] compatible with devices that do not support power control max power.
+commit-id: 36608824b5a82b479fd5f1d38f134615491eafd2
+
+=============================================================
+update_date:2021_1109_1723
+info:
+    1. [USB/driver] fix p2p discover and connect feature.
+    2. [USB/fw] update fmacfw.bin for p2p feature.
+commit-id: 0015564e505352af48d377025d44c4dc0d8eca39
+
+=============================================================
+update_date:2021_1105_1427
+info:
+    1. [USB/driver] add struct rwnx_cmd array for rwnx_send_msg.
+commit-id: 6289ff0202a71c9627f4777424d5dbd380cb0412
+
+=============================================================
+update_date:2021_1103_1124
+info:
+    1. [USB/driver] support 16-byte alignment for the usb controller; this feature is disabled by default.
+commit-id: 34d18f7d996d6f63f66629b877590f59c41f842a
+
+=============================================================
+update_date:2021_1027_1610
+info:
+    1. [USB/driver] btusb supports bt HSP.
+commit-id: 768da6fca245d7c4d2fbe5878bfc3ec2ac51b29e
+
+=============================================================
+update_date:2021_1026_2357
+info:
+    1. [USB/fw] fix usb 8801 enumeration.
+commit-id: 3a5e3daf1f978de8e46219c05a830e5bd6d57e39
+
+=============================================================
+update_date:2021_1024_2215
+info:
+    1. [USB/driver] update rf test function.
+    2. [USB/driver] fix driver work crash.
+    3. [USB/driver] fix driver crash when plugging out the 8801.
+    4. [USB/driver] 8-byte alignment function (see the sketch below).
+    5. [USB/fw] fix 8801 enumeration.
+    6. [USB/fw] fix wifi unable to scan aps when wifi is enabled.
+    7. [USB/fw] fix bt assert.
+commit-id: 916eb9026576a30beb664466b5f42f9f8e941591
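+
+Note on the alignment items above (the 8-byte alignment function and the optional 16-byte alignment for the usb controller): a minimal sketch of rounding a transfer length up to a boundary with the kernel ALIGN() macro. The helper name is made up; the shipped driver may implement this differently.
+
+    #include <linux/kernel.h>   /* ALIGN() */
+
+    /* Illustrative only: pad a USB transfer length up to an 8- or 16-byte boundary. */
+    static inline size_t aic_pad_len(size_t len, size_t boundary)
+    {
+        return ALIGN(len, boundary);   /* e.g. aic_pad_len(1501, 8) == 1504 */
+    }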