/*
 * Copyright (c) 2019-2025 Beijing Hanwei Innovation Technology Ltd. Co. and
 * its subsidiaries and affiliates (collectively called MKSEMI).
 *
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form, except as embedded into an MKSEMI
 *    integrated circuit in a product or a software update for such product,
 *    must reproduce the above copyright notice, this list of conditions and
 *    the following disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * 3. Neither the name of MKSEMI nor the names of its contributors may be used
 *    to endorse or promote products derived from this software without
 *    specific prior written permission.
 *
 * 4. This software, with or without modification, must only be used with a
 *    MKSEMI integrated circuit.
 *
 * 5. Any software provided in binary form under this license must not be
 *    reverse engineered, decompiled, modified and/or disassembled.
 *
 * THIS SOFTWARE IS PROVIDED BY MKSEMI "AS IS" AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL MKSEMI OR CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "wsf_queue.h"
#include "wsf_buf.h"
#include "mk_uart.h"
#include "mk_power.h"
#include "mk_misc.h"
#include "mk_timer.h"
#include "uwb_api.h"
#include "uci_tl_comm.h"
#include "uci_tl_task.h"
#include "board.h"

#if (UCI_INTF_PORT == 0) && (UCI_INTF_UART_WITH_IDLE_DET != 0)

#define UCI_IDLE_TIMER TIMER0
#define UCI_IDLE_TIMER_ID TIMER_ID0
#define FINE_TUNING_FACTOR (2.5)

#if 1
#define UCI_PORT UART_ID0
#define RX_DMA_CH (DMA_CH4)
#define DMA_REQ_UART_RX DMA_REQ_UART0_RX
#else
#define UCI_PORT UART_ID1
#define RX_DMA_CH (DMA_CH6)
#define DMA_REQ_UART_RX DMA_REQ_UART1_RX
#endif

#define TIM_EN_DMA_CH (DMA_CH0)
#define TIM_RECNT_DMA_CH (DMA_CH1)

static void uci_tl_up_req(void);
static void uci_tl_setup(void);
static bool uci_tl_up_is_active(void);
static void uci_rx_callback(void *dev, uint32_t err_code);
static void uci_tx_over_callback(void *dev, uint32_t err_code);
static void idle_timer_callback(void *dev, uint32_t time);
static void uci_tl_stop_rx(void);
static void uci_tl_restart_rx(void);
#if !UCI_INTF_UART_HS
static void uci_tl_reset(void);
#endif

uci_tl_dev_t g_uci_tl_dev = {
    .uci_tl_setup = &uci_tl_setup,
#if UCI_INTF_UART_HS
    .uci_tl_resume = &uci_tl_setup,
#else
    .uci_tl_resume = &uci_tl_reset,
#endif
    .uci_tl_rx_stop = &uci_tl_stop_rx,
    .uci_tl_rx_restart = &uci_tl_restart_rx,
    .uci_tl_up_is_active = &uci_tl_up_is_active,
    .uci_tl_up_req = &uci_tl_up_req,
    .uci_tl_timer_notify = NULL,
};

static struct UART_CFG_T uci_uart_cfg = {
    .parity = UART_PARITY_NONE,
    .stop = UART_STOP_BITS_1,
    .data = UART_DATA_BITS_8,
    .flow = UART_FLOW_CONTROL_NONE,
    .rx_level = UART_RXFIFO_CHAR_1,
    .tx_level = UART_TXFIFO_EMPTY,
    .baud = UCI_INTF_UART_BAUD,
    .dma_en = true,
    .int_rx = false,
    .int_tx = false,
};

static uint8_t recv_buff[UCI_RX_BUFF_SIZE] = {0};
static struct UCI_TL_MSG_T *tl_up_msg = NULL;
static bool tx_idle = true;
static uint32_t idle_timer_ctrl_reg = TIMER_CTRL_INT_EN_MSK | TIMER_CTRL_ENABLE_MSK;
static uint32_t idle_timer_val_reg;
static const uint8_t retry_ntf[] = {0x60, 0x01, 0x00, 0x01, 0x0A};

static struct TIMER_CFG_T timer_cfg = {
    .extin_type = TIMER_EXTIN_NONE,
    .load = 0xffffffff,
    .int_en = true,
    .callback = idle_timer_callback,
};

static const double tick_per_byte[BAUD_MAX] = {
    (FINE_TUNING_FACTOR)*624000000.0 / 1200,    // BAUD_1200
    (FINE_TUNING_FACTOR)*624000000.0 / 2400,    // BAUD_2400
    (FINE_TUNING_FACTOR)*624000000.0 / 4800,    // BAUD_4800
    (FINE_TUNING_FACTOR)*624000000.0 / 9600,    // BAUD_9600
    (FINE_TUNING_FACTOR)*624000000.0 / 19200,   // BAUD_19200
    (FINE_TUNING_FACTOR)*624000000.0 / 38400,   // BAUD_38400
    (FINE_TUNING_FACTOR)*624000000.0 / 57600,   // BAUD_57600
    (FINE_TUNING_FACTOR)*624000000.0 / 115200,  // BAUD_115200
    (FINE_TUNING_FACTOR)*624000000.0 / 230400,  // BAUD_230400
    (FINE_TUNING_FACTOR)*624000000.0 / 460800,  // BAUD_460800
    (FINE_TUNING_FACTOR)*624000000.0 / 921600,  // BAUD_921600
    (FINE_TUNING_FACTOR)*624000000.0 / 1843200, // BAUD_1843200
    (FINE_TUNING_FACTOR)*624000000.0 / 1000000, // BAUD_1000000
    (FINE_TUNING_FACTOR)*624000000.0 / 2000000, // BAUD_2000000
};

static struct DMA_CH_CFG_T usr_dma_ch_cfg = {
    .fifo_th = DMA_FIFO_TH_1,
    .src_burst_size = DMA_SRC_BURST_SIZE_1,
    .src_width = DMA_WIDTH_4B,
    .dst_width = DMA_WIDTH_4B,
    .src_addr_ctrl = DMA_ADDR_FIXED,
    .dst_addr_ctrl = DMA_ADDR_FIXED,
    .src_req_sel = DMA_REQ_UART_RX,
    .dst_req_sel = DMA_REQ_MEM,
};

#if (UCI_INTF_UART_HS)
static bool rx_idle = true;
static void host2slave_gpio_callback(enum IO_PIN_T pin);
#endif
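/*
 * Implementation note (added commentary; the timer clock rate is an
 * assumption inferred from this file): UCI frame boundaries on the UART are
 * detected with a hardware idle timer that is re-armed by DMA on every
 * received byte.  usr_dma_ch_cfg routes the UART RX request to two helper
 * channels: TIM_EN_DMA_CH rewrites UCI_IDLE_TIMER->CTRL (re-enable) and
 * TIM_RECNT_DMA_CH rewrites UCI_IDLE_TIMER->VALUE (reload) each time a byte
 * arrives, so the timer can only expire once the line has been quiet for
 * roughly FINE_TUNING_FACTOR character times.  The expiry then runs
 * idle_timer_callback(), which treats the bytes buffered so far as one
 * complete UCI packet.
 *
 * The tick_per_byte[] entries appear to assume a 62.4 MHz timer clock (the
 * UCI_INTF_UART_HS path below uses 62400 ticks per millisecond): one 10-bit
 * UART character (start + 8 data + stop) lasts 10 / baud seconds, i.e.
 * 624000000 / baud ticks, scaled by FINE_TUNING_FACTOR (2.5) for margin.
 * For example, at 115200 baud the idle timeout works out to about
 * 2.5 * 624000000 / 115200 = 13542 ticks, roughly 217 us.
 */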
static void uci_tl_setup(void)
{
    uart_open(UCI_PORT, &uci_uart_cfg);
    timer_open(UCI_IDLE_TIMER_ID, &timer_cfg);
    UCI_IDLE_TIMER->CTRL &= ~TIMER_CTRL_ENABLE_MSK;
    idle_timer_val_reg = (uint32_t)tick_per_byte[UCI_INTF_UART_BAUD];
    dma_open(TIM_EN_DMA_CH, &usr_dma_ch_cfg);
    dma_open(TIM_RECNT_DMA_CH, &usr_dma_ch_cfg);
#if (UCI_INTF_UART_HS)
    /* Host-to-slave pin pull-up */
    io_pull_set(HOST2SLAVE_HS_GPIO, IO_PULL_UP, IO_PULL_UP_LEVEL0);
    /* Host-to-slave pin wake-up enabled by low level */
    power_wakeup_enable((enum POWER_WAKEUP_SOURCE_T)HOST2SLAVE_HS_GPIO, POWER_WAKEUP_LEVEL_LOW);
    gpio_pin_set_dir(SLAVE2HOST_HS_GPIO, GPIO_DIR_OUT, 1);
    gpio_pin_set_dir(HOST2SLAVE_HS_GPIO, GPIO_DIR_IN, 0);
    rx_idle = true;
    gpio_enable_irq(HOST2SLAVE_HS_GPIO, GPIO_IRQ_TYPE_FALLING_EDGE, host2slave_gpio_callback);
#else
    memset(recv_buff, 0, UCI_HEADER_SIZE);
    uart_receive(UCI_PORT, recv_buff, UCI_RX_BUFF_SIZE, uci_rx_callback);
    dma_transfer(TIM_EN_DMA_CH, &idle_timer_ctrl_reg, (uint32_t *)&UCI_IDLE_TIMER->CTRL, UCI_RX_BUFF_SIZE, NULL);
    dma_transfer(TIM_RECNT_DMA_CH, &idle_timer_val_reg, (uint32_t *)&UCI_IDLE_TIMER->VALUE, UCI_RX_BUFF_SIZE, NULL);
#endif
}

#if !UCI_INTF_UART_HS
static void uci_tl_reset(void)
{
    uart_open(UCI_PORT, &uci_uart_cfg);
    timer_open(UCI_IDLE_TIMER_ID, &timer_cfg);
    UCI_IDLE_TIMER->CTRL &= ~TIMER_CTRL_ENABLE_MSK;
    idle_timer_val_reg = (uint32_t)tick_per_byte[UCI_INTF_UART_BAUD];
    dma_open(TIM_EN_DMA_CH, &usr_dma_ch_cfg);
    dma_open(TIM_RECNT_DMA_CH, &usr_dma_ch_cfg);
}
#endif

static void uci_tl_send_ntf(const uint8_t *msg, uint16_t len)
{
    struct UCI_TL_MSG_T *p = WsfBufAlloc((uint16_t)(len + sizeof(struct UCI_TL_MSG_T)));

    if (p != NULL)
    {
        p->msg_length = len;
        memcpy(p->msg, msg, len);
        WsfQueueEnq(&g_uci_tl_dev.tl_up_queue, p);
        g_uci_tl_dev.uci_tl_up_done_notify();
    }
}

static void uci_rx_callback(void *dev, uint32_t err_code)
{
    if (err_code != DMA_INT_TYPE_DONE)
    {
        idle_timer_callback(NULL, 0);
    }
}

static void idle_timer_callback(void *dev, uint32_t time)
{
    uint16_t frame_len;

    UCI_IDLE_TIMER->CTRL &= ~TIMER_CTRL_ENABLE_MSK;
    UCI_IDLE_TIMER->VALUE = idle_timer_val_reg;
    uart_rx_force_abort_dma(UCI_PORT, NULL);
    dma_abort(TIM_EN_DMA_CH, NULL);
    dma_abort(TIM_RECNT_DMA_CH, NULL);

    if (((recv_buff[0] >> 5) & 0x0F) > 3)
    {
        LOG_INFO(TRACE_MODULE_UCI, "Unknown MT field:%X\r\n", (recv_buff[0] >> 5) & 0x0F);
        goto exit;
    }

    frame_len = (uint16_t)(((*(recv_buff + 2)) << 8) + *(recv_buff + 3) + UCI_HEADER_SIZE);
    if (frame_len > UCI_RX_BUFF_SIZE)
    {
        LOG_INFO(TRACE_MODULE_UCI, "Message length(%d) is too long\r\n", frame_len);
        goto exit;
    }

    if (frame_len <= (UCI_RX_BUFF_SIZE - DMA->CH[RX_DMA_CH].DATA_SIZE))
    {
        if (WsfQueueCount(&g_uci_tl_dev.tl_down_queue) < UCI_MAX_DL_ITEMS)
        {
            struct UCI_TL_MSG_T *p;
            if ((p = WsfBufAlloc((uint16_t)(frame_len + sizeof(struct UCI_TL_MSG_T)))) != NULL)
            {
                memcpy(p->msg, recv_buff, frame_len);
                p->msg_length = frame_len;
                WsfQueueEnq(&g_uci_tl_dev.tl_down_queue, p);
            }
            else
            {
                LOG_INFO(TRACE_MODULE_UCI, "No buff to queue cmd\r\n");
            }
        }
        /* Set UCI receive event */
        if (g_uci_tl_dev.uci_tl_down_notify != NULL)
        {
            g_uci_tl_dev.uci_tl_down_notify();
        }
    }
    else
    {
        LOG_INFO(TRACE_MODULE_UCI, "UCI message rx timeout or cnt is wrong\r\n");
        uci_tl_send_ntf(retry_ntf, sizeof(retry_ntf));
    }

exit:
    power_mode_clear(POWER_UNIT_UCI_RX);
#if (UCI_INTF_UART_HS)
    rx_idle = true;
    return;
#else
    memset(recv_buff, 0, UCI_HEADER_SIZE);
    uart_receive(UCI_PORT, recv_buff, UCI_RX_BUFF_SIZE, uci_rx_callback);
    dma_transfer(TIM_EN_DMA_CH, &idle_timer_ctrl_reg, (uint32_t *)&UCI_IDLE_TIMER->CTRL, UCI_RX_BUFF_SIZE, NULL);
    dma_transfer(TIM_RECNT_DMA_CH, &idle_timer_val_reg, (uint32_t *)&UCI_IDLE_TIMER->VALUE, UCI_RX_BUFF_SIZE, NULL);
#endif
}
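/*
 * Added commentary on the uplink (device-to-host) path; the flow described
 * here is inferred from the functions that follow.  uci_tl_up_req() dequeues
 * one message from tl_up_queue and starts a DMA-driven uart_send(); in the
 * UCI_INTF_UART_HS handshake build it first drives SLAVE2HOST_HS_GPIO low
 * and waits UCI_INTF_UART_HS_DELAYED_SEND_US before transmitting.
 * uci_tx_over_callback() frees the buffer when the transfer completes, and
 * uci_tl_up_is_active() releases SLAVE2HOST_HS_GPIO (back high), marks the
 * link idle and drops the POWER_UNIT_UCI_TX request once
 * uart_tx_in_progress() reports the transmitter idle.  In the downlink
 * direction, host2slave_gpio_callback() reacts to the host pulling
 * HOST2SLAVE_HS_GPIO low (also configured as a low-level wake-up source) by
 * arming reception with a UCI_HS_TIMEOUT_MS guard timeout.
 */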
static void uci_tx_over_callback(void *dev, uint32_t err_code)
{
    if (tl_up_msg)
    {
        WsfBufFree(tl_up_msg);
        tl_up_msg = NULL;
    }
    g_uci_tl_dev.uci_tl_up_done_notify();
}

#if (UCI_INTF_UART_HS)
static void host2slave_gpio_callback(enum IO_PIN_T pin)
{
    if (!gpio_pin_get_val(HOST2SLAVE_HS_GPIO) && rx_idle)
    {
        rx_idle = false;
        memset(recv_buff, 0, UCI_HEADER_SIZE);
        uart_receive(UCI_PORT, recv_buff, UCI_RX_BUFF_SIZE, uci_rx_callback);
        UCI_IDLE_TIMER->VALUE = UCI_HS_TIMEOUT_MS * 62400;
        UCI_IDLE_TIMER->CTRL |= TIMER_CTRL_ENABLE_MSK;
        dma_transfer(TIM_EN_DMA_CH, &idle_timer_ctrl_reg, (uint32_t *)&UCI_IDLE_TIMER->CTRL, UCI_RX_BUFF_SIZE, NULL);
        dma_transfer(TIM_RECNT_DMA_CH, &idle_timer_val_reg, (uint32_t *)&UCI_IDLE_TIMER->VALUE, UCI_RX_BUFF_SIZE, NULL);
        power_mode_request(POWER_UNIT_UCI_RX, POWER_MODE_SLEEP);
    }
}
#endif

static void uci_tl_up_req(void)
{
    if (!tx_idle)
    {
        return;
    }

    /* Get messages from the up queue */
    tl_up_msg = WsfQueueDeq(&g_uci_tl_dev.tl_up_queue);
    if (tl_up_msg != NULL)
    {
        if (tl_up_msg->msg_length == 0)
        {
            WsfBufFree(tl_up_msg);
            tl_up_msg = NULL;
            g_uci_tl_dev.uci_tl_up_done_notify();
        }
        else
        {
            tx_idle = false;
#if (UCI_INTF_UART_HS)
            gpio_pin_clr(SLAVE2HOST_HS_GPIO);
            /* Delay before sending */
            delay_us(UCI_INTF_UART_HS_DELAYED_SEND_US);
#endif
            uart_send(UCI_PORT, tl_up_msg->msg, tl_up_msg->msg_length, uci_tx_over_callback);
            power_mode_request(POWER_UNIT_UCI_TX, POWER_MODE_SLEEP);
        }
    }
    else
    {
        LOG_INFO(TRACE_MODULE_UCI, "Up queue is empty\r\n");
        g_uci_tl_dev.uci_tl_up_done_notify();
    }
}

static bool uci_tl_up_is_active(void)
{
    if (!uart_tx_in_progress(UCI_PORT) && !tx_idle)
    {
#if (UCI_INTF_UART_HS)
        gpio_pin_set(SLAVE2HOST_HS_GPIO);
#endif
        tx_idle = true;
        power_mode_clear(POWER_UNIT_UCI_TX);
    }
    return !tx_idle;
}

static void uci_tl_stop_rx(void)
{
#if (UCI_INTF_UART_HS)
#else
    UCI_IDLE_TIMER->CTRL &= ~TIMER_CTRL_ENABLE_MSK;
    UCI_IDLE_TIMER->VALUE = idle_timer_val_reg;
    uart_rx_force_abort_dma(UCI_PORT, NULL);
    dma_abort(TIM_EN_DMA_CH, NULL);
    dma_abort(TIM_RECNT_DMA_CH, NULL);
#endif
}

static void uci_tl_restart_rx(void)
{
#if (UCI_INTF_UART_HS)
#else
    memset(recv_buff, 0, UCI_HEADER_SIZE);
    uart_receive(UCI_PORT, recv_buff, UCI_RX_BUFF_SIZE, uci_rx_callback);
    dma_transfer(TIM_EN_DMA_CH, &idle_timer_ctrl_reg, (uint32_t *)&UCI_IDLE_TIMER->CTRL, UCI_RX_BUFF_SIZE, NULL);
    dma_transfer(TIM_RECNT_DMA_CH, &idle_timer_val_reg, (uint32_t *)&UCI_IDLE_TIMER->VALUE, UCI_RX_BUFF_SIZE, NULL);
#endif
}

#endif
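/*
 * Integration sketch (added, illustrative only): the task layer is expected
 * to provide the notify hooks and queues referenced above and then bring the
 * transport up through g_uci_tl_dev.  The hook names below are hypothetical;
 * only the g_uci_tl_dev fields and functions come from this file:
 *
 *     g_uci_tl_dev.uci_tl_down_notify    = &uci_task_set_rx_event;  // hypothetical task hook
 *     g_uci_tl_dev.uci_tl_up_done_notify = &uci_task_set_tx_event;  // hypothetical task hook
 *     g_uci_tl_dev.uci_tl_setup();                                  // open UART, idle timer and DMA
 *
 * The task then drains received packets from g_uci_tl_dev.tl_down_queue with
 * WsfQueueDeq(), and sends responses or notifications by enqueuing them to
 * tl_up_queue and calling g_uci_tl_dev.uci_tl_up_req().
 */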