/*
 * Copyright (c) 2019-2023 Beijing Hanwei Innovation Technology Ltd. Co. and
 * its subsidiaries and affiliates (collectively called MKSEMI).
 *
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form, except as embedded into an MKSEMI
 *    integrated circuit in a product or a software update for such product,
 *    must reproduce the above copyright notice, this list of conditions and
 *    the following disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * 3. Neither the name of MKSEMI nor the names of its contributors may be used
 *    to endorse or promote products derived from this software without
 *    specific prior written permission.
 *
 * 4. This software, with or without modification, must only be used with a
 *    MKSEMI integrated circuit.
 *
 * 5. Any software provided in binary form under this license must not be
 *    reverse engineered, decompiled, modified and/or disassembled.
 *
 * THIS SOFTWARE IS PROVIDED BY MKSEMI "AS IS" AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL MKSEMI OR CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "mk_spi.h"
#include "mk_trace.h"
#include "mk_clock.h"
#include "mk_reset.h"
#include "string.h"

static struct SPI_HANDLE_T spi_handle[SPI_MAX_NUM] = {
    {
        .base = SPI0,
        .irq = SPI0_IRQn,
        .dma_rx_ch = DMA_CH0,
        .dma_tx_ch = DMA_CH1,
        .tx_dummy = 0,
    },
    {
        .base = SPI1,
        .irq = SPI1_IRQn,
        .dma_rx_ch = DMA_CH4,
        .dma_tx_ch = DMA_CH5,
        .tx_dummy = 0,
    },
};

#if SPI_DMA_MODE_EN
static void spi_dma_callback(void *ch, uint32_t err_code);
static void spi_dma_abort_callback(void *ch, uint32_t err_code);
#endif

static uint8_t spi_get_frame_size(enum SPI_DEV_T id)
{
    uint8_t bits, cnt;

    // DSS holds data_bits - 1: frames of up to 8 bits occupy one byte, wider frames two
    bits = GET_BIT_FIELD(spi_handle[id].base->CTRL0, SPI_CTRL0_DSS_MSK, SPI_CTRL0_DSS_POS);
    if (bits < 8) {
        cnt = 1;
    } else {
        cnt = 2;
    }
    return cnt;
}

static int spi_state_set(enum SPI_DEV_T id, enum SPI_STATE_T state)
{
    int ret = DRV_OK;
    uint32_t lock = int_lock();

    // update state
    switch (spi_handle[id].state) {
    case SPI_STATE_READY:
        spi_handle[id].state = state;
        break;
    case SPI_STATE_BUSY_RX:
        if (state == SPI_STATE_BUSY_TX) {
            spi_handle[id].state = SPI_STATE_BUSY_TX_RX;
        } else {
            ret = DRV_BUSY;
        }
        break;
    case SPI_STATE_BUSY_TX:
        if (state == SPI_STATE_BUSY_RX) {
            spi_handle[id].state = SPI_STATE_BUSY_TX_RX;
        } else {
            ret = DRV_BUSY;
        }
        break;
    case SPI_STATE_BUSY_TX_RX:
        ret = DRV_BUSY;
        break;
    case SPI_STATE_RESET:
    case SPI_STATE_TIMEOUT:
    case SPI_STATE_ERROR:
        ret = DRV_ERROR;
        break;
    }
    int_unlock(lock);
    return ret;
}

static void spi_state_clear(enum SPI_DEV_T id, enum SPI_STATE_T state)
{
    uint32_t lock = int_lock();

    // update state
    spi_handle[id].state &= ~state;
    if (spi_handle[id].state == 0) {
        spi_handle[id].state = SPI_STATE_READY;
    }
    int_unlock(lock);
}

enum SPI_STATE_T spi_state_get(enum SPI_DEV_T id)
{
    return spi_handle[id].state;
}

bool spi_is_busy(enum SPI_DEV_T id)
{
    return ((spi_handle[id].state != SPI_STATE_RESET) && (spi_handle[id].base->CTRL1 & SPI_CTRL1_SSE_MSK) &&
            (spi_handle[id].base->STATUS0 & SPI_STATUS0_BSY_MSK));
}

int spi_open(enum SPI_DEV_T id, struct SPI_CFG_T *config)
{
    // reject an out-of-range id or a missing config (either one is fatal)
    if ((id >= SPI_MAX_NUM) || (config == NULL)) {
        return DRV_ERROR;
    } else if (id == SPI_ID0) {
        // enable SPI0 clock
        clock_enable(CLOCK_SPI0);
        reset_module(RESET_MODULE_SPI0);
    } else if (id == SPI_ID1) {
        // enable SPI1 clock
        clock_enable(CLOCK_SPI1);
        reset_module(RESET_MODULE_SPI1);
    }

    uint32_t val;
    uint32_t cpsdvsr, scr;
    uint32_t spi_clk = clock_get_frequency(CLOCK_AHB_CLK);
    ASSERT(config->bit_rate <= spi_clk / (SPI_CPSDVSR_MIN * (1 + SPI_SCR_MIN)), "SPI rate too large: %d", config->bit_rate);
    ASSERT(config->bit_rate >= spi_clk / (SPI_CPSDVSR_MAX * (1 + SPI_SCR_MAX)), "SPI rate too small: %d", config->bit_rate);
    ASSERT(config->data_bits <= SPI_DATA_BITS_MAX && config->data_bits >= SPI_DATA_BITS_MIN, "Invalid SPI data bits: %d", config->data_bits);

    // reset device handle
    spi_handle[id].tx_buff = NULL;
    spi_handle[id].tx_callback = NULL;
    spi_handle[id].tx_count = 0;
    spi_handle[id].tx_size = 0;
    spi_handle[id].rx_buff = NULL;
    spi_handle[id].rx_callback = NULL;
    spi_handle[id].rx_count = 0;
    spi_handle[id].rx_size = 0;
    spi_handle[id].slave = config->slave;
    spi_handle[id].dma_rx = config->dma_rx;
    spi_handle[id].dma_tx = config->dma_tx;
    spi_handle[id].int_rx = config->int_rx;
    spi_handle[id].int_tx = config->int_tx;

    // calculate divisor: bit_rate = spi_clk / (cpsdvsr * (1 + scr))
    val = (spi_clk + config->bit_rate - 1) / config->bit_rate;
    cpsdvsr = (val + SPI_SCR_MAX) / (SPI_SCR_MAX + 1);
    if (cpsdvsr < SPI_CPSDVSR_MIN) {
        cpsdvsr = SPI_CPSDVSR_MIN;
    } else {
        if (cpsdvsr & 0x1) {
            // the prescale divisor must be even
            cpsdvsr += 1;
        }
        if (cpsdvsr > SPI_CPSDVSR_MAX) {
            cpsdvsr = SPI_CPSDVSR_MAX;
        }
    }
    scr = (val + cpsdvsr - 1) / cpsdvsr;
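    /*
     * Worked example (a sketch, assuming the PL022-typical limits
     * SPI_CPSDVSR_MIN = 2, SPI_SCR_MIN = 0, SPI_SCR_MAX = 255; the actual
     * values live in mk_spi.h): with spi_clk = 32 MHz and bit_rate = 1 MHz,
     * val = 32, cpsdvsr is clamped up to 2, and the lines below reduce scr
     * from 16 to 15, giving 32 MHz / (2 * (1 + 15)) = 1 MHz exactly.
     */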
    if (scr > SPI_SCR_MIN) {
        scr -= 1;
    }
    if (scr > SPI_SCR_MAX) {
        scr = SPI_SCR_MAX;
    }

    val = SPI_CTRL0_SCR(scr) | (config->clk_phase ? SPI_CTRL0_SPH_MSK : 0) | (config->clk_polarity ? SPI_CTRL0_SPO_MSK : 0) |
          (config->ti_mode ? SPI_CTRL0_FRF(1) : 0) | SPI_CTRL0_DSS(config->data_bits - 1);
    spi_handle[id].base->CTRL0 = val;

    // enable SPI
    val = (config->slave ? SPI_CTRL1_MS_MSK : 0) | SPI_CTRL1_SSE_MSK;
    spi_handle[id].base->CTRL1 = val;
    spi_handle[id].base->PRESCALER = SPI_PRESCALER(cpsdvsr);

#if SPI_INT_MODE_EN
    if (spi_handle[id].int_rx || spi_handle[id].int_tx) {
        NVIC_SetPriority(spi_handle[id].irq, IRQ_PRIORITY_NORMAL);
        NVIC_ClearPendingIRQ(spi_handle[id].irq);
        NVIC_EnableIRQ(spi_handle[id].irq);
    }
#endif

    spi_handle[id].state = SPI_STATE_READY;
    return DRV_OK;
}

int spi_close(enum SPI_DEV_T id)
{
    if (id >= SPI_MAX_NUM) {
        return DRV_ERROR;
    }

#if SPI_INT_MODE_EN
    if (spi_handle[id].int_rx || spi_handle[id].int_tx) {
        NVIC_DisableIRQ(spi_handle[id].irq);
        NVIC_ClearPendingIRQ(spi_handle[id].irq);
    }
#endif

    // disable SPI
    spi_handle[id].base->CTRL1 &= ~SPI_CTRL1_SSE_MSK;

    if (id == SPI_ID0) {
        // disable SPI0 clock
        clock_disable(CLOCK_SPI0);
    } else if (id == SPI_ID1) {
        // disable SPI1 clock
        clock_disable(CLOCK_SPI1);
    }
    spi_handle[id].state = SPI_STATE_RESET;
    return DRV_OK;
}

#if defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif

/*
 * TX
 */
int spi_send(enum SPI_DEV_T id, uint8_t *tx_buf, uint32_t len, drv_callback_t callback)
{
    uint8_t frame_size = spi_get_frame_size(id);
    if ((tx_buf == NULL) || (len == 0) || (len % frame_size)) {
        LOG_INFO(TRACE_MODULE_DRIVER, "Invalid SPI send parameters\r\n");
        return DRV_ERROR;
    }

    int ret = spi_state_set(id, SPI_STATE_BUSY_TX);
    if (ret != DRV_OK) {
        return ret;
    }

    spi_handle[id].tx_buff = tx_buf;
    spi_handle[id].tx_count = 0;
    spi_handle[id].tx_size = len;
    spi_handle[id].tx_callback = callback;
    uint32_t frame_data = 0U;

    if (spi_handle[id].dma_tx) {
#if SPI_DMA_MODE_EN
        struct DMA_CH_CFG_T spi_tx_dma_cfg = {
            .fifo_th = (frame_size == 2 ? DMA_FIFO_TH_2 : DMA_FIFO_TH_1),
            .src_burst_size = DMA_SRC_BURST_SIZE_1,
            .src_width = (frame_size == 2 ? DMA_WIDTH_2B : DMA_WIDTH_1B),
            .dst_width = (frame_size == 2 ? DMA_WIDTH_2B : DMA_WIDTH_1B),
            .src_addr_ctrl = DMA_ADDR_INC,
            .dst_addr_ctrl = DMA_ADDR_FIXED,
            .src_req_sel = DMA_REQ_MEM,
            .dst_req_sel = (id == SPI_ID1 ? DMA_REQ_SPI1_TX : DMA_REQ_SPI0_TX),
        };
        spi_handle[id].base->DMA_EN = SPI_DMA_EN_TX_MSK;
        dma_open(spi_handle[id].dma_tx_ch, &spi_tx_dma_cfg);
        dma_transfer(spi_handle[id].dma_tx_ch, tx_buf, (uint8_t *)&spi_handle[id].base->DATA, len / frame_size, spi_dma_callback);
#endif
    } else if (spi_handle[id].int_tx) {
#if SPI_INT_MODE_EN
        // write data to FIFO
        while ((spi_handle[id].base->STATUS0 & SPI_STATUS0_TNF_MSK) && (spi_handle[id].tx_count < spi_handle[id].tx_size)) {
            frame_data = 0;
            memcpy(&frame_data, &spi_handle[id].tx_buff[spi_handle[id].tx_count], frame_size);
            spi_handle[id].base->DATA = frame_data;
            spi_handle[id].tx_count += frame_size;
        }
        // enable interrupts
        spi_handle[id].base->INTR_EN |= SPI_INTR_EN_TX_MSK;
#endif
    } else {
#if SPI_POLL_MODE_EN
        // polling
        while (spi_handle[id].tx_count < spi_handle[id].tx_size) {
            if ((spi_handle[id].base->CTRL1 & SPI_CTRL1_SSE_MSK) == 0) {
                ret = DRV_ERROR;
                break;
            }
            if (spi_handle[id].base->STATUS0 & SPI_STATUS0_TNF_MSK) {
                frame_data = 0;
                memcpy(&frame_data, &spi_handle[id].tx_buff[spi_handle[id].tx_count], frame_size);
                spi_handle[id].base->DATA = frame_data;
                spi_handle[id].tx_count += frame_size;
            }
        }
        // wait until the shifter drains before releasing the TX-busy state
        while (spi_is_busy(id)) {
        }
        spi_state_clear(id, SPI_STATE_BUSY_TX);
        if (spi_handle[id].tx_callback) {
            spi_handle[id].tx_callback(&id, 0);
        }
#endif
    }
    return ret;
}
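/*
 * Usage sketch (illustrative only; assumes SPI0 was opened in poll mode,
 * i.e. dma_tx = 0 and int_tx = 0, so the call blocks until the FIFO drains):
 *
 *     uint8_t cmd[2] = {0x9F, 0x00};
 *     if (spi_send(SPI_ID0, cmd, sizeof(cmd), NULL) != DRV_OK) {
 *         // device busy or not opened
 *     }
 */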
/*
 * RX
 * In master RX DMA mode, TX DMA must also be enabled to clock out dummy data
 */
int spi_receive(enum SPI_DEV_T id, uint8_t *rx_buf, uint32_t len, drv_callback_t callback)
{
    uint8_t frame_size = spi_get_frame_size(id);
    if ((rx_buf == NULL) || (len == 0) || (len % frame_size)) {
        LOG_INFO(TRACE_MODULE_DRIVER, "Invalid SPI recv parameters\r\n");
        return DRV_ERROR;
    }

    int ret = spi_state_set(id, SPI_STATE_BUSY_RX);
    if (ret != DRV_OK) {
        return ret;
    }

    spi_handle[id].rx_buff = rx_buf;
    spi_handle[id].rx_count = 0;
    spi_handle[id].rx_size = len;
    spi_handle[id].rx_callback = callback;
    uint32_t frame_data = 0U;
    uint32_t dummy_cnt = 0;

    // clear FIFO
    while (spi_handle[id].base->STATUS0 & SPI_STATUS0_RNE_MSK) {
        spi_handle[id].base->DATA;
    }
    spi_handle[id].base->INTR_CLR = ~0UL;

    if (spi_handle[id].dma_rx) {
#if SPI_DMA_MODE_EN
        struct DMA_CH_CFG_T spi_rx_dma_cfg = {
            .fifo_th = (frame_size == 2 ? DMA_FIFO_TH_2 : DMA_FIFO_TH_1),
            .src_burst_size = DMA_SRC_BURST_SIZE_1,
            .src_width = (frame_size == 2 ? DMA_WIDTH_2B : DMA_WIDTH_1B),
            .dst_width = (frame_size == 2 ? DMA_WIDTH_2B : DMA_WIDTH_1B),
            .src_addr_ctrl = DMA_ADDR_FIXED,
            .dst_addr_ctrl = DMA_ADDR_INC,
            .src_req_sel = (id == SPI_ID1 ? DMA_REQ_SPI1_RX : DMA_REQ_SPI0_RX),
            .dst_req_sel = DMA_REQ_MEM,
        };
        spi_handle[id].base->DMA_EN = (spi_handle[id].slave == 0) ? (SPI_DMA_EN_RX_MSK | SPI_DMA_EN_TX_MSK) : SPI_DMA_EN_RX_MSK;
        dma_open(spi_handle[id].dma_rx_ch, &spi_rx_dma_cfg);
        dma_transfer(spi_handle[id].dma_rx_ch, (uint8_t *)&spi_handle[id].base->DATA, rx_buf, len / frame_size, spi_dma_callback);

        if (spi_handle[id].slave == false) {
            // master mode: feed dummy frames from a fixed source address
            struct DMA_CH_CFG_T spi_tx_dma_cfg = {
                .fifo_th = (frame_size == 2 ? DMA_FIFO_TH_2 : DMA_FIFO_TH_1),
                .src_burst_size = DMA_SRC_BURST_SIZE_1,
                .src_width = (frame_size == 2 ? DMA_WIDTH_2B : DMA_WIDTH_1B),
                .dst_width = (frame_size == 2 ? DMA_WIDTH_2B : DMA_WIDTH_1B),
                .src_addr_ctrl = DMA_ADDR_FIXED,
                .dst_addr_ctrl = DMA_ADDR_FIXED,
                .src_req_sel = DMA_REQ_MEM,
                .dst_req_sel = (id == SPI_ID1 ? DMA_REQ_SPI1_TX : DMA_REQ_SPI0_TX),
            };
            dma_open(spi_handle[id].dma_tx_ch, &spi_tx_dma_cfg);
            dma_transfer(spi_handle[id].dma_tx_ch, &spi_handle[id].tx_dummy, (uint8_t *)&spi_handle[id].base->DATA, len / frame_size, NULL);
        }
#endif
    } else if (spi_handle[id].int_rx) {
#if SPI_INT_MODE_EN
        // enable interrupts
        spi_handle[id].base->INTR_EN |= SPI_INTR_EN_RX_MSK | SPI_INTR_EN_RT_MSK | SPI_INTR_EN_ROR_MSK;
        if ((spi_handle[id].slave == 0) &&
            (((spi_handle[id].state & SPI_STATE_BUSY_TX) == 0) && (spi_handle[id].base->STATUS0 & SPI_STATUS0_TFE_MSK))) {
            // send as much dummy data as possible
            dummy_cnt = spi_handle[id].rx_size - spi_handle[id].rx_count;
            while ((dummy_cnt >= frame_size) && (spi_handle[id].base->STATUS0 & SPI_STATUS0_TNF_MSK)) {
                spi_handle[id].base->DATA = spi_handle[id].tx_dummy;
                dummy_cnt -= frame_size;
            }
        }
#endif
    } else {
#if SPI_POLL_MODE_EN
        // polling
        while (spi_handle[id].rx_count < spi_handle[id].rx_size) {
            if ((spi_handle[id].base->CTRL1 & SPI_CTRL1_SSE_MSK) == 0) {
                ret = DRV_ERROR;
                break;
            }
            if (spi_handle[id].base->STATUS0 & SPI_STATUS0_RNE_MSK) {
                frame_data = spi_handle[id].base->DATA;
                memcpy(&spi_handle[id].rx_buff[spi_handle[id].rx_count], &frame_data, frame_size);
                spi_handle[id].rx_count += frame_size;
            }
            if ((spi_handle[id].slave == 0) &&
                (((spi_handle[id].state & SPI_STATE_BUSY_TX) == 0) && (spi_handle[id].base->STATUS0 & SPI_STATUS0_TFE_MSK))) {
                if (dummy_cnt < spi_handle[id].rx_size) {
                    // send one dummy frame, counting bytes to match rx_size
                    spi_handle[id].base->DATA = spi_handle[id].tx_dummy;
                    dummy_cnt += frame_size;
                }
            }
        }
        // update state
        spi_state_clear(id, SPI_STATE_BUSY_RX);
        if (spi_handle[id].rx_callback) {
            spi_handle[id].rx_callback(&id, 0);
        }
#endif
    }
    return ret;
}
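/*
 * Usage sketch (illustrative only): a master read in poll mode. In master
 * mode the driver clocks out tx_dummy frames itself, so only the receive
 * buffer is supplied:
 *
 *     uint8_t status;
 *     if (spi_receive(SPI_ID0, &status, sizeof(status), NULL) == DRV_OK) {
 *         // status now holds the byte clocked in from the peripheral
 *     }
 */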
/*
 * Full duplex
 * Limitation: int_tx must equal int_rx, and dma_tx must equal dma_rx
 */
int spi_transfer(enum SPI_DEV_T id, uint8_t *tx_buf, uint8_t *rx_buf, uint32_t len, drv_callback_t callback)
{
    uint8_t frame_size = spi_get_frame_size(id);
    if ((tx_buf == NULL) || (rx_buf == NULL) || (len == 0) || (len % frame_size)) {
        LOG_INFO(TRACE_MODULE_DRIVER, "Invalid SPI transfer parameters\r\n");
        return DRV_ERROR;
    }
    if ((spi_handle[id].dma_rx != spi_handle[id].dma_tx) || (spi_handle[id].int_rx != spi_handle[id].int_tx)) {
        LOG_INFO(TRACE_MODULE_DRIVER, "Invalid SPI configuration\r\n");
        return DRV_ERROR;
    }

    int ret = spi_state_set(id, SPI_STATE_BUSY_TX_RX);
    if (ret != DRV_OK) {
        return ret;
    }

    spi_handle[id].tx_buff = tx_buf;
    spi_handle[id].tx_count = 0;
    spi_handle[id].tx_size = len;
    spi_handle[id].tx_callback = NULL;
    spi_handle[id].rx_buff = rx_buf;
    spi_handle[id].rx_count = 0;
    spi_handle[id].rx_size = len;
    spi_handle[id].rx_callback = callback;

    // clear FIFO
    while (spi_handle[id].base->STATUS0 & SPI_STATUS0_RNE_MSK) {
        spi_handle[id].base->DATA;
    }
    spi_handle[id].base->INTR_CLR = ~0UL;

    uint32_t frame_data = 0U;
    if (spi_handle[id].dma_tx) {
#if SPI_DMA_MODE_EN
        struct DMA_CH_CFG_T spi_rx_dma_cfg = {
            .fifo_th = (frame_size == 2 ? DMA_FIFO_TH_2 : DMA_FIFO_TH_1),
            .src_burst_size = DMA_SRC_BURST_SIZE_1,
            .src_width = (frame_size == 2 ? DMA_WIDTH_2B : DMA_WIDTH_1B),
            .dst_width = (frame_size == 2 ? DMA_WIDTH_2B : DMA_WIDTH_1B),
            .src_addr_ctrl = DMA_ADDR_FIXED,
            .dst_addr_ctrl = DMA_ADDR_INC,
            .src_req_sel = (id == SPI_ID1 ? DMA_REQ_SPI1_RX : DMA_REQ_SPI0_RX),
            .dst_req_sel = DMA_REQ_MEM,
        };
        spi_handle[id].base->DMA_EN = SPI_DMA_EN_TX_MSK | SPI_DMA_EN_RX_MSK;
        dma_open(spi_handle[id].dma_rx_ch, &spi_rx_dma_cfg);
        dma_transfer(spi_handle[id].dma_rx_ch, (uint8_t *)&spi_handle[id].base->DATA, rx_buf, len / frame_size, spi_dma_callback);

        struct DMA_CH_CFG_T spi_tx_dma_cfg = {
            .fifo_th = (frame_size == 2 ? DMA_FIFO_TH_2 : DMA_FIFO_TH_1),
            .src_burst_size = DMA_SRC_BURST_SIZE_1,
            .src_width = (frame_size == 2 ? DMA_WIDTH_2B : DMA_WIDTH_1B),
            .dst_width = (frame_size == 2 ? DMA_WIDTH_2B : DMA_WIDTH_1B),
            .src_addr_ctrl = DMA_ADDR_INC,
            .dst_addr_ctrl = DMA_ADDR_FIXED,
            .src_req_sel = DMA_REQ_MEM,
            .dst_req_sel = (id == SPI_ID1 ? DMA_REQ_SPI1_TX : DMA_REQ_SPI0_TX),
        };
        dma_open(spi_handle[id].dma_tx_ch, &spi_tx_dma_cfg);
        dma_transfer(spi_handle[id].dma_tx_ch, tx_buf, (uint8_t *)&spi_handle[id].base->DATA, len / frame_size, spi_dma_callback);
#endif
    } else if (spi_handle[id].int_tx) {
#if SPI_INT_MODE_EN
        // write data to FIFO
        while ((spi_handle[id].base->STATUS0 & SPI_STATUS0_TNF_MSK) && (spi_handle[id].tx_count < spi_handle[id].tx_size)) {
            frame_data = 0;
            memcpy(&frame_data, &spi_handle[id].tx_buff[spi_handle[id].tx_count], frame_size);
            spi_handle[id].base->DATA = frame_data;
            spi_handle[id].tx_count += frame_size;
        }
        // enable interrupts
        spi_handle[id].base->INTR_EN |= SPI_INTR_EN_TX_MSK | SPI_INTR_EN_RX_MSK | SPI_INTR_EN_RT_MSK | SPI_INTR_EN_ROR_MSK;
#endif
    } else {
#if SPI_POLL_MODE_EN
        // polling
        while (spi_handle[id].rx_count < spi_handle[id].rx_size) {
            if ((spi_handle[id].base->CTRL1 & SPI_CTRL1_SSE_MSK) == 0) {
                ret = DRV_ERROR;
                break;
            }
            if ((spi_handle[id].base->STATUS0 & SPI_STATUS0_TNF_MSK) && (spi_handle[id].tx_count < spi_handle[id].tx_size)) {
                frame_data = 0;
                memcpy(&frame_data, &spi_handle[id].tx_buff[spi_handle[id].tx_count], frame_size);
                spi_handle[id].base->DATA = frame_data;
                spi_handle[id].tx_count += frame_size;
            }
            if (spi_handle[id].base->STATUS0 & SPI_STATUS0_RNE_MSK) {
                frame_data = spi_handle[id].base->DATA;
                memcpy(&spi_handle[id].rx_buff[spi_handle[id].rx_count], &frame_data, frame_size);
                spi_handle[id].rx_count += frame_size;
            }
        }
        // update state
        spi_state_clear(id, SPI_STATE_BUSY_TX_RX);
        if (spi_handle[id].rx_callback) {
            spi_handle[id].rx_callback(&id, 0);
        }
#endif
    }
    return ret;
}
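/*
 * Usage sketch (illustrative only): a full-duplex poll-mode exchange in
 * which every transmitted byte clocks one byte back in:
 *
 *     uint8_t tx[4] = {0x03, 0x00, 0x10, 0x00};
 *     uint8_t rx[4] = {0};
 *     if (spi_transfer(SPI_ID0, tx, rx, sizeof(tx), NULL) == DRV_OK) {
 *         // rx[] holds the frames received while tx[] was shifted out
 *     }
 */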
int spi_abort_dma(enum SPI_DEV_T id, uint32_t abort_direction, drv_callback_t abort_tx_callback, drv_callback_t abort_rx_callback)
{
#if SPI_DMA_MODE_EN
    /*
     * disable the SPI DMA rx request if enabled
     * if (REG_IS_BIT_SET(spi_handle[id].base->DMA_EN, SPI_DMA_EN_RX_MSK))
     * if (spi_handle[id].state & SPI_STATE_BUSY_RX)
     */
    if (SPI_DMA_ABORT_RX & abort_direction) {
        spi_handle[id].rx_abort_callback = abort_rx_callback;
        spi_handle[id].base->DMA_EN &= ~SPI_DMA_EN_RX_MSK;
        // dma_abort(spi_handle[id].dma_rx_ch, spi_dma_abort_callback);
        dma_force_abort(spi_handle[id].dma_rx_ch, spi_dma_abort_callback);
    }
    /*
     * disable the SPI DMA tx request if enabled
     * if (REG_IS_BIT_SET(spi_handle[id].base->DMA_EN, SPI_DMA_EN_TX_MSK))
     * if (spi_handle[id].state & SPI_STATE_BUSY_TX)
     */
    if (SPI_DMA_ABORT_TX & abort_direction) {
        spi_handle[id].tx_abort_callback = abort_tx_callback;
        spi_handle[id].base->DMA_EN &= ~SPI_DMA_EN_TX_MSK;
        // dma_abort(spi_handle[id].dma_tx_ch, spi_dma_abort_callback);
        dma_force_abort(spi_handle[id].dma_tx_ch, spi_dma_abort_callback);
    }
#endif
    return DRV_OK;
}

#if defined(__GNUC__)
#pragma GCC diagnostic pop
#endif
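/*
 * Usage sketch (illustrative only): cancel an in-flight DMA receive. The
 * abort callback runs from spi_dma_abort_callback once the channel has
 * actually stopped:
 *
 *     static void on_rx_aborted(void *param, uint32_t err_code)
 *     {
 *         // safe to reuse the RX buffer from here
 *     }
 *
 *     spi_abort_dma(SPI_ID0, SPI_DMA_ABORT_RX, NULL, on_rx_aborted);
 */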
#if SPI_DMA_MODE_EN
static void spi_dma_abort_callback(void *ch, uint32_t err_code)
{
    uint8_t ch_num = *(uint8_t *)ch;
    drv_callback_t usr_callback = NULL;
    enum SPI_DEV_T id;

    if ((ch_num == spi_handle[SPI_ID0].dma_tx_ch) || (ch_num == spi_handle[SPI_ID1].dma_tx_ch)) {
        id = (ch_num == spi_handle[SPI_ID0].dma_tx_ch ? SPI_ID0 : SPI_ID1);
        // TX dma abort
        usr_callback = spi_handle[id].tx_abort_callback;
        // update state
        spi_handle[id].state = SPI_STATE_READY;
        spi_handle[id].tx_buff = NULL;
        spi_handle[id].tx_abort_callback = NULL;
        spi_handle[id].tx_callback = NULL;
        spi_handle[id].tx_count = 0;
        spi_handle[id].tx_size = 0;
    } else if ((ch_num == spi_handle[SPI_ID0].dma_rx_ch) || (ch_num == spi_handle[SPI_ID1].dma_rx_ch)) {
        id = (ch_num == spi_handle[SPI_ID0].dma_rx_ch ? SPI_ID0 : SPI_ID1);
        // RX dma abort
        usr_callback = spi_handle[id].rx_abort_callback;
        // update state
        spi_handle[id].state = SPI_STATE_READY;
        spi_handle[id].rx_buff = NULL;
        spi_handle[id].rx_abort_callback = NULL;
        spi_handle[id].rx_callback = NULL;
        spi_handle[id].rx_count = 0;
        spi_handle[id].rx_size = 0;
    } else {
        ASSERT(0, "Unexpected dma channel\r\n");
    }

    if (usr_callback) {
        usr_callback(&id, err_code);
    }
}

static void spi_dma_callback(void *ch, uint32_t err_code)
{
    uint8_t ch_num = *(uint8_t *)ch;
    drv_callback_t usr_callback = NULL;
    enum SPI_DEV_T id;

    if ((ch_num == spi_handle[SPI_ID0].dma_tx_ch) || (ch_num == spi_handle[SPI_ID1].dma_tx_ch)) {
        id = (ch_num == spi_handle[SPI_ID0].dma_tx_ch ? SPI_ID0 : SPI_ID1);
        if (err_code == DMA_INT_TYPE_DONE) {
            spi_handle[id].base->DMA_EN &= ~SPI_DMA_EN_TX_MSK;
            // TX done
            usr_callback = spi_handle[id].tx_callback;
            // update state
            spi_state_clear(id, SPI_STATE_BUSY_TX);
        } else {
            // update state
            if ((spi_handle[id].state == SPI_STATE_BUSY_TX_RX) || (spi_handle[id].state == SPI_STATE_BUSY_TX)) {
                spi_handle[id].state = SPI_STATE_ERROR;
            } else {
                ASSERT(0, "Unexpected spi state\r\n");
            }
        }
        spi_handle[id].tx_buff = NULL;
        spi_handle[id].tx_callback = NULL;
        spi_handle[id].tx_count = 0;
        spi_handle[id].tx_size = 0;
    } else if ((ch_num == spi_handle[SPI_ID0].dma_rx_ch) || (ch_num == spi_handle[SPI_ID1].dma_rx_ch)) {
        id = (ch_num == spi_handle[SPI_ID0].dma_rx_ch ? SPI_ID0 : SPI_ID1);
        if (err_code == DMA_INT_TYPE_DONE) {
            spi_handle[id].base->DMA_EN &= ~SPI_DMA_EN_RX_MSK;
            // RX done
            usr_callback = spi_handle[id].rx_callback;
            // update state
            spi_state_clear(id, SPI_STATE_BUSY_RX);
        } else {
            // update state
            if ((spi_handle[id].state == SPI_STATE_BUSY_TX_RX) || (spi_handle[id].state == SPI_STATE_BUSY_RX)) {
                spi_handle[id].state = SPI_STATE_ERROR;
            } else {
                ASSERT(0, "Unexpected spi state\r\n");
            }
        }
        spi_handle[id].rx_buff = NULL;
        spi_handle[id].rx_callback = NULL;
        spi_handle[id].rx_count = 0;
        spi_handle[id].rx_size = 0;
    } else {
        ASSERT(0, "Unexpected dma channel\r\n");
    }

    if (usr_callback) {
        usr_callback(&id, err_code);
    }
}
#endif

#if SPI_INT_MODE_EN
static void spi_irq_handler(enum SPI_DEV_T id)
{
    drv_callback_t usr_callback = NULL;

    // what caused the interrupt?
    uint32_t int_stat = spi_handle[id].base->INTR_STATUS;
    uint32_t frame_data = 0U;
    uint8_t frame_size = spi_get_frame_size(id);

    if (int_stat & (SPI_INTR_STATUS_RX_MSK | SPI_INTR_STATUS_ROR_MSK | SPI_INTR_STATUS_RT_MSK)) {
        if (int_stat & SPI_INTR_STATUS_ROR_MSK) {
            spi_handle[id].base->INTR_CLR = SPI_INTR_CLR_ROR_MSK;
            LOG_INFO(TRACE_MODULE_DRIVER, "SPI receive overrun\r\n");
        } else if (int_stat & SPI_INTR_STATUS_RT_MSK) {
            // RX timeout interrupt: fetches the residue when fewer than 4 bytes
            // are left sitting in the FIFO
            spi_handle[id].base->INTR_CLR = SPI_INTR_CLR_RT_MSK;
        }
        // received data - read FIFO
        if (spi_handle[id].state & SPI_STATE_BUSY_RX) {
            while ((spi_handle[id].base->STATUS0 & SPI_STATUS0_RNE_MSK) && (spi_handle[id].rx_count < spi_handle[id].rx_size)) {
                frame_data = spi_handle[id].base->DATA;
                memcpy(&spi_handle[id].rx_buff[spi_handle[id].rx_count], &frame_data, frame_size);
                spi_handle[id].rx_count += frame_size;
            }
            if (spi_handle[id].rx_count == spi_handle[id].rx_size) {
                // RX done - disable interrupts
                spi_handle[id].base->INTR_EN &= ~(SPI_INTR_EN_RX_MSK | SPI_INTR_EN_RT_MSK | SPI_INTR_EN_ROR_MSK);
                usr_callback = spi_handle[id].rx_callback;
                // update state
                spi_state_clear(id, SPI_STATE_BUSY_RX);
                spi_handle[id].rx_buff = NULL;
                spi_handle[id].rx_callback = NULL;
                spi_handle[id].rx_count = 0;
                spi_handle[id].rx_size = 0;
            } else if ((spi_handle[id].slave == 0) &&
                       (((spi_handle[id].state & SPI_STATE_BUSY_TX) == 0) && (spi_handle[id].base->STATUS0 & SPI_STATUS0_TFE_MSK))) {
                // send as much dummy data as possible
                uint32_t dummy_cnt = spi_handle[id].rx_size - spi_handle[id].rx_count;
                while ((dummy_cnt >= frame_size) && (spi_handle[id].base->STATUS0 & SPI_STATUS0_TNF_MSK)) {
                    spi_handle[id].base->DATA = spi_handle[id].tx_dummy;
                    dummy_cnt -= frame_size;
                }
            }
        }
    } else if (int_stat & SPI_INTR_STATUS_TX_MSK) {
        if (spi_handle[id].state & SPI_STATE_BUSY_TX) {
            if (spi_handle[id].tx_count == spi_handle[id].tx_size) {
                // TX done - disable interrupt
                spi_handle[id].base->INTR_EN &= ~SPI_INTR_EN_TX_MSK;
                usr_callback = spi_handle[id].tx_callback;
                // update state
                spi_state_clear(id, SPI_STATE_BUSY_TX);
                spi_handle[id].tx_buff = NULL;
                spi_handle[id].tx_callback = NULL;
                spi_handle[id].tx_count = 0;
                spi_handle[id].tx_size = 0;
            } else {
                // TX continue - write FIFO
                while ((spi_handle[id].base->STATUS0 & SPI_STATUS0_TNF_MSK) && (spi_handle[id].tx_count < spi_handle[id].tx_size)) {
                    frame_data = 0;
                    memcpy(&frame_data, &spi_handle[id].tx_buff[spi_handle[id].tx_count], frame_size);
                    spi_handle[id].base->DATA = frame_data;
                    spi_handle[id].tx_count += frame_size;
                }
            }
        }
    } else {
        // nothing to do: the FIFO was already drained by the previous interrupt
    }

    if (usr_callback) {
        usr_callback(&id, int_stat);
    }
}
#endif

void SPI0_IRQHandler(void)
{
#if SPI_INT_MODE_EN
    spi_irq_handler(SPI_ID0);
#endif
}

void SPI1_IRQHandler(void)
{
#if SPI_INT_MODE_EN
    spi_irq_handler(SPI_ID1);
#endif
}
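/*
 * End-to-end usage sketch (illustrative only; the field names follow this
 * file's use of struct SPI_CFG_T, but the exact definition lives in
 * mk_spi.h):
 *
 *     struct SPI_CFG_T cfg = {
 *         .bit_rate = 1000000,      // 1 MHz
 *         .data_bits = 8,
 *         .clk_phase = 0,
 *         .clk_polarity = 0,
 *         .ti_mode = 0,
 *         .slave = 0,               // master mode
 *         .dma_rx = 0, .dma_tx = 0,
 *         .int_rx = 0, .int_tx = 0, // poll mode
 *     };
 *     if (spi_open(SPI_ID0, &cfg) == DRV_OK) {
 *         uint8_t tx = 0xA5, rx = 0;
 *         spi_transfer(SPI_ID0, &tx, &rx, 1, NULL);
 *         spi_close(SPI_ID0);
 *     }
 */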