刘可亮
2024-10-30 16:50:31 +08:00
parent 0ef85b55da
commit 661e71562d
458 changed files with 46555 additions and 12133 deletions

View File

@@ -183,6 +183,52 @@ void hal_qspi_show_ists(u32 id, u32 sts)
printf(" ISTS_BIT_TDONE\n");
}
static void qspi_irq_tx_remain_handle(struct qspi_slave_state *qspi, u32 sts)
{
u32 base, total, free_len, dolen, imsk;
base = qspi_hw_index_to_base(qspi->idx);
if (qspi->async_tx) {
if (qspi->work_mode == QSPI_WORK_MODE_ASYNC_TX_CPU || qspi->work_mode == QSPI_WORK_MODE_ASYNC_DUPLEX_CPU) {
free_len = QSPI_FIFO_DEPTH - qspi_hw_get_tx_fifo_cnt(base);
if (qspi->async_tx_remain) {
dolen = min(free_len, qspi->async_tx_remain);
qspi_hw_write_fifo(base, qspi->async_tx, dolen);
qspi->async_tx += dolen;
qspi->async_tx_wcnt += dolen;
qspi->async_tx_remain -= dolen;
} else {
imsk = ISTS_BIT_TF_EMP | ISTS_BIT_TF_RDY;
qspi_hw_interrupt_disable(base, imsk);
}
total = qspi->async_tx_remain + qspi->async_tx_wcnt;
qspi->async_tx_count = total - qspi_hw_get_tx_fifo_cnt(base);
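/* TDONE with data still in the TX FIFO: the FIFO count appears to
* include one word already latched for shifting, so exclude those
* 4 bytes from the sent count. */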
if ((sts & ISTS_BIT_TDONE) && qspi_hw_get_tx_fifo_cnt(base))
qspi->async_tx_count -= 4;
}
}
}
static void qspi_irq_rx_remain_handle(struct qspi_slave_state *qspi, u32 sts)
{
u32 base, dolen;
base = qspi_hw_index_to_base(qspi->idx);
if (qspi->async_rx && qspi->async_rx_remain) {
if ((qspi->work_mode == QSPI_WORK_MODE_ASYNC_RX_CPU) || qspi->work_mode == QSPI_WORK_MODE_ASYNC_DUPLEX_CPU) {
dolen = qspi_hw_get_rx_fifo_cnt(base);
if (dolen > qspi->async_rx_remain)
dolen = qspi->async_rx_remain;
qspi_hw_read_fifo(base, qspi->async_rx, dolen);
qspi->async_rx += dolen;
qspi->async_rx_count += dolen;
qspi->async_rx_remain -= dolen;
}
}
}
void hal_qspi_slave_irq_handler(qspi_slave_handle *h)
{
struct qspi_slave_state *qspi;
@@ -197,53 +243,20 @@ void hal_qspi_slave_irq_handler(qspi_slave_handle *h)
if (sts & ISTS_BIT_TF_OVF)
qspi->status |= HAL_QSPI_STATUS_TX_OVER_FLOW;
if ((sts & ISTS_BIT_TF_EMP) || (sts & ISTS_BIT_TF_RDY)) {
u32 dolen, free_len;
if ((qspi->work_mode == QSPI_WORK_MODE_ASYNC_TX_CPU) &&
qspi->async_tx) {
u32 total;
free_len = QSPI_FIFO_DEPTH - qspi_hw_get_tx_fifo_cnt(base);
if (qspi->async_tx_remain) {
dolen = min(free_len, qspi->async_tx_remain);
qspi_hw_write_fifo(base, qspi->async_tx, dolen);
qspi->async_tx += dolen;
qspi->async_tx_wcnt += dolen;
qspi->async_tx_remain -= dolen;
} else {
imsk = ISTS_BIT_TF_EMP | ISTS_BIT_TF_RDY;
qspi_hw_interrupt_disable(base, imsk);
}
total = qspi->async_tx_remain + qspi->async_tx_wcnt;
qspi->async_tx_count = total - qspi_hw_get_tx_fifo_cnt(base);
}
}
/* Handle the remaining CPU-copy work */
if ((sts & ISTS_BIT_TF_EMP) || (sts & ISTS_BIT_TF_RDY) || (sts & ISTS_BIT_TDONE))
qspi_irq_tx_remain_handle(qspi, sts);
if ((sts & ISTS_BIT_RF_FUL) || (sts & ISTS_BIT_RF_RDY) || (sts & ISTS_BIT_TDONE))
qspi_irq_rx_remain_handle(qspi, sts);
if (sts & ISTS_BIT_RF_UDR)
qspi->status |= HAL_QSPI_STATUS_RX_UNDER_RUN;
if (sts & ISTS_BIT_RF_OVF)
qspi->status |= HAL_QSPI_STATUS_RX_OVER_FLOW;
if ((sts & ISTS_BIT_RF_FUL) || (sts & ISTS_BIT_RF_RDY) || (sts & ISTS_BIT_TDONE)) {
u32 dolen;
if ((qspi->work_mode == QSPI_WORK_MODE_ASYNC_RX_CPU) && qspi->async_rx &&
qspi->async_rx_remain) {
dolen = qspi_hw_get_rx_fifo_cnt(base);
if (dolen > qspi->async_rx_remain)
dolen = qspi->async_rx_remain;
qspi_hw_read_fifo(base, qspi->async_rx, dolen);
qspi->async_rx += dolen;
qspi->async_rx_count += dolen;
qspi->async_rx_remain -= dolen;
}
}
if ((sts & ISTS_BIT_TF_EMP) && (sts & ISTS_BIT_TDONE)) {
/* Write 4 zero bytes to clear the TX buffer.
* Note:
* Reset the TX FIFO every time before sending new data.
*/
u32 zeros = 0;
qspi_hw_write_fifo(base, (void *)&zeros, 4);
if (sts & ISTS_BIT_RF_OVF) {
/* Ignore RF_OVF in TX-only modes. */
if (!(qspi->work_mode == QSPI_WORK_MODE_ASYNC_TX_CPU || qspi->work_mode == QSPI_WORK_MODE_ASYNC_TX_DMA))
qspi->status |= HAL_QSPI_STATUS_RX_OVER_FLOW;
}
if (sts & ISTS_BIT_TDONE) {
if (qspi->status == HAL_QSPI_STATUS_IN_PROGRESS)
qspi->status = HAL_QSPI_STATUS_OK;
@@ -255,14 +268,12 @@ void hal_qspi_slave_irq_handler(qspi_slave_handle *h)
qspi_hw_interrupt_disable(base, imsk);
qspi->status |= HAL_QSPI_STATUS_ASYNC_TDONE;
if (QSPI_IS_ASYNC_ALL_DONE(qspi->status, qspi->done_mask)) {
if (qspi->work_mode == QSPI_WORK_MODE_ASYNC_RX_DMA) {
qspi->async_rx_count =
qspi->async_rx_remain - qspi_hw_get_idma_rx_len(base);
if (qspi->work_mode == QSPI_WORK_MODE_ASYNC_RX_DMA || qspi->work_mode == QSPI_WORK_MODE_ASYNC_DUPLEX_DMA) {
qspi->async_rx_count = qspi->async_rx_remain - qspi_hw_get_idma_rx_len(base);
aicos_dcache_invalid_range(qspi->async_rx, qspi->async_rx_count);
}
if (qspi->work_mode == QSPI_WORK_MODE_ASYNC_TX_DMA) {
qspi->async_tx_count =
qspi->async_tx_remain - qspi_hw_get_tx_fifo_cnt(base);
if (qspi->work_mode == QSPI_WORK_MODE_ASYNC_TX_DMA || qspi->work_mode == QSPI_WORK_MODE_ASYNC_DUPLEX_DMA) {
qspi->async_tx_count = qspi->async_tx_remain - qspi_hw_get_idma_tx_len(base);
}
if (qspi->cb)
qspi->cb(h, qspi->cb_priv);
@@ -275,6 +286,7 @@ int qspi_slave_transfer_cpu_async(struct qspi_slave_state *qspi,
struct qspi_transfer *t)
{
u32 base, txlen, rxlen;
u32 free_len, dolen;
int ret = 0;
base = qspi_hw_index_to_base(qspi->idx);
@@ -285,9 +297,39 @@ int qspi_slave_transfer_cpu_async(struct qspi_slave_state *qspi,
return -EINVAL;
qspi_hw_reset_fifo(base);
qspi_hw_ctrl_reset(base);
qspi_hw_interrupt_disable(base, ICR_BIT_ALL_MSK);
qspi->status = HAL_QSPI_STATUS_IN_PROGRESS;
if (t->tx_data) {
if (t->tx_data && t->rx_data) {
if (qspi->bus_width != 1) {
hal_log_err("Full duplex mode did not support. bus width: %d\n", qspi->bus_width);
return -1;
}
qspi->work_mode = QSPI_WORK_MODE_ASYNC_DUPLEX_CPU;
qspi->done_mask = HAL_QSPI_STATUS_ASYNC_TDONE;
qspi->async_rx = t->rx_data;
qspi->async_rx_count = 0;
qspi->async_rx_remain = t->data_len;
qspi->async_tx = t->tx_data;
qspi->async_tx_count = 0;
qspi->async_tx_wcnt = 0;
qspi->async_tx_remain = t->data_len;
free_len = QSPI_FIFO_DEPTH - qspi_hw_get_tx_fifo_cnt(base);
if (qspi->async_tx_remain) {
dolen = min(free_len, qspi->async_tx_remain);
qspi_hw_write_fifo(base, qspi->async_tx, dolen);
qspi->async_tx += dolen;
qspi->async_tx_wcnt += dolen;
qspi->async_tx_remain -= dolen;
}
qspi_hw_set_slave_output_en(base, 1);
qspi_hw_interrupt_enable(base, ICR_BIT_ERRS | ICR_BIT_TDONE_INTE |
ISTS_BIT_TF_RDY | ISTS_BIT_TF_EMP |
ICR_BIT_CS_INV_INTE);
qspi_hw_clear_interrupt_status(base, ISTS_BIT_ALL_MSK);
} else if (t->tx_data) {
txlen = t->data_len;
qspi->work_mode = QSPI_WORK_MODE_ASYNC_TX_CPU;
qspi->done_mask = HAL_QSPI_STATUS_ASYNC_TDONE;
@@ -298,6 +340,16 @@ int qspi_slave_transfer_cpu_async(struct qspi_slave_state *qspi,
qspi->async_tx_count = 0;
qspi->async_tx_wcnt = 0;
qspi->async_tx_remain = txlen;
free_len = QSPI_FIFO_DEPTH - qspi_hw_get_tx_fifo_cnt(base);
if (qspi->async_tx_remain) {
dolen = min(free_len, qspi->async_tx_remain);
qspi_hw_write_fifo(base, qspi->async_tx, dolen);
qspi->async_tx += dolen;
qspi->async_tx_wcnt += dolen;
qspi->async_tx_remain -= dolen;
}
if (qspi->bus_width > 1)
qspi_hw_set_slave_output_en(base, 1);
else
@@ -338,10 +390,29 @@ static int qspi_slave_transfer_dma_async(struct qspi_slave_state *qspi, struct q
return -EINVAL;
qspi_hw_reset_fifo(base);
qspi_hw_ctrl_reset(base);
qspi_hw_set_idma_busrt_auto_len_en(base, 1);
qspi_hw_interrupt_disable(base, ICR_BIT_ALL_MSK);
qspi->status = HAL_QSPI_STATUS_IN_PROGRESS;
if (t->tx_data) {
if (t->tx_data && t->rx_data) {
qspi->work_mode = QSPI_WORK_MODE_ASYNC_DUPLEX_DMA;
qspi->done_mask = HAL_QSPI_STATUS_ASYNC_TDONE;
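/* Arm TX and RX iDMA with the same length for a full-duplex exchange */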
qspi->async_tx_remain = t->data_len;
qspi->async_tx = t->tx_data;
qspi_hw_set_slave_output_en(base, 1);
aicos_dcache_clean_range(qspi->async_tx, t->data_len);
qspi_hw_set_idma_tx_addr(base, (u32)t->tx_data);
qspi_hw_set_idma_tx_len(base, (u32)t->data_len);
qspi_hw_set_idma_tx_en(base, 1);
qspi->async_rx_remain = t->data_len;
qspi->async_rx = t->rx_data;
qspi_hw_set_idma_rx_addr(base, (u32)t->rx_data);
qspi_hw_set_idma_rx_len(base, (u32)t->data_len);
qspi_hw_set_idma_rx_en(base, 1);
qspi_hw_interrupt_enable(base, ICR_BIT_IDMA_MSK | ICR_BIT_CS_INV_INTE);
} else if (t->tx_data) {
qspi->work_mode = QSPI_WORK_MODE_ASYNC_TX_DMA;
qspi->done_mask = HAL_QSPI_STATUS_ASYNC_TDONE;
txlen = t->data_len;
@@ -353,7 +424,9 @@ static int qspi_slave_transfer_dma_async(struct qspi_slave_state *qspi, struct q
qspi_hw_set_idma_tx_addr(base, (u32)t->tx_data);
qspi_hw_set_idma_tx_len(base, (u32)txlen);
qspi_hw_set_idma_tx_en(base, 1);
qspi_hw_interrupt_enable(base, ICR_BIT_IDMA_MSK | ICR_BIT_CS_INV_INTE);
imsk = ICR_BIT_IDMA_MSK | ICR_BIT_CS_INV_INTE;
imsk &= ~ISTS_BIT_RF_OVF;
qspi_hw_interrupt_enable(base, imsk);
} else if (t->rx_data) {
qspi->work_mode = QSPI_WORK_MODE_ASYNC_RX_DMA;
qspi->done_mask = HAL_QSPI_STATUS_ASYNC_TDONE;
@@ -381,13 +454,12 @@ static int qspi_slave_can_dma(struct qspi_slave_state *qspi, struct qspi_transfe
return 0;
}
if (t->rx_data) {
/* RX: data length must be 4-byte aligned */
if (t->data_len & 0x3)
return 0;
/* Meet the DMA address alignment requirement */
if (((unsigned long)t->rx_data) & (AIC_DMA_ALIGN_SIZE - 1))
return 0;
}
if (t->data_len < QSPI_FIFO_DEPTH)
return 0;
return 1;
#else
return 0;
@@ -443,7 +515,9 @@ int hal_qspi_slave_transfer_count(qspi_slave_handle *h)
qspi = (struct qspi_slave_state *)h;
if ((qspi->work_mode == QSPI_WORK_MODE_ASYNC_RX_CPU) ||
(qspi->work_mode == QSPI_WORK_MODE_ASYNC_RX_DMA)) {
(qspi->work_mode == QSPI_WORK_MODE_ASYNC_RX_DMA) ||
(qspi->work_mode == QSPI_WORK_MODE_ASYNC_DUPLEX_CPU) ||
(qspi->work_mode == QSPI_WORK_MODE_ASYNC_DUPLEX_DMA)) {
return qspi->async_rx_count;
}
if ((qspi->work_mode == QSPI_WORK_MODE_ASYNC_TX_CPU) ||

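A usage sketch of the new CPU-copy async duplex path (function and field names are taken from this commit; the buffers and the polling step are illustrative):

    struct qspi_transfer t = {0};
    t.tx_data = tx_buf;
    t.rx_data = rx_buf;   /* both set, so the duplex branch is taken */
    t.data_len = len;
    ret = qspi_slave_transfer_cpu_async(qspi, &t);
    /* later, from the completion callback or by polling: */
    received = hal_qspi_slave_transfer_count(h);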
View File

@@ -21,25 +21,6 @@
#include "qspi_hw_v1.0.h"
#endif
void qspi_reg_dump(u32 base)
{
u32 *p, i;
p = (void *)(unsigned long)base;
for (i = 0; i < 40; i++) {
if (i % 4 == 0)
printf("\n0x%lX : ", (unsigned long)p);
printf("%08X ", *p);
p++;
}
printf("\n");
}
void show_freq(char *msg, u32 id, u32 hz)
{
printf("qspi%d %s: %dHz\n", id, msg, hz);
}
void hal_qspi_master_bit_mode_init(u32 base)
{
#if AIC_SUPPORT_SPI_X_WIRE_IN_BIT_MODE == 4
@@ -243,60 +224,9 @@ int hal_qspi_master_set_cs(qspi_master_handle *h, u32 cs_num, bool enable)
return 0;
}
static u32 qspi_master_get_best_div_param(u32 sclk, u32 bus_hz, u32 *div)
{
u32 cdr1_clk, cdr2_clk;
int cdr2, cdr1;
/* Get the best cdr1 param if going to use cdr1 */
cdr1 = 0;
while ((sclk >> cdr1) > bus_hz)
cdr1++;
if (cdr1 > 0xF)
cdr1 = 0xF;
/* Get the best cdr2 param if going to use cdr2 */
cdr2 = (int)(sclk / (bus_hz * 2)) - 1;
if (cdr2 < 0)
cdr2 = 0;
if (cdr2 > 0xFF)
cdr2 = 0xFF;
cdr1_clk = sclk >> cdr1;
cdr2_clk = sclk / (2 * cdr2 + 1);
/* cdr1 param vs cdr2 param, use the best */
if (cdr1_clk == bus_hz) {
*div = cdr1;
return 0;
} else if (cdr2_clk == bus_hz) {
*div = cdr2;
return 1;
} else if ((cdr2_clk < bus_hz) && (cdr1_clk < bus_hz)) {
/* Both clocks are below the target clock, use the larger one */
if (cdr2_clk > cdr1_clk) {
*div = cdr2;
return 1;
}
*div = cdr1;
return 0;
}
/*
* 1. Both clocks are above the target clock: use the smaller one
* 2. Exactly one clock is below the target clock: use it
*/
if (cdr2_clk < cdr1_clk) {
*div = cdr2;
return 1;
}
*div = cdr1;
return 0;
}
int hal_qspi_master_set_bus_freq(qspi_master_handle *h, u32 bus_hz)
{
u32 base, sclk, divider, div, cal_clk;
u32 base, sclk, divider, div, cal_clk = 0;
struct qspi_master_state *qspi;
CHECK_PARAM(h, -EINVAL);
@@ -359,6 +289,43 @@ int qspi_wait_transfer_done(u32 base, u32 tmo)
return 0;
}
int qspi_fifo_write_read(u32 base, u8 *tx, u8 *rx, u32 len, u32 tmo)
{
u32 free_len, dolen, cnt = 0;
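/* Full-duplex exchange in lock-step chunks: write a chunk into the TX
* FIFO, then wait until the same number of bytes shows up in the RX
* FIFO before reading it back. */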
while (len) {
free_len = QSPI_FIFO_DEPTH - qspi_hw_get_tx_fifo_cnt(base);
while (free_len <= (QSPI_FIFO_DEPTH >> 3)) {
aic_udelay(HAL_QSPI_WAIT_DELAY_US);
free_len = QSPI_FIFO_DEPTH - qspi_hw_get_tx_fifo_cnt(base);
cnt++;
if (cnt > tmo)
return -ETIMEDOUT;
}
dolen = min(free_len, len);
qspi_hw_write_fifo(base, tx, dolen);
while (qspi_hw_get_rx_fifo_cnt(base) != dolen) {
aic_udelay(HAL_QSPI_WAIT_DELAY_US);
cnt++;
if (cnt > tmo)
return -ETIMEDOUT;
}
qspi_hw_read_fifo(base, rx, dolen);
tx += dolen;
rx += dolen;
len -= dolen;
}
/* All data has been queued to the FIFO; wait until it is fully sent */
while (qspi_hw_get_tx_fifo_cnt(base)) {
aic_udelay(HAL_QSPI_WAIT_DELAY_US);
cnt++;
if (cnt > tmo)
return -ETIMEDOUT;
}
return 0;
}
int qspi_fifo_write_data(u32 base, u8 *data, u32 len, u32 tmo)
{
@@ -416,36 +383,12 @@ int qspi_fifo_read_data(u32 base, u8 *data, u32 len, u32 tmo)
return 0;
}
u32 qspi_calc_timeout(u32 bus_hz, u32 len)
{
u32 tmo_cnt, tmo_us;
u32 tmo_speed = 100;
if (bus_hz < HAL_QSPI_MIN_FREQ_HZ)
tmo_us = (1000000 * len * 8) / bus_hz;
else if (bus_hz < 1000000)
tmo_us = (1000 * len * 8) / (bus_hz / 1000);
else
tmo_us = (len * 8) / (bus_hz / 1000000);
/* Add 100ms time padding */
tmo_us += 100000;
tmo_cnt = tmo_us / HAL_QSPI_WAIT_PER_CYCLE;
/* Consider the speed limit of DMA or CPU copy. */
if (len >= QSPI_TRANSFER_DATA_LEN_1M)
tmo_speed = ((len / QSPI_CPU_DMA_MIN_SPEED_MS) + 1) * 1000;
return max(tmo_cnt, tmo_speed);
}
static int qspi_master_transfer_cpu_sync(qspi_master_handle *h,
struct qspi_transfer *t)
{
u32 base, tmo_cnt, txlen, tx_1line_cnt, rxlen, sts;
struct qspi_master_state *qspi;
int ret;
int ret = 0;
CHECK_PARAM(h, -EINVAL);
CHECK_PARAM(t, -EINVAL);
@@ -463,7 +406,21 @@ static int qspi_master_transfer_cpu_sync(qspi_master_handle *h,
tmo_cnt *= 10;
qspi_hw_reset_fifo(base);
if (t->tx_data) {
if (t->tx_data && t->rx_data) {
if (qspi_hw_get_bus_width(base) != QSPI_BUS_WIDTH_SINGLE) {
hal_log_err("Full duplex mode did not support.\n");
goto out;
}
qspi->work_mode = QSPI_WORK_MODE_SYNC_DUPLEX_CPU;
qspi_hw_set_transfer_cnt(base, t->data_len, t->data_len, 0, 0);
qspi_hw_drop_invalid_data(base, QSPI_RECV_ALL_INPUT_DATA);
qspi_hw_start_transfer(base);
ret = qspi_fifo_write_read(base, t->tx_data, t->rx_data, t->data_len, tmo_cnt);
if (ret < 0) {
hal_log_err("read write fifo failure.\n");
goto out;
}
} else if (t->tx_data) {
txlen = t->data_len;
tx_1line_cnt = 0;
if (qspi_hw_get_bus_width(base) == QSPI_BUS_WIDTH_SINGLE)
@@ -494,13 +451,14 @@ static int qspi_master_transfer_cpu_sync(qspi_master_handle *h,
goto out;
}
out:
qspi_hw_drop_invalid_data(base, QSPI_DROP_INVALID_DATA);
qspi_hw_get_interrupt_status(base, &sts);
qspi_hw_clear_interrupt_status(base, sts);
return ret;
}
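A hedged full-duplex usage sketch for the CPU sync path (the struct fields and the function are from this commit; the buffer setup and the dispatching caller are illustrative):

    struct qspi_transfer t = {0};
    t.tx_data = tx_buf;
    t.rx_data = rx_buf;   /* both set, so the QSPI_WORK_MODE_SYNC_DUPLEX_CPU branch runs */
    t.data_len = sizeof(tx_buf);
    ret = qspi_master_transfer_cpu_sync(h, &t);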
#ifdef AIC_DMA_DRV
const u32 dynamic_dma_table[] = {
static const u32 dynamic_dma_table[] = {
#ifdef AIC_QSPI1_DYNAMIC_DMA
1,
#endif
@@ -617,6 +575,118 @@ static int qspi_master_wait_dma_done(struct aic_dma_chan *ch, u32 tmo)
return 0;
}
static int qspi_txrx_dma_sync(qspi_master_handle *h,
struct qspi_transfer *t, u32 tmo_cnt)
{
struct qspi_master_state *qspi;
struct dma_slave_config dmacfg;
u32 base;
int ret;
CHECK_PARAM(h, -EINVAL);
CHECK_PARAM(t, -EINVAL);
qspi = (struct qspi_master_state *)h;
base = qspi_hw_index_to_base(qspi->idx);
if (qspi_hw_get_bus_width(base) != QSPI_BUS_WIDTH_SINGLE) {
hal_log_err("Full duplex mode did not support.\n");
return -1;
}
qspi->work_mode = QSPI_WORK_MODE_SYNC_DUPLEX_DMA;
qspi_hw_tx_dma_enable(base);
qspi_hw_rx_dma_enable(base);
qspi_hw_set_transfer_cnt(base, t->data_len, t->data_len, 0, 0);
qspi_hw_drop_invalid_data(base, QSPI_RECV_ALL_INPUT_DATA);
/* config tx DMA channel */
dmacfg.direction = DMA_MEM_TO_DEV;
dmacfg.slave_id = qspi->dma_cfg.port_id;
dmacfg.src_addr = (unsigned long)t->tx_data;
dmacfg.dst_addr = (unsigned long)QSPI_REG_TXD(base);
dmacfg.src_addr_width = qspi->dma_cfg.mem_bus_width;
dmacfg.src_maxburst = qspi->dma_cfg.mem_max_burst;
if (!(t->data_len % HAL_QSPI_DMA_4BYTES_LINE))
dmacfg.dst_addr_width = qspi->dma_cfg.dev_bus_width;
else
dmacfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
dmacfg.dst_maxburst = qspi->dma_cfg.dev_max_burst;
ret = hal_dma_chan_config(qspi->dma_tx, &dmacfg);
if (ret < 0) {
hal_log_err("TX dma chan config failure.\n");
return -1;
}
ret = hal_dma_chan_prep_device(qspi->dma_tx, PTR2U32(QSPI_REG_TXD(base)),
PTR2U32(t->tx_data), t->data_len,
DMA_MEM_TO_DEV);
if (ret < 0) {
hal_log_err("TX dma chan prepare failure.\n");
return -1;
}
ret = hal_dma_chan_start(qspi->dma_tx);
if (ret < 0) {
hal_log_err("TX dma chan start failure.\n");
return -1;
}
/* config rx DMA channel */
dmacfg.direction = DMA_DEV_TO_MEM;
dmacfg.slave_id = qspi->dma_cfg.port_id;
dmacfg.src_addr = (unsigned long)QSPI_REG_RXD(base);
dmacfg.dst_addr = (unsigned long)t->rx_data;
if (!(t->data_len % HAL_QSPI_DMA_4BYTES_LINE))
dmacfg.src_addr_width = qspi->dma_cfg.dev_bus_width;
else
dmacfg.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
dmacfg.src_maxburst = qspi->dma_cfg.mem_max_burst;
dmacfg.dst_addr_width = qspi->dma_cfg.dev_bus_width;
dmacfg.dst_maxburst = qspi->dma_cfg.dev_max_burst;
ret = hal_dma_chan_config(qspi->dma_rx, &dmacfg);
if (ret < 0) {
hal_log_err("RX dma chan config failure.\n");
return -1;
}
ret = hal_dma_chan_prep_device(qspi->dma_rx, PTR2U32(t->rx_data),
PTR2U32(QSPI_REG_RXD(base)), t->data_len,
DMA_DEV_TO_MEM);
if (ret < 0) {
hal_log_err("RX dma chan prepare failure.\n");
return -1;
}
ret = hal_dma_chan_start(qspi->dma_rx);
if (ret < 0) {
hal_log_err("RX dma chan start failure.\n");
return -1;
}
qspi_hw_start_transfer(base);
ret = qspi_wait_transfer_done(base, tmo_cnt);
if (ret < 0) {
hal_log_err("Wait transfer done timeout.\n");
}
ret = qspi_master_wait_dma_done(qspi->dma_rx, tmo_cnt);
if (ret < 0) {
hal_log_err("RX wait dma done timeout.\n");
}
qspi_hw_drop_invalid_data(base, QSPI_DROP_INVALID_DATA);
qspi_hw_rx_dma_disable(base);
qspi_hw_tx_dma_disable(base);
hal_dma_chan_stop(qspi->dma_rx);
hal_dma_chan_stop(qspi->dma_tx);
if (qspi_master_dynamic_dma(qspi)) {
hal_release_dma_chan(qspi->dma_tx);
hal_release_dma_chan(qspi->dma_rx);
}
return ret;
}
static int qspi_master_transfer_dma_sync(qspi_master_handle *h,
struct qspi_transfer *t)
{
@@ -624,7 +694,7 @@ static int qspi_master_transfer_dma_sync(qspi_master_handle *h,
struct qspi_master_state *qspi;
struct aic_dma_chan *dma_rx, *dma_tx;
struct dma_slave_config dmacfg;
int ret;
int ret = 0;
CHECK_PARAM(h, -EINVAL);
CHECK_PARAM(t, -EINVAL);
@@ -641,7 +711,9 @@ static int qspi_master_transfer_dma_sync(qspi_master_handle *h,
tmo_cnt = qspi_calc_timeout(qspi->bus_hz, t->data_len);
qspi_hw_reset_fifo(base);
if (t->tx_data) {
if (t->tx_data && t->rx_data) {
ret = qspi_txrx_dma_sync(h, t, tmo_cnt);
} else if (t->tx_data) {
txlen = t->data_len;
tx_1line_cnt = 0;
if (qspi_hw_get_bus_width(base) == QSPI_BUS_WIDTH_SINGLE)

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2022, Artinchip Technology Co., Ltd
* Copyright (c) 2022-2024, ArtInChip Technology Co., Ltd
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -17,25 +17,6 @@
#include "qspi_hw_v2.0.h"
void qspi_reg_dump(u32 base)
{
u32 *p, i;
p = (void *)(unsigned long)base;
for (i = 0; i < 40; i++) {
if (i % 4 == 0)
printf("\n0x%lX : ", (unsigned long)p);
printf("%08X ", *p);
p++;
}
printf("\n");
}
void show_freq(char *msg, u32 id, u32 hz)
{
printf("qspi%d %s: %dHz\n", id, msg, hz);
}
void hal_qspi_master_bit_mode_init(u32 base)
{
#if AIC_SUPPORT_SPI_X_WIRE_IN_BIT_MODE == 4
@@ -276,57 +257,6 @@ int hal_qspi_master_set_cs(qspi_master_handle *h, u32 cs_num, bool enable)
return 0;
}
static u32 qspi_master_get_best_div_param(u32 sclk, u32 bus_hz, u32 *div)
{
u32 cdr1_clk, cdr2_clk;
int cdr2, cdr1;
/* Get the best cdr1 param if going to use cdr1 */
cdr1 = 0;
while ((sclk >> cdr1) > bus_hz)
cdr1++;
if (cdr1 > 0xF)
cdr1 = 0xF;
/* Get the best cdr2 param if going to use cdr2 */
cdr2 = (int)(sclk / (bus_hz * 2)) - 1;
if (cdr2 < 0)
cdr2 = 0;
if (cdr2 > 0xFF)
cdr2 = 0xFF;
cdr1_clk = sclk >> cdr1;
cdr2_clk = sclk / (2 * cdr2 + 1);
/* cdr1 param vs cdr2 param, use the best */
if (cdr1_clk == bus_hz) {
*div = cdr1;
return 0;
} else if (cdr2_clk == bus_hz) {
*div = cdr2;
return 1;
} else if ((cdr2_clk < bus_hz) && (cdr1_clk < bus_hz)) {
/* Both clocks are below the target clock, use the larger one */
if (cdr2_clk > cdr1_clk) {
*div = cdr2;
return 1;
}
*div = cdr1;
return 0;
}
/*
* 1. Both clocks are above the target clock: use the smaller one
* 2. Exactly one clock is below the target clock: use it
*/
if (cdr2_clk < cdr1_clk) {
*div = cdr2;
return 1;
}
*div = cdr1;
return 0;
}
int hal_qspi_master_set_bus_freq(qspi_master_handle *h, u32 bus_hz)
{
u32 base, sclk, divider, div;
@@ -427,6 +357,44 @@ int qspi_wait_gpdma_rx_done(u32 base, u32 tmo)
}
#endif
int qspi_fifo_write_read(u32 base, u8 *tx, u8 *rx, u32 len, u32 tmo)
{
u32 free_len, dolen, cnt = 0;
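/* Full-duplex exchange in lock-step chunks: write a chunk into the TX
* FIFO, then wait until the same number of bytes shows up in the RX
* FIFO before reading it back. */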
while (len) {
free_len = QSPI_FIFO_DEPTH - qspi_hw_get_tx_fifo_cnt(base);
while (free_len <= (QSPI_FIFO_DEPTH >> 3)) {
aic_udelay(HAL_QSPI_WAIT_DELAY_US);
free_len = QSPI_FIFO_DEPTH - qspi_hw_get_tx_fifo_cnt(base);
cnt++;
if (cnt > tmo)
return -ETIMEDOUT;
}
dolen = min(free_len, len);
qspi_hw_write_fifo(base, tx, dolen);
while (qspi_hw_get_rx_fifo_cnt(base) != dolen) {
aic_udelay(HAL_QSPI_WAIT_DELAY_US);
cnt++;
if (cnt > tmo)
return -ETIMEDOUT;
}
qspi_hw_read_fifo(base, rx, dolen);
tx += dolen;
rx += dolen;
len -= dolen;
}
/* All data has been queued to the FIFO; wait until it is fully sent */
while (qspi_hw_get_tx_fifo_cnt(base)) {
aic_udelay(HAL_QSPI_WAIT_DELAY_US);
cnt++;
if (cnt > tmo)
return -ETIMEDOUT;
}
return 0;
}
int qspi_fifo_write_data(u32 base, u8 *data, u32 len, u32 tmo)
{
u32 dolen, free_len, cnt = 0;
@@ -483,36 +451,12 @@ int qspi_fifo_read_data(u32 base, u8 *data, u32 len, u32 tmo)
return 0;
}
u32 qspi_calc_timeout(u32 bus_hz, u32 len)
{
u32 tmo_cnt, tmo_us;
u32 tmo_speed = 100;
if (bus_hz < HAL_QSPI_MIN_FREQ_HZ)
tmo_us = (1000000 * len * 8) / bus_hz;
else if (bus_hz < 1000000)
tmo_us = (1000 * len * 8) / (bus_hz / 1000);
else
tmo_us = (len * 8) / (bus_hz / 1000000);
/* Add 100ms time padding */
tmo_us += 100000;
tmo_cnt = tmo_us / HAL_QSPI_WAIT_PER_CYCLE;
/* Consider the speed limit of DMA or CPU copy. */
if (len >= QSPI_TRANSFER_DATA_LEN_1M)
tmo_speed = ((len / QSPI_CPU_DMA_MIN_SPEED_MS) + 1) * 1000;
return max(tmo_cnt, tmo_speed);
}
static int qspi_master_transfer_cpu_sync(qspi_master_handle *h,
struct qspi_transfer *t)
{
u32 base, tmo_cnt, txlen, tx_1line_cnt, rxlen, sts;
struct qspi_master_state *qspi;
int ret;
int ret = 0;
CHECK_PARAM(h, -EINVAL);
CHECK_PARAM(t, -EINVAL);
@@ -530,7 +474,21 @@ static int qspi_master_transfer_cpu_sync(qspi_master_handle *h,
tmo_cnt *= 10;
qspi_hw_reset_fifo(base);
if (t->tx_data) {
if (t->tx_data && t->rx_data) {
if (qspi_hw_get_bus_width(base) != QSPI_BUS_WIDTH_SINGLE) {
hal_log_err("Full duplex mode did not support.\n");
goto out;
}
qspi->work_mode = QSPI_WORK_MODE_SYNC_DUPLEX_CPU;
qspi_hw_set_transfer_cnt(base, t->data_len, t->data_len, 0, 0);
qspi_hw_drop_invalid_data(base, QSPI_RECV_ALL_INPUT_DATA);
qspi_hw_start_transfer(base);
ret = qspi_fifo_write_read(base, t->tx_data, t->rx_data, t->data_len, tmo_cnt);
if (ret < 0) {
hal_log_err("read write fifo failure.\n");
goto out;
}
} else if (t->tx_data) {
txlen = t->data_len;
tx_1line_cnt = 0;
if (qspi_hw_get_bus_width(base) == QSPI_BUS_WIDTH_SINGLE)
@@ -581,6 +539,115 @@ static int qspi_master_wait_dma_done(struct aic_dma_chan *ch, u32 tmo)
return 0;
}
static int qspi_txrx_dma_sync(qspi_master_handle *h,
struct qspi_transfer *t, u32 tmo_cnt)
{
struct qspi_master_state *qspi;
struct dma_slave_config dmacfg;
u32 base;
int ret;
CHECK_PARAM(h, -EINVAL);
CHECK_PARAM(t, -EINVAL);
qspi = (struct qspi_master_state *)h;
base = qspi_hw_index_to_base(qspi->idx);
if (qspi_hw_get_bus_width(base) != QSPI_BUS_WIDTH_SINGLE) {
hal_log_err("Full duplex mode did not support.\n");
return -1;
}
qspi->work_mode = QSPI_WORK_MODE_SYNC_DUPLEX_DMA;
qspi_hw_tx_dma_enable(base);
qspi_hw_rx_dma_enable(base);
qspi_hw_set_transfer_cnt(base, t->data_len, t->data_len, 0, 0);
qspi_hw_drop_invalid_data(base, QSPI_RECV_ALL_INPUT_DATA);
/* config tx DMA channel */
dmacfg.direction = DMA_MEM_TO_DEV;
dmacfg.slave_id = qspi->dma_cfg.port_id;
dmacfg.src_addr = (unsigned long)t->tx_data;
dmacfg.dst_addr = (unsigned long)QSPI_REG_TXD(base);
dmacfg.src_addr_width = qspi->dma_cfg.mem_bus_width;
dmacfg.src_maxburst = qspi->dma_cfg.mem_max_burst;
if (!(t->data_len % HAL_QSPI_DMA_4BYTES_LINE))
dmacfg.dst_addr_width = qspi->dma_cfg.dev_bus_width;
else
dmacfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
dmacfg.dst_maxburst = qspi->dma_cfg.dev_max_burst;
ret = hal_dma_chan_config(qspi->dma_tx, &dmacfg);
if (ret < 0) {
hal_log_err("TX dma chan config failure.\n");
return -1;
}
ret = hal_dma_prep_mode_device(qspi->dma_tx, PTR2U32(QSPI_REG_TXD(base)),
PTR2U32(t->tx_data), t->data_len,
DMA_MEM_TO_DEV, TYPE_IO_FAST);
if (ret < 0) {
hal_log_err("TX dma chan prepare failure.\n");
return -1;
}
ret = hal_dma_chan_start(qspi->dma_tx);
if (ret < 0) {
hal_log_err("TX dma chan start failure.\n");
return -1;
}
/* config rx DMA channel */
dmacfg.direction = DMA_DEV_TO_MEM;
dmacfg.slave_id = qspi->dma_cfg.port_id;
dmacfg.src_addr = (unsigned long)QSPI_REG_RXD(base);
dmacfg.dst_addr = (unsigned long)t->rx_data;
if (!(t->data_len % HAL_QSPI_DMA_4BYTES_LINE))
dmacfg.src_addr_width = qspi->dma_cfg.dev_bus_width;
else
dmacfg.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
dmacfg.src_maxburst = qspi->dma_cfg.mem_max_burst;
dmacfg.dst_addr_width = qspi->dma_cfg.dev_bus_width;
dmacfg.dst_maxburst = qspi->dma_cfg.dev_max_burst;
ret = hal_dma_chan_config(qspi->dma_rx, &dmacfg);
if (ret < 0) {
hal_log_err("RX dma chan config failure.\n");
return -1;
}
ret = hal_dma_prep_mode_device(qspi->dma_rx, PTR2U32(t->rx_data),
PTR2U32(QSPI_REG_RXD(base)), t->data_len,
DMA_DEV_TO_MEM, TYPE_IO_FAST);
if (ret < 0) {
hal_log_err("RX dma chan prepare failure.\n");
return -1;
}
ret = hal_dma_chan_start(qspi->dma_rx);
if (ret < 0) {
hal_log_err("RX dma chan start failure.\n");
return -1;
}
qspi_hw_start_transfer(base);
ret = qspi_wait_transfer_done(base, tmo_cnt);
if (ret < 0) {
hal_log_err("Wait transfer done timeout.\n");
}
ret = qspi_master_wait_dma_done(qspi->dma_rx, tmo_cnt);
if (ret < 0) {
hal_log_err("RX wait dma done timeout.\n");
}
qspi_hw_drop_invalid_data(base, QSPI_DROP_INVALID_DATA);
qspi_hw_rx_dma_disable(base);
qspi_hw_tx_dma_disable(base);
hal_dma_chan_stop(qspi->dma_rx);
hal_dma_chan_stop(qspi->dma_tx);
return ret;
}
static int qspi_master_transfer_dma_sync(qspi_master_handle *h,
struct qspi_transfer *t)
{
@@ -588,7 +655,7 @@ static int qspi_master_transfer_dma_sync(qspi_master_handle *h,
struct qspi_master_state *qspi;
struct aic_dma_chan *dma_rx, *dma_tx;
struct dma_slave_config dmacfg;
int ret;
int ret = 0;
CHECK_PARAM(h, -EINVAL);
CHECK_PARAM(t, -EINVAL);
@@ -605,7 +672,9 @@ static int qspi_master_transfer_dma_sync(qspi_master_handle *h,
tmo_cnt = qspi_calc_timeout(qspi->bus_hz, t->data_len);
qspi_hw_reset_fifo(base);
if (t->tx_data) {
if (t->tx_data && t->rx_data) {
ret = qspi_txrx_dma_sync(h, t, tmo_cnt);
} else if (t->tx_data) {
txlen = t->data_len;
tx_1line_cnt = 0;
if (qspi_hw_get_bus_width(base) == QSPI_BUS_WIDTH_SINGLE)

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2022, Artinchip Technology Co., Ltd
* Copyright (c) 2022-2024, ArtInChip Technology Co., Ltd
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -444,6 +444,15 @@ static inline void qspi_hw_drop_invalid_data(u32 base, bool drop)
writel(val, QSPI_REG_TCFG(base));
}
static inline void qspi_hw_ctrl_reset(u32 base)
{
u32 val;
val = readl(QSPI_REG_CFG(base));
val |= CFG_BIT_CTRL_RST_MSK;
writel(val, QSPI_REG_CFG(base));
}
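/* Usage sketch, as in the slave transfer setup in this commit: reset the
* FIFOs and then the controller before arming a new transfer:
*
*   qspi_hw_reset_fifo(base);
*   qspi_hw_ctrl_reset(base);
*/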
static inline void qspi_hw_reset_fifo(u32 base)
{
u32 val = readl(QSPI_REG_FCTL(base));

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2022, Artinchip Technology Co., Ltd
* Copyright (c) 2022-2024, ArtInChip Technology Co., Ltd
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -595,6 +595,15 @@ static inline void qspi_hw_drop_invalid_data(u32 base, bool drop)
writel(val, QSPI_REG_TCFG(base));
}
static inline void qspi_hw_ctrl_reset(u32 base)
{
u32 val;
val = readl(QSPI_REG_CFG(base));
val |= CFG_BIT_CTRL_RST_MSK;
writel(val, QSPI_REG_CFG(base));
}
static inline void qspi_hw_reset_fifo(u32 base)
{
u32 val = readl(QSPI_REG_FCTL(base));

View File

@@ -0,0 +1,110 @@
/*
* Copyright (c) 2024, ArtInChip Technology Co., Ltd
*
* SPDX-License-Identifier: Apache-2.0
*
* Authors: Jiji Chen <jiji.chen@artinchip.com>
*/
#include <rtconfig.h>
#include <stdbool.h>
#include <string.h>
#include <stdint.h>
#include <aic_core.h>
#include <hal_qspi.h>
#include "qspi_internal.h"
void qspi_reg_dump(u32 base)
{
u32 *p, i;
p = (void *)(unsigned long)base;
for (i = 0; i < 40; i++) {
if (i % 4 == 0)
printf("\n0x%lX : ", (unsigned long)p);
printf("%08X ", *p);
p++;
}
printf("\n");
}
void show_freq(char *msg, u32 id, u32 hz)
{
printf("qspi%d %s: %dHz\n", id, msg, hz);
}
u32 qspi_calc_timeout(u32 bus_hz, u32 len)
{
u32 tmo_cnt, tmo_us;
u32 tmo_speed = 100;
if (bus_hz < HAL_QSPI_MIN_FREQ_HZ)
tmo_us = (1000000 * len * 8) / bus_hz;
else if (bus_hz < 1000000)
tmo_us = (1000 * len * 8) / (bus_hz / 1000);
else
tmo_us = (len * 8) / (bus_hz / 1000000);
/* Add 100ms time padding */
tmo_us += 100000;
tmo_cnt = tmo_us / HAL_QSPI_WAIT_PER_CYCLE;
/* Consider the speed limit of DMA or CPU copy. */
if (len >= QSPI_TRANSFER_DATA_LEN_1M)
tmo_speed = ((len / QSPI_CPU_DMA_MIN_SPEED_MS) + 1) * 1000;
return max(tmo_cnt, tmo_speed);
}
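/*
* Worked example (HAL_QSPI_WAIT_PER_CYCLE is assumed to be 10us here;
* its real value is not part of this commit): qspi_calc_timeout(50000000, 4096)
* gives tmo_us = 4096 * 8 / 50 = 655, plus the 100000us padding = 100655,
* so tmo_cnt = 10065; len is under 1MB, so tmo_speed stays 100 and the
* function returns 10065.
*/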
u32 qspi_master_get_best_div_param(u32 sclk, u32 bus_hz, u32 *div)
{
u32 cdr1_clk, cdr2_clk;
int cdr2, cdr1;
/* Get the best cdr1 param if going to use cdr1 */
cdr1 = 0;
while ((sclk >> cdr1) > bus_hz)
cdr1++;
if (cdr1 > 0xF)
cdr1 = 0xF;
/* Get the best cdr2 param if going to use cdr2 */
cdr2 = (int)(sclk / (bus_hz * 2)) - 1;
if (cdr2 < 0)
cdr2 = 0;
if (cdr2 > 0xFF)
cdr2 = 0xFF;
cdr1_clk = sclk >> cdr1;
cdr2_clk = sclk / (2 * cdr2 + 1);
/* cdr1 param vs cdr2 param, use the best */
if (cdr1_clk == bus_hz) {
*div = cdr1;
return 0;
} else if (cdr2_clk == bus_hz) {
*div = cdr2;
return 1;
} else if ((cdr2_clk < bus_hz) && (cdr1_clk < bus_hz)) {
/* Both clocks are below the target clock, use the larger one */
if (cdr2_clk > cdr1_clk) {
*div = cdr2;
return 1;
}
*div = cdr1;
return 0;
}
/*
* 1. Both clocks are above the target clock: use the smaller one
* 2. Exactly one clock is below the target clock: use it
*/
if (cdr2_clk < cdr1_clk) {
*div = cdr2;
return 1;
}
*div = cdr1;
return 0;
}
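/*
* Worked example: sclk = 100MHz, bus_hz = 24MHz. CDR1: cdr1 = 3 is the
* first shift with 100MHz >> cdr1 <= 24MHz, giving cdr1_clk = 12.5MHz.
* CDR2: 100000000 / 48000000 - 1 = 1, giving cdr2_clk = 100MHz / 3 =
* 33.3MHz. Neither hits 24MHz exactly and only cdr1_clk is below it, so
* the function stores *div = 3 and returns 0 (CDR1, 12.5MHz selected).
*/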

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2022, Artinchip Technology Co., Ltd
* Copyright (c) 2022-2024, ArtInChip Technology Co., Ltd
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -13,12 +13,16 @@
extern "C" {
#endif
#define QSPI_WORK_MODE_SYNC_RX_CPU 0
#define QSPI_WORK_MODE_SYNC_TX_CPU 1
#define QSPI_WORK_MODE_ASYNC_RX_CPU 2
#define QSPI_WORK_MODE_ASYNC_TX_CPU 3
#define QSPI_WORK_MODE_ASYNC_RX_DMA 4
#define QSPI_WORK_MODE_ASYNC_TX_DMA 5
#define QSPI_WORK_MODE_SYNC_RX_CPU 0
#define QSPI_WORK_MODE_SYNC_TX_CPU 1
#define QSPI_WORK_MODE_ASYNC_RX_CPU 2
#define QSPI_WORK_MODE_ASYNC_TX_CPU 3
#define QSPI_WORK_MODE_ASYNC_RX_DMA 4
#define QSPI_WORK_MODE_ASYNC_TX_DMA 5
#define QSPI_WORK_MODE_SYNC_DUPLEX_CPU 6
#define QSPI_WORK_MODE_SYNC_DUPLEX_DMA 7
#define QSPI_WORK_MODE_ASYNC_DUPLEX_CPU 8
#define QSPI_WORK_MODE_ASYNC_DUPLEX_DMA 9
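/*
* Mode selection sketch, mirroring qspi_slave_transfer_cpu_async and
* qspi_slave_transfer_dma_async in this commit ("use_dma" is a
* hypothetical flag for illustration only):
*
*   if (t->tx_data && t->rx_data)
*       mode = use_dma ? QSPI_WORK_MODE_ASYNC_DUPLEX_DMA : QSPI_WORK_MODE_ASYNC_DUPLEX_CPU;
*   else if (t->tx_data)
*       mode = use_dma ? QSPI_WORK_MODE_ASYNC_TX_DMA : QSPI_WORK_MODE_ASYNC_TX_CPU;
*   else
*       mode = use_dma ? QSPI_WORK_MODE_ASYNC_RX_DMA : QSPI_WORK_MODE_ASYNC_RX_CPU;
*/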
#define HAL_QSPI_STATUS_INTERNAL_MSK (0xFFFFUL << 16)
#define HAL_QSPI_STATUS_ASYNC_TDONE (0x1UL << 16)
@@ -30,13 +34,14 @@ extern "C" {
#define QSPI_TRANSFER_DATA_LEN_1M 0x100000
#define QSPI_CPU_DMA_MIN_SPEED_MS (0x800000 >> 10)
void qspi_reg_dump(u32 base);
void show_freq(char *msg, u32 id, u32 hz);
void hal_qspi_fifo_reset(u32 base, u32 fifo);
void hal_qspi_show_ists(u32 id, u32 sts);
u32 qspi_calc_timeout(u32 bus_hz, u32 len);
u32 qspi_master_get_best_div_param(u32 sclk, u32 bus_hz, u32 *div);
int qspi_fifo_write_data(u32 base, u8 *data, u32 len, u32 tmo);
int qspi_fifo_read_data(u32 base, u8 *data, u32 len, u32 tmo_us);
int qspi_wait_transfer_done(u32 base, u32 tmo);
u32 qspi_calc_timeout(u32 bus_hz, u32 len);
#ifdef __cplusplus
}