This commit is contained in:
刘可亮
2024-06-04 19:00:30 +08:00
parent 990c72f5be
commit 0a13af6a1d
1668 changed files with 342810 additions and 37726 deletions

View File

@@ -1,452 +0,0 @@
/*
* Copyright (c) 2022, Artinchip Technology Co., Ltd
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <rtconfig.h>
#include <stdbool.h>
#include <string.h>
#include <stdint.h>
#include <aic_common.h>
#include <aic_core.h>
#include <aic_hal.h>
#include <hal_dma.h>
#include <hal_qspi.h>
#include "qspi_internal.h"
#ifdef AIC_QSPI_DRV_V11
#include "qspi_hw_v1.1.h"
/*
 * Initialize a QSPI controller in slave mode.
 *
 * h:   caller-allocated handle; must wrap a struct qspi_slave_state.
 * cfg: controller index, input clock, and SPI mode/CS options.
 *
 * Returns 0 on success, -EINVAL on NULL arguments, -ENODEV for an
 * unknown controller index, -EFAULT when clock enable / reset
 * deassert fails.
 *
 * Note: hardware setup order below (clock, reset, defaults, mode,
 * interrupts off, then per-config options) follows the controller's
 * init sequence — do not reorder casually.
 */
int hal_qspi_slave_init(qspi_slave_handle *h, struct qspi_slave_config *cfg)
{
struct qspi_slave_state *qspi;
u32 base, sclk;
int ret;
CHECK_PARAM(h, -EINVAL);
CHECK_PARAM(cfg, -EINVAL);
qspi = (struct qspi_slave_state *)h;
base = qspi_hw_index_to_base(cfg->idx);
if (base == QSPI_INVALID_BASE) {
hal_log_err("invalid spi controller index %d\n", cfg->idx);
return -ENODEV;
}
/* Clamp the requested module clock into the supported range. */
sclk = cfg->clk_in_hz;
if (sclk > HAL_QSPI_MAX_FREQ_HZ)
sclk = HAL_QSPI_MAX_FREQ_HZ;
else if (sclk < HAL_QSPI_MIN_FREQ_HZ)
sclk = HAL_QSPI_MIN_FREQ_HZ;
qspi->idx = cfg->idx;
show_freq("freq (input)", qspi->idx, sclk);
hal_clk_set_freq(cfg->clk_id, sclk);
ret = hal_clk_enable(cfg->clk_id);
if (ret < 0) {
hal_log_err("QSPI %d clk enable failed!\n", cfg->idx);
return -EFAULT;
}
ret = hal_clk_enable_deassertrst(cfg->clk_id);
if (ret < 0) {
hal_log_err("QSPI %d reset deassert failed!\n", cfg->idx);
return -EFAULT;
}
/* Put the controller into a known state before applying cfg. */
qspi_hw_init_default(base);
qspi_hw_set_ctrl_mode(base, QSPI_CTRL_MODE_SLAVE);
/* All interrupts stay masked until a transfer is started. */
qspi_hw_interrupt_disable(base, ICR_BIT_ALL_MSK);
qspi_hw_set_cpol(base, cfg->cpol);
qspi_hw_set_cpha(base, cfg->cpha);
qspi_hw_set_lsb_en(base, cfg->lsb_en);
qspi_hw_set_cs_polarity(base, cfg->cs_polarity);
/* Park CS at its inactive level for the configured polarity. */
if (cfg->cs_polarity == QSPI_CS_POL_VALID_LOW)
qspi_hw_set_cs_level(base, QSPI_CS_LEVEL_HIGH);
else
qspi_hw_set_cs_level(base, QSPI_CS_LEVEL_LOW);
if (cfg->cs_auto)
qspi_hw_set_cs_owner(base, QSPI_CS_CTL_BY_HW);
else
qspi_hw_set_cs_owner(base, QSPI_CS_CTL_BY_SW);
qspi_hw_drop_invalid_data(base, QSPI_DROP_INVALID_DATA);
qspi_hw_reset_fifo(base);
qspi_hw_set_fifo_watermark(base, QSPI_TX_WATERMARK, QSPI_RX_WATERMARK);
qspi->clk_id = cfg->clk_id;
/* No async callback until hal_qspi_slave_register_cb() is called. */
qspi->cb = NULL;
qspi->cb_priv = NULL;
return 0;
}
/* Reset the selected FIFO(s) on the controller backing this handle. */
void hal_qspi_slave_fifo_reset(qspi_slave_handle *h, u32 fifo)
{
struct qspi_slave_state *state;
CHECK_PARAM_RET(h);
state = (struct qspi_slave_state *)h;
hal_qspi_fifo_reset(qspi_hw_index_to_base(state->idx), fifo);
}
/*
 * Tear down the software side of a slave handle: drop the registered
 * callback and any in-flight async bookkeeping. The hardware itself
 * is left untouched. Returns 0, or -EINVAL for a NULL handle.
 */
int hal_qspi_slave_deinit(qspi_slave_handle *h)
{
struct qspi_slave_state *state;
CHECK_PARAM(h, -EINVAL);
state = (struct qspi_slave_state *)h;
state->cb = NULL;
state->cb_priv = NULL;
state->async_tx = NULL;
state->async_tx_remain = 0;
state->async_rx = NULL;
state->async_rx_remain = 0;
return 0;
}
/*
 * Select the data bus width (single/dual/quad lanes) for the slave.
 *
 * A bus_width of 0 is treated as QSPI_BUS_WIDTH_SINGLE.
 * Returns 0 on success, -EINVAL for a NULL handle.
 *
 * Fix: normalize bus_width BEFORE programming the controller. The
 * original wrote the raw value (possibly 0) to the hardware and only
 * defaulted the cached copy afterwards, so hardware and driver state
 * could disagree.
 */
int hal_qspi_slave_set_bus_width(qspi_slave_handle *h, u32 bus_width)
{
struct qspi_slave_state *qspi;
u32 base;
CHECK_PARAM(h, -EINVAL);
qspi = (struct qspi_slave_state *)h;
base = qspi_hw_index_to_base(qspi->idx);
if (bus_width == 0)
bus_width = QSPI_BUS_WIDTH_SINGLE;
qspi_hw_set_bus_width(base, bus_width);
qspi->bus_width = bus_width;
return 0;
}
/*
 * Register the completion callback invoked from the IRQ handler when
 * an async transfer finishes. Both the handle and the callback are
 * mandatory; priv is passed back verbatim to the callback.
 * Returns 0 on success, -EINVAL on NULL handle or callback.
 */
int hal_qspi_slave_register_cb(qspi_slave_handle *h, qspi_slave_async_cb cb, void *priv)
{
struct qspi_slave_state *state;
CHECK_PARAM(h, -EINVAL);
CHECK_PARAM(cb, -EINVAL);
state = (struct qspi_slave_state *)h;
state->cb_priv = priv;
state->cb = cb;
return 0;
}
/*
 * Report the public transfer status flags, hiding driver-internal
 * bookkeeping bits. Returns the masked status, or -EINVAL for a
 * NULL handle.
 */
int hal_qspi_slave_get_status(qspi_slave_handle *h)
{
struct qspi_slave_state *state;
CHECK_PARAM(h, -EINVAL);
state = (struct qspi_slave_state *)h;
return state->status & ~HAL_QSPI_STATUS_INTERNAL_MSK;
}
/*
 * Debug helper: print the name of every interrupt status bit set in
 * sts for controller `id`. Prints nothing when sts is 0.
 *
 * Improvement: the original was 12 copy-pasted if/printf pairs; a
 * table keeps the bit list in one place and makes additions trivial.
 */
void hal_qspi_show_ists(u32 id, u32 sts)
{
static const struct {
u32 bit;
const char *name;
} bit_names[] = {
{ ISTS_BIT_RF_RDY, "ISTS_BIT_RF_RDY" },
{ ISTS_BIT_RF_EMP, "ISTS_BIT_RF_EMP" },
{ ISTS_BIT_RF_FUL, "ISTS_BIT_RF_FUL" },
{ ISTS_BIT_TF_RDY, "ISTS_BIT_TF_RDY" },
{ ISTS_BIT_TF_EMP, "ISTS_BIT_TF_EMP" },
{ ISTS_BIT_TF_FUL, "ISTS_BIT_TF_FUL" },
{ ISTS_BIT_RF_OVF, "ISTS_BIT_RF_OVF" },
{ ISTS_BIT_RF_UDR, "ISTS_BIT_RF_UDR" },
{ ISTS_BIT_TF_OVF, "ISTS_BIT_TF_OVF" },
{ ISTS_BIT_TF_UDR, "ISTS_BIT_TF_UDR" },
{ ISTS_BIT_CS_INV, "ISTS_BIT_CS_INV" },
{ ISTS_BIT_TDONE, "ISTS_BIT_TDONE" },
};
u32 i;
if (!sts)
return;
printf("QSPI%d:\n", id);
for (i = 0; i < sizeof(bit_names) / sizeof(bit_names[0]); i++) {
if (sts & bit_names[i].bit)
printf(" %s\n", bit_names[i].name);
}
}
/*
 * QSPI slave interrupt service routine.
 *
 * Reads the interrupt status once, then, in order:
 *  1. latches TX/RX error flags into qspi->status,
 *  2. refills the TX FIFO (CPU async TX mode),
 *  3. drains the RX FIFO (CPU async RX mode),
 *  4. clears stale TX data after a completed transfer,
 *  5. on transfer-done, masks transfer interrupts, updates status,
 *     and fires the user callback when all done-conditions are met.
 * All handled status bits are acknowledged at the end.
 *
 * NOTE(review): statement order here matters (status read before
 * FIFO handling, ack last) — do not reorder.
 */
void hal_qspi_slave_irq_handler(qspi_slave_handle *h)
{
struct qspi_slave_state *qspi;
u32 base, sts, imsk;
CHECK_PARAM_RET(h);
qspi = (struct qspi_slave_state *)h;
base = qspi_hw_index_to_base(qspi->idx);
qspi_hw_get_interrupt_status(base, &sts);
if (sts & ISTS_BIT_TF_OVF)
qspi->status |= HAL_QSPI_STATUS_TX_OVER_FLOW;
if ((sts & ISTS_BIT_TF_EMP) || (sts & ISTS_BIT_TF_RDY)) {
u32 dolen, free_len;
/* CPU-driven async TX: top up the TX FIFO from the user buffer. */
if ((qspi->work_mode == QSPI_WORK_MODE_ASYNC_TX_CPU) &&
qspi->async_tx) {
u32 total;
free_len = QSPI_FIFO_DEPTH - qspi_hw_get_tx_fifo_cnt(base);
if (qspi->async_tx_remain) {
dolen = min(free_len, qspi->async_tx_remain);
qspi_hw_write_fifo(base, qspi->async_tx, dolen);
qspi->async_tx += dolen;
qspi->async_tx_wcnt += dolen;
qspi->async_tx_remain -= dolen;
} else {
/* Buffer fully queued: stop TX-FIFO interrupts. */
imsk = ISTS_BIT_TF_EMP | ISTS_BIT_TF_RDY;
qspi_hw_interrupt_disable(base, imsk);
}
/* Bytes actually shifted out = bytes queued minus bytes
 * still sitting in the FIFO. (total = remaining + written;
 * assumes wcnt/remain track the same buffer — TODO confirm.)
 */
total = qspi->async_tx_remain + qspi->async_tx_wcnt;
qspi->async_tx_count = total - qspi_hw_get_tx_fifo_cnt(base);
}
}
if (sts & ISTS_BIT_RF_UDR)
qspi->status |= HAL_QSPI_STATUS_RX_UNDER_RUN;
if (sts & ISTS_BIT_RF_OVF)
qspi->status |= HAL_QSPI_STATUS_RX_OVER_FLOW;
if ((sts & ISTS_BIT_RF_FUL) || (sts & ISTS_BIT_RF_RDY) || (sts & ISTS_BIT_TDONE)) {
u32 dolen;
/* CPU-driven async RX: drain whatever the RX FIFO holds,
 * capped at the space left in the user buffer. */
if ((qspi->work_mode == QSPI_WORK_MODE_ASYNC_RX_CPU) && qspi->async_rx &&
qspi->async_rx_remain) {
dolen = qspi_hw_get_rx_fifo_cnt(base);
if (dolen > qspi->async_rx_remain)
dolen = qspi->async_rx_remain;
qspi_hw_read_fifo(base, qspi->async_rx, dolen);
qspi->async_rx += dolen;
qspi->async_rx_count += dolen;
qspi->async_rx_remain -= dolen;
}
}
if ((sts & ISTS_BIT_TF_EMP) && (sts & ISTS_BIT_TDONE)) {
/* Write 4 bytes 0 to clear TX Buffer,
 * Note:
 * Every time user send new data, please reset TX FIFO
 */
u32 zeros = 0;
qspi_hw_write_fifo(base, (void *)&zeros, 4);
}
if (sts & ISTS_BIT_TDONE) {
/* Transfer finished: fold IN_PROGRESS into OK or clear it. */
if (qspi->status == HAL_QSPI_STATUS_IN_PROGRESS)
qspi->status = HAL_QSPI_STATUS_OK;
else
qspi->status &= ~HAL_QSPI_STATUS_IN_PROGRESS;
/* Mask everything except transfer-done and CS-invalid. */
imsk = ICR_BIT_ALL_MSK;
imsk &= ~ICR_BIT_TDONE_INTE;
imsk &= ~ICR_BIT_CS_INV_INTE;
qspi_hw_interrupt_disable(base, imsk);
qspi->status |= HAL_QSPI_STATUS_ASYNC_TDONE;
if (QSPI_IS_ASYNC_ALL_DONE(qspi->status, qspi->done_mask)) {
if (qspi->work_mode == QSPI_WORK_MODE_ASYNC_RX_DMA) {
/* DMA RX: received = requested - still pending in IDMA;
 * invalidate the cache before the user reads the buffer. */
qspi->async_rx_count =
qspi->async_rx_remain - qspi_hw_get_idma_rx_len(base);
aicos_dcache_invalid_range(qspi->async_rx, qspi->async_rx_count);
}
if (qspi->work_mode == QSPI_WORK_MODE_ASYNC_TX_DMA) {
qspi->async_tx_count =
qspi->async_tx_remain - qspi_hw_get_tx_fifo_cnt(base);
}
if (qspi->cb)
qspi->cb(h, qspi->cb_priv);
}
}
/* Acknowledge everything we observed in this pass. */
qspi_hw_clear_interrupt_status(base, sts);
}
/*
 * Start a CPU-driven (interrupt/PIO) asynchronous transfer.
 *
 * Exactly one direction is started per call; when both tx_data and
 * rx_data are set, only TX is armed (tx takes the first branch).
 * Completion is reported through the registered callback from the
 * IRQ handler. Returns 0 on start, -EINVAL for no buffer or zero
 * length.
 */
int qspi_slave_transfer_cpu_async(struct qspi_slave_state *qspi,
struct qspi_transfer *t)
{
u32 base, txlen, rxlen;
int ret = 0;
base = qspi_hw_index_to_base(qspi->idx);
if ((t->tx_data == NULL) && (t->rx_data == NULL))
return -EINVAL;
if (t->data_len == 0)
return -EINVAL;
/* Quiesce interrupts while (re)arming the transfer state. */
qspi_hw_interrupt_disable(base, ICR_BIT_ALL_MSK);
qspi->status = HAL_QSPI_STATUS_IN_PROGRESS;
if (t->tx_data) {
txlen = t->data_len;
qspi->work_mode = QSPI_WORK_MODE_ASYNC_TX_CPU;
qspi->done_mask = HAL_QSPI_STATUS_ASYNC_TDONE;
qspi->async_rx = NULL;
qspi->async_rx_count = 0;
qspi->async_rx_remain = 0;
qspi->async_tx = t->tx_data;
qspi->async_tx_count = 0;
qspi->async_tx_wcnt = 0;
qspi->async_tx_remain = txlen;
/* Multi-lane modes need the slave's output driver enabled. */
if (qspi->bus_width > 1)
qspi_hw_set_slave_output_en(base, 1);
else
qspi_hw_set_slave_output_en(base, 0);
/* NOTE(review): this mask mixes ICR_BIT_* and ISTS_BIT_*
 * constants — presumably the enable and status registers share
 * bit positions for TF_RDY/TF_EMP; verify against qspi_hw_v1.1.h.
 */
qspi_hw_interrupt_enable(base, ICR_BIT_ERRS | ICR_BIT_TDONE_INTE |
ISTS_BIT_TF_RDY | ISTS_BIT_TF_EMP |
ICR_BIT_CS_INV_INTE);
qspi_hw_clear_interrupt_status(base, ISTS_BIT_ALL_MSK);
} else if (t->rx_data) {
rxlen = t->data_len;
qspi->work_mode = QSPI_WORK_MODE_ASYNC_RX_CPU;
qspi->done_mask = HAL_QSPI_STATUS_ASYNC_TDONE;
qspi->async_tx = NULL;
qspi->async_tx_count = 0;
qspi->async_tx_remain = 0;
qspi->async_rx = t->rx_data;
qspi->async_rx_count = 0;
qspi->async_rx_remain = rxlen;
/* Receiving only: keep the output driver off. */
qspi_hw_set_slave_output_en(base, 0);
qspi_hw_interrupt_enable(base, ICR_BIT_ERRS | ICR_BIT_TDONE_INTE |
ICR_BIT_CS_INV_INTE);
qspi_hw_clear_interrupt_status(base, ISTS_BIT_ALL_MSK);
}
return ret;
}
/*
 * Start an IDMA-driven asynchronous transfer.
 *
 * Exactly one direction is started per call (TX wins when both
 * buffers are set). The caller must have passed qspi_slave_can_dma()
 * checks (buffer alignment, RX length multiple of 4). Completion is
 * reported via the registered callback from the IRQ handler.
 * Returns 0 on start, -EINVAL for no buffer or zero length.
 */
static int qspi_slave_transfer_dma_async(struct qspi_slave_state *qspi, struct qspi_transfer *t)
{
u32 base, txlen, rxlen, imsk;
int ret = 0;
base = qspi_hw_index_to_base(qspi->idx);
if ((t->tx_data == NULL) && (t->rx_data == NULL))
return -EINVAL;
if (t->data_len == 0)
return -EINVAL;
qspi_hw_set_idma_busrt_auto_len_en(base, 1);
/* Quiesce interrupts while (re)arming the transfer state. */
qspi_hw_interrupt_disable(base, ICR_BIT_ALL_MSK);
qspi->status = HAL_QSPI_STATUS_IN_PROGRESS;
if (t->tx_data) {
qspi->work_mode = QSPI_WORK_MODE_ASYNC_TX_DMA;
qspi->done_mask = HAL_QSPI_STATUS_ASYNC_TDONE;
txlen = t->data_len;
qspi->async_tx_remain = txlen;
qspi->async_tx = t->tx_data;
/* Flush the source buffer so IDMA reads up-to-date memory. */
aicos_dcache_clean_range(qspi->async_tx, txlen);
/* Multi-lane modes need the slave's output driver enabled. */
if (qspi->bus_width > 1)
qspi_hw_set_slave_output_en(base, 1);
qspi_hw_set_idma_tx_addr(base, (u32)t->tx_data);
qspi_hw_set_idma_tx_len(base, (u32)txlen);
qspi_hw_set_idma_tx_en(base, 1);
qspi_hw_interrupt_enable(base, ICR_BIT_IDMA_MSK | ICR_BIT_CS_INV_INTE);
} else if (t->rx_data) {
qspi->work_mode = QSPI_WORK_MODE_ASYNC_RX_DMA;
qspi->done_mask = HAL_QSPI_STATUS_ASYNC_TDONE;
rxlen = t->data_len;
qspi->async_rx_remain = rxlen;
qspi->async_rx = t->rx_data;
/* Receiving only: keep the output driver off. */
qspi_hw_set_slave_output_en(base, 0);
qspi_hw_set_idma_rx_addr(base, (u32)t->rx_data);
qspi_hw_set_idma_rx_len(base, (u32)rxlen);
qspi_hw_set_idma_rx_en(base, 1);
/* NOTE(review): an ISTS_BIT_* constant is cleared out of an
 * ICR_BIT_* mask — presumably TF_UDR shares its bit position
 * across registers; verify against qspi_hw_v1.1.h.
 */
imsk = ICR_BIT_IDMA_MSK | ICR_BIT_CS_INV_INTE;
imsk &= ~ISTS_BIT_TF_UDR;
qspi_hw_interrupt_enable(base, imsk);
}
qspi_hw_clear_interrupt_status(base, ISTS_BIT_ALL_MSK);
return ret;
}
/*
 * Decide whether a transfer qualifies for IDMA: both buffers must
 * meet the DMA address alignment, and RX length must be a multiple
 * of 4 bytes. Returns 1 when DMA is usable, 0 for CPU fallback
 * (always 0 when the DMA driver is not built in).
 */
static int qspi_slave_can_dma(struct qspi_slave_state *qspi, struct qspi_transfer *t)
{
#ifdef AIC_DMA_DRV
unsigned long align_mask = AIC_DMA_ALIGN_SIZE - 1;
if (t->tx_data && (((unsigned long)t->tx_data) & align_mask))
return 0;
if (t->rx_data) {
if ((t->data_len & 0x3) ||
(((unsigned long)t->rx_data) & align_mask))
return 0;
}
return 1;
#else
return 0;
#endif
}
/*
 * Kick off an asynchronous transfer, preferring DMA when the buffers
 * qualify and falling back to CPU/PIO otherwise. Returns the started
 * backend's result, or -EINVAL for NULL arguments.
 */
int hal_qspi_slave_transfer_async(qspi_slave_handle *h, struct qspi_transfer *t)
{
struct qspi_slave_state *state;
CHECK_PARAM(h, -EINVAL);
CHECK_PARAM(t, -EINVAL);
state = (struct qspi_slave_state *)h;
if (!qspi_slave_can_dma(state, t))
return qspi_slave_transfer_cpu_async(state, t);
return qspi_slave_transfer_dma_async(state, t);
}
/*
 * Abort the asynchronous transfer in progress (if any): clear and
 * mask interrupts for the current work mode, and stop IDMA for the
 * DMA modes. Does nothing for non-async work modes.
 * Returns 0 on success, -EINVAL for a NULL handle.
 *
 * Fix: validate the handle before dereferencing it — every other
 * public entry point in this driver does so (the original crashed
 * on a NULL handle). The four duplicated mode branches are also
 * collapsed into one switch.
 */
int hal_qspi_slave_transfer_abort(qspi_slave_handle *h)
{
struct qspi_slave_state *qspi;
u32 base;
CHECK_PARAM(h, -EINVAL);
qspi = (struct qspi_slave_state *)h;
base = qspi_hw_index_to_base(qspi->idx);
switch (qspi->work_mode) {
case QSPI_WORK_MODE_ASYNC_RX_CPU:
case QSPI_WORK_MODE_ASYNC_TX_CPU:
qspi_hw_clear_interrupt_status(base, ISTS_BIT_ALL_MSK);
qspi_hw_interrupt_disable(base, ICR_BIT_ALL_MSK);
break;
case QSPI_WORK_MODE_ASYNC_RX_DMA:
qspi_hw_clear_interrupt_status(base, ISTS_BIT_ALL_MSK);
qspi_hw_interrupt_disable(base, ICR_BIT_ALL_MSK);
qspi_hw_set_idma_rx_en(base, 0);
qspi_hw_set_idma_rx_len(base, 0);
break;
case QSPI_WORK_MODE_ASYNC_TX_DMA:
qspi_hw_clear_interrupt_status(base, ISTS_BIT_ALL_MSK);
qspi_hw_interrupt_disable(base, ICR_BIT_ALL_MSK);
qspi_hw_set_idma_tx_en(base, 0);
qspi_hw_set_idma_tx_len(base, 0);
break;
default:
break;
}
return 0;
}
/*
 * Return the number of bytes transferred so far by the current async
 * operation (RX or TX, CPU or DMA mode), or -1 when no async mode is
 * active. Returns -EINVAL for a NULL handle.
 *
 * Fix: validate the handle before dereferencing it — every other
 * public entry point in this driver does so (the original crashed
 * on a NULL handle).
 */
int hal_qspi_slave_transfer_count(qspi_slave_handle *h)
{
struct qspi_slave_state *qspi;
CHECK_PARAM(h, -EINVAL);
qspi = (struct qspi_slave_state *)h;
switch (qspi->work_mode) {
case QSPI_WORK_MODE_ASYNC_RX_CPU:
case QSPI_WORK_MODE_ASYNC_RX_DMA:
return qspi->async_rx_count;
case QSPI_WORK_MODE_ASYNC_TX_CPU:
case QSPI_WORK_MODE_ASYNC_TX_DMA:
return qspi->async_tx_count;
default:
return -1;
}
}
#endif

View File

@@ -68,6 +68,7 @@ int hal_qspi_master_transfer_bit_mode(qspi_master_handle *h, struct qspi_bm_tran
if (!t->rx_bits_len && !t->tx_bits_len)
return -EINVAL;
qspi_hw_reset_fifo(base);
if (t->tx_data && t->rx_data) {
ret = qspi_hw_bit_mode_send_then_recv(base, t->tx_data, t->tx_bits_len,
t->rx_data, t->rx_bits_len);
@@ -392,7 +393,7 @@ int qspi_wait_transfer_done(u32 base, u32 tmo)
u32 cnt = 0;
while (qspi_hw_check_transfer_done(base) == false) {
aic_udelay(HAL_QSPI_WAIT_30_US);
aic_udelay(HAL_QSPI_WAIT_DELAY_US);
cnt++;
if (cnt > tmo)
return -ETIMEDOUT;
@@ -408,7 +409,7 @@ int qspi_fifo_write_data(u32 base, u8 *data, u32 len, u32 tmo)
while (len) {
free_len = QSPI_FIFO_DEPTH - qspi_hw_get_tx_fifo_cnt(base);
if (free_len <= (QSPI_FIFO_DEPTH >> 3)) {
aic_udelay(HAL_QSPI_WAIT_30_US);
aic_udelay(HAL_QSPI_WAIT_DELAY_US);
cnt++;
if (cnt > tmo)
return -ETIMEDOUT;
@@ -418,13 +419,13 @@ int qspi_fifo_write_data(u32 base, u8 *data, u32 len, u32 tmo)
qspi_hw_write_fifo(base, data, dolen);
data += dolen;
len -= dolen;
aic_udelay(HAL_QSPI_WAIT_30_US);
aic_udelay(HAL_QSPI_WAIT_DELAY_US);
cnt++;
}
/* Data are written to FIFO, waiting all data are sent out */
while (qspi_hw_get_tx_fifo_cnt(base)) {
aic_udelay(HAL_QSPI_WAIT_30_US);
aic_udelay(HAL_QSPI_WAIT_DELAY_US);
cnt++;
if (cnt > tmo)
return -ETIMEDOUT;
@@ -439,7 +440,7 @@ int qspi_fifo_read_data(u32 base, u8 *data, u32 len, u32 tmo)
while (len) {
dolen = qspi_hw_get_rx_fifo_cnt(base);
if (dolen == 0) {
aic_udelay(HAL_QSPI_WAIT_30_US);
aic_udelay(HAL_QSPI_WAIT_DELAY_US);
cnt++;
if (cnt > tmo)
return -ETIMEDOUT;
@@ -450,29 +451,35 @@ int qspi_fifo_read_data(u32 base, u8 *data, u32 len, u32 tmo)
qspi_hw_read_fifo(base, data, dolen);
data += dolen;
len -= dolen;
aic_udelay(HAL_QSPI_WAIT_30_US);
aic_udelay(HAL_QSPI_WAIT_DELAY_US);
cnt++;
}
return 0;
}
u32 qspi_calc_timeout(u32 bus_hz, u32 bw, u32 len)
u32 qspi_calc_timeout(u32 bus_hz, u32 len)
{
u32 tmo_cnt, tmo_us;
u32 tmo_speed = 100;
if (bus_hz < HAL_QSPI_MIN_FREQ_HZ)
tmo_us = (1000000 * (len * 8 / bw)) / bus_hz;
tmo_us = (1000000 * len * 8) / bus_hz;
else if (bus_hz < 1000000)
tmo_us = (1000 * (len * 8 / bw)) / (bus_hz / 1000);
tmo_us = (1000 * len * 8) / (bus_hz / 1000);
else
tmo_us = (len * 8 / bw) / (bus_hz / 1000000);
tmo_us = (len * 8) / (bus_hz / 1000000);
/* Add 100ms time padding */
tmo_us += 100000;
tmo_cnt = tmo_us / HAL_QSPI_WAIT_PER_CYCLE;
return tmo_cnt;
/* Consider the speed limit of DMA or CPU copy.
*/
if (len >= QSPI_TRANSFER_DATA_LEN_1M)
tmo_speed = ((len / QSPI_CPU_DMA_MIN_SPEED_MS) + 1) * 1000;
return max(tmo_cnt, tmo_speed);
}
static int qspi_master_transfer_cpu_sync(qspi_master_handle *h,
@@ -493,9 +500,10 @@ static int qspi_master_transfer_cpu_sync(qspi_master_handle *h,
if (t->data_len == 0)
return -EINVAL;
tmo_cnt = qspi_calc_timeout(qspi->bus_hz, qspi->bus_width, t->data_len);
tmo_cnt = qspi_calc_timeout(qspi->bus_hz, t->data_len);
/* CPU mode, spend more time */
tmo_cnt *= 10;
qspi_hw_reset_fifo(base);
if (t->tx_data) {
txlen = t->data_len;
@@ -539,7 +547,7 @@ static int qspi_master_wait_dma_done(struct aic_dma_chan *ch, u32 tmo)
u32 left, cnt = 0;
while (hal_dma_chan_tx_status(ch, &left) != DMA_COMPLETE && left) {
aic_udelay(HAL_QSPI_WAIT_30_US);
aic_udelay(HAL_QSPI_WAIT_DELAY_US);
cnt++;
if (cnt > tmo) {
return -ETIMEDOUT;
@@ -569,7 +577,8 @@ static int qspi_master_transfer_dma_sync(qspi_master_handle *h,
if (t->data_len == 0)
return -EINVAL;
tmo_cnt = qspi_calc_timeout(qspi->bus_hz, qspi->bus_width, t->data_len);
tmo_cnt = qspi_calc_timeout(qspi->bus_hz, t->data_len);
qspi_hw_reset_fifo(base);
if (t->tx_data) {
txlen = t->data_len;
@@ -588,7 +597,10 @@ static int qspi_master_transfer_dma_sync(qspi_master_handle *h,
dmacfg.src_addr_width = qspi->dma_cfg.mem_bus_width;
dmacfg.src_maxburst = qspi->dma_cfg.mem_max_burst;
dmacfg.dst_addr_width = qspi->dma_cfg.dev_bus_width;
if (!(txlen % HAL_QSPI_DMA_4BYTES_LINE))
dmacfg.dst_addr_width = qspi->dma_cfg.dev_bus_width;
else
dmacfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
dmacfg.dst_maxburst = qspi->dma_cfg.dev_max_burst;
ret = hal_dma_chan_config(dma_tx, &dmacfg);
@@ -609,16 +621,16 @@ static int qspi_master_transfer_dma_sync(qspi_master_handle *h,
goto out;
}
qspi_hw_start_transfer(base);
ret = qspi_wait_transfer_done(base, tmo_cnt);
if (ret < 0) {
hal_log_err("TX wait transfer done timeout.\n");
goto tx_stop;
}
ret = qspi_master_wait_dma_done(dma_tx, tmo_cnt);
if (ret < 0) {
hal_log_err("TX wait dma done timeout.\n");
goto tx_stop;
}
ret = qspi_wait_transfer_done(base, tmo_cnt);
if (ret < 0) {
hal_log_err("TX wait transfer done timeout.\n");
goto tx_stop;
}
tx_stop:
qspi_hw_tx_dma_disable(base);
hal_dma_chan_stop(dma_tx);
@@ -634,7 +646,10 @@ static int qspi_master_transfer_dma_sync(qspi_master_handle *h,
dmacfg.src_addr = (unsigned long)QSPI_REG_RXD(base);
dmacfg.dst_addr = (unsigned long)t->rx_data;
dmacfg.src_addr_width = qspi->dma_cfg.mem_bus_width;
if (!(rxlen % HAL_QSPI_DMA_4BYTES_LINE))
dmacfg.src_addr_width = qspi->dma_cfg.dev_bus_width;
else
dmacfg.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
dmacfg.src_maxburst = qspi->dma_cfg.mem_max_burst;
dmacfg.dst_addr_width = qspi->dma_cfg.dev_bus_width;
dmacfg.dst_maxburst = qspi->dma_cfg.dev_max_burst;
@@ -743,6 +758,7 @@ static int qspi_master_transfer_cpu_async(struct qspi_master_state *qspi,
if (t->data_len == 0)
return -EINVAL;
qspi_hw_reset_fifo(base);
qspi_hw_interrupt_disable(base, ICR_BIT_CPU_MSK);
qspi->status = HAL_QSPI_STATUS_IN_PROGRESS;
if (t->tx_data) {
@@ -822,6 +838,7 @@ static int qspi_master_transfer_dma_async(struct qspi_master_state *qspi,
if (t->data_len == 0)
return -EINVAL;
qspi_hw_reset_fifo(base);
qspi_hw_interrupt_disable(base, ICR_BIT_DMA_MSK);
qspi->status = HAL_QSPI_STATUS_IN_PROGRESS;
if (t->tx_data) {

View File

@@ -64,6 +64,7 @@ int hal_qspi_master_transfer_bit_mode(qspi_master_handle *h, struct qspi_bm_tran
if (!t->rx_bits_len && !t->tx_bits_len)
return -EINVAL;
qspi_hw_reset_fifo(base);
if (t->tx_data && t->rx_data) {
ret = qspi_hw_bit_mode_send_then_recv(base, t->tx_data, t->tx_bits_len,
t->rx_data, t->rx_bits_len);
@@ -382,7 +383,7 @@ int qspi_wait_transfer_done(u32 base, u32 tmo)
u32 cnt = 0;
while (qspi_hw_check_transfer_done(base) == false) {
aic_udelay(HAL_QSPI_WAIT_30_US);
aic_udelay(HAL_QSPI_WAIT_DELAY_US);
cnt++;
if (cnt > tmo)
return -ETIMEDOUT;
@@ -396,7 +397,7 @@ int qspi_wait_gpdma_tx_done(u32 base, u32 tmo)
u32 cnt = 0;
while (qspi_hw_check_gpdma_tx_done(base) == false) {
aic_udelay(HAL_QSPI_WAIT_30_US);
aic_udelay(HAL_QSPI_WAIT_DELAY_US);
cnt++;
if (cnt > tmo)
return -ETIMEDOUT;
@@ -409,7 +410,7 @@ int qspi_wait_gpdma_rx_done(u32 base, u32 tmo)
u32 cnt = 0;
while (qspi_hw_check_gpdma_rx_done(base) == false) {
aic_udelay(HAL_QSPI_WAIT_30_US);
aic_udelay(HAL_QSPI_WAIT_DELAY_US);
cnt++;
if (cnt > tmo)
return -ETIMEDOUT;
@@ -425,7 +426,7 @@ int qspi_fifo_write_data(u32 base, u8 *data, u32 len, u32 tmo)
while (len) {
free_len = QSPI_FIFO_DEPTH - qspi_hw_get_tx_fifo_cnt(base);
if (free_len <= (QSPI_FIFO_DEPTH >> 3)) {
aic_udelay(HAL_QSPI_WAIT_30_US);
aic_udelay(HAL_QSPI_WAIT_DELAY_US);
cnt++;
if (cnt > tmo)
return -ETIMEDOUT;
@@ -435,13 +436,13 @@ int qspi_fifo_write_data(u32 base, u8 *data, u32 len, u32 tmo)
qspi_hw_write_fifo(base, data, dolen);
data += dolen;
len -= dolen;
aic_udelay(HAL_QSPI_WAIT_30_US);
aic_udelay(HAL_QSPI_WAIT_DELAY_US);
cnt++;
}
/* Data are written to FIFO, waiting all data are sent out */
while (qspi_hw_get_tx_fifo_cnt(base)) {
aic_udelay(HAL_QSPI_WAIT_30_US);
aic_udelay(HAL_QSPI_WAIT_DELAY_US);
cnt++;
if (cnt > tmo)
return -ETIMEDOUT;
@@ -456,7 +457,7 @@ int qspi_fifo_read_data(u32 base, u8 *data, u32 len, u32 tmo)
while (len) {
dolen = qspi_hw_get_rx_fifo_cnt(base);
if (dolen == 0) {
aic_udelay(HAL_QSPI_WAIT_30_US);
aic_udelay(HAL_QSPI_WAIT_DELAY_US);
cnt++;
if (cnt > tmo)
return -ETIMEDOUT;
@@ -467,29 +468,35 @@ int qspi_fifo_read_data(u32 base, u8 *data, u32 len, u32 tmo)
qspi_hw_read_fifo(base, data, dolen);
data += dolen;
len -= dolen;
aic_udelay(HAL_QSPI_WAIT_30_US);
aic_udelay(HAL_QSPI_WAIT_DELAY_US);
cnt++;
}
return 0;
}
u32 qspi_calc_timeout(u32 bus_hz, u32 bw, u32 len)
u32 qspi_calc_timeout(u32 bus_hz, u32 len)
{
u32 tmo_cnt, tmo_us;
u32 tmo_speed = 100;
if (bus_hz < HAL_QSPI_MIN_FREQ_HZ)
tmo_us = (1000000 * (len * 8 / bw)) / bus_hz;
tmo_us = (1000000 * len * 8) / bus_hz;
else if (bus_hz < 1000000)
tmo_us = (1000 * (len * 8 / bw)) / (bus_hz / 1000);
tmo_us = (1000 * len * 8) / (bus_hz / 1000);
else
tmo_us = (len * 8 / bw) / (bus_hz / 1000000);
tmo_us = (len * 8) / (bus_hz / 1000000);
/* Add 100ms time padding */
tmo_us += 100000;
tmo_cnt = tmo_us / HAL_QSPI_WAIT_PER_CYCLE;
return tmo_cnt;
/* Consider the speed limit of DMA or CPU copy.
*/
if (len >= QSPI_TRANSFER_DATA_LEN_1M)
tmo_speed = ((len / QSPI_CPU_DMA_MIN_SPEED_MS) + 1) * 1000;
return max(tmo_cnt, tmo_speed);
}
static int qspi_master_transfer_cpu_sync(qspi_master_handle *h,
@@ -510,9 +517,10 @@ static int qspi_master_transfer_cpu_sync(qspi_master_handle *h,
if (t->data_len == 0)
return -EINVAL;
tmo_cnt = qspi_calc_timeout(qspi->bus_hz, qspi->bus_width, t->data_len);
tmo_cnt = qspi_calc_timeout(qspi->bus_hz, t->data_len);
/* CPU mode, spend more time */
tmo_cnt *= 10;
qspi_hw_reset_fifo(base);
if (t->tx_data) {
txlen = t->data_len;
@@ -556,7 +564,7 @@ static int qspi_master_wait_dma_done(struct aic_dma_chan *ch, u32 tmo)
u32 left, cnt = 0;
while (hal_dma_chan_tx_status(ch, &left) != DMA_COMPLETE && left) {
aic_udelay(HAL_QSPI_WAIT_30_US);
aic_udelay(HAL_QSPI_WAIT_DELAY_US);
cnt++;
if (cnt > tmo) {
return -ETIMEDOUT;
@@ -586,7 +594,8 @@ static int qspi_master_transfer_dma_sync(qspi_master_handle *h,
if (t->data_len == 0)
return -EINVAL;
tmo_cnt = qspi_calc_timeout(qspi->bus_hz, qspi->bus_width, t->data_len);
tmo_cnt = qspi_calc_timeout(qspi->bus_hz, t->data_len);
qspi_hw_reset_fifo(base);
if (t->tx_data) {
txlen = t->data_len;
@@ -616,9 +625,9 @@ static int qspi_master_transfer_dma_sync(qspi_master_handle *h,
hal_log_err("TX dma chan config failure.\n");
goto out;
}
ret = hal_dma_chan_prep_device(dma_tx, PTR2U32(QSPI_REG_TXD(base)),
ret = hal_dma_prep_mode_device(dma_tx, PTR2U32(QSPI_REG_TXD(base)),
PTR2U32(t->tx_data), txlen,
DMA_MEM_TO_DEV);
DMA_MEM_TO_DEV, TYPE_IO_FAST);
if (ret < 0) {
hal_log_err("TX dma chan prepare failure.\n");
goto out;
@@ -629,16 +638,16 @@ static int qspi_master_transfer_dma_sync(qspi_master_handle *h,
goto out;
}
qspi_hw_start_transfer(base);
ret = qspi_wait_gpdma_tx_done(base, tmo_cnt);
if (ret < 0) {
hal_log_err("TX wait transfer done timeout.\n");
goto tx_stop;
}
ret = qspi_master_wait_dma_done(dma_tx, tmo_cnt);
if (ret < 0) {
hal_log_err("TX wait dma done timeout.\n");
goto tx_stop;
}
ret = qspi_wait_transfer_done(base, tmo_cnt);
if (ret < 0) {
hal_log_err("TX wait transfer done timeout.\n");
goto tx_stop;
}
tx_stop:
qspi_hw_tx_dma_disable(base);
hal_dma_chan_stop(dma_tx);
@@ -670,9 +679,9 @@ static int qspi_master_transfer_dma_sync(qspi_master_handle *h,
qspi_hw_dma_word_enable(base, false);
else
qspi_hw_dma_word_enable(base, true);
ret = hal_dma_chan_prep_device(dma_rx, PTR2U32(t->rx_data),
ret = hal_dma_prep_mode_device(dma_rx, PTR2U32(t->rx_data),
PTR2U32(QSPI_REG_RXD(base)), rxlen,
DMA_DEV_TO_MEM);
DMA_DEV_TO_MEM, TYPE_IO_FAST);
if (ret < 0) {
hal_log_err("RX dma chan prepare failure.\n");
goto out;
@@ -683,7 +692,7 @@ static int qspi_master_transfer_dma_sync(qspi_master_handle *h,
goto out;
}
qspi_hw_start_transfer(base);
ret = qspi_wait_gpdma_rx_done(base, tmo_cnt);
ret = qspi_wait_transfer_done(base, tmo_cnt);
if (ret < 0) {
hal_log_err("RX wait transfer done timeout.\n");
goto rx_stop;
@@ -769,6 +778,7 @@ static int qspi_master_transfer_cpu_async(struct qspi_master_state *qspi,
if (t->data_len == 0)
return -EINVAL;
qspi_hw_reset_fifo(base);
qspi_hw_interrupt_disable(base, ICR_BIT_CPU_MSK);
qspi->status = HAL_QSPI_STATUS_IN_PROGRESS;
if (t->tx_data) {
@@ -848,6 +858,7 @@ static int qspi_master_transfer_dma_async(struct qspi_master_state *qspi,
if (t->data_len == 0)
return -EINVAL;
qspi_hw_reset_fifo(base);
qspi_hw_interrupt_disable(base, ICR_BIT_DMA_MSK);
qspi->status = HAL_QSPI_STATUS_IN_PROGRESS;
if (t->tx_data) {
@@ -874,9 +885,9 @@ static int qspi_master_transfer_dma_async(struct qspi_master_state *qspi,
if (ret)
goto out;
hal_dma_chan_register_cb(dma_tx, qspi_master_dma_tx_callback, qspi);
ret = hal_dma_chan_prep_device(dma_tx, PTR2U32(QSPI_REG_TXD(base)),
ret = hal_dma_prep_mode_device(dma_tx, PTR2U32(QSPI_REG_TXD(base)),
PTR2U32(t->tx_data), txlen,
DMA_MEM_TO_DEV);
DMA_MEM_TO_DEV, TYPE_IO_FAST);
if (ret)
goto out;
ret = hal_dma_chan_start(dma_tx);
@@ -909,9 +920,9 @@ static int qspi_master_transfer_dma_async(struct qspi_master_state *qspi,
else
qspi_hw_dma_word_enable(base, true);
hal_dma_chan_register_cb(dma_rx, qspi_master_dma_rx_callback, qspi);
ret = hal_dma_chan_prep_device(dma_rx, PTR2U32(t->rx_data),
ret = hal_dma_prep_mode_device(dma_rx, PTR2U32(t->rx_data),
PTR2U32(QSPI_REG_RXD(base)), rxlen,
DMA_DEV_TO_MEM);
DMA_DEV_TO_MEM, TYPE_IO_FAST);
if (ret)
goto out;
ret = hal_dma_chan_start(dma_rx);

View File

@@ -27,13 +27,16 @@ extern "C" {
#define QSPI_IS_ASYNC_ALL_DONE(sts, msk) ((sts & msk) == msk)
#define PTR2U32(p) ((u32)(unsigned long)(p))
#define QSPI_TRANSFER_DATA_LEN_1M 0x100000
#define QSPI_CPU_DMA_MIN_SPEED_MS (0x800000 >> 10)
void show_freq(char *msg, u32 id, u32 hz);
void hal_qspi_fifo_reset(u32 base, u32 fifo);
void hal_qspi_show_ists(u32 id, u32 sts);
int qspi_fifo_write_data(u32 base, u8 *data, u32 len, u32 tmo);
int qspi_fifo_read_data(u32 base, u8 *data, u32 len, u32 tmo_us);
int qspi_wait_transfer_done(u32 base, u32 tmo);
u32 qspi_calc_timeout(u32 bus_hz, u32 bw, u32 len);
u32 qspi_calc_timeout(u32 bus_hz, u32 len);
#ifdef __cplusplus
}