@@ -90,7 +90,7 @@ struct ctucan_priv {
struct list_head peers_on_pdev;
};
#define CTUCAN_FLAG_RX_SCHED 1
// #define CTUCAN_FLAG_RX_SCHED 1
#define CTUCAN_FLAG_RX_FFW_BUFFERED 2
static int ctucan_reset(struct net_device *ndev)
@@ -223,10 +223,11 @@ static int ctucan_chip_start(struct net_device *ndev)
int_msk.u32 = ~int_ena.u32; /* mask all disabled interrupts */
clear_bit(CTUCAN_FLAG_RX_SCHED, &priv->drv_flags);
// clear_bit(CTUCAN_FLAG_RX_SCHED, &priv->drv_flags);
ctu_can_fd_int_mask(&priv->p, int_msk, int_enamask_mask);
ctu_can_fd_int_ena(&priv->p, int_ena, int_enamask_mask);
/* It's after reset, so there is no need to clear anything */
ctu_can_fd_int_mask_set(&priv->p, int_msk);
ctu_can_fd_int_ena_set(&priv->p, int_ena);
priv->can.state = CAN_STATE_ERROR_ACTIVE;
@@ -488,16 +489,13 @@ static int ctucan_rx_poll(struct napi_struct *napi, int quota)
int work_done = 0;
union ctu_can_fd_status status;
u32 framecnt;
u32 i;
//netdev_dbg(ndev, "ctucan_rx_poll");
framecnt = ctu_can_fd_get_rx_frame_count(&priv->p);
netdev_dbg(ndev, "rx_poll: %d frames in RX FIFO", framecnt);
while (framecnt && work_done < quota) {
netdev_dbg(ndev, "rx_poll: %d frames in RX FIFO", framecnt);
for (i = 0; i < framecnt; ++i) {
ctucan_rx(ndev);
work_done++;
}
ctucan_rx(ndev);
work_done++;
framecnt = ctu_can_fd_get_rx_frame_count(&priv->p);
}
@@ -527,7 +525,7 @@ static int ctucan_rx_poll(struct napi_struct *napi, int quota)
if (work_done)
can_led_event(ndev, CAN_LED_EVENT_RX);
if (work_done < quota) {
if (!framecnt) {
if (napi_complete_done(napi, work_done)) {
union ctu_can_fd_int_stat iec;
/* Clear and enable RBNEI. It is level-triggered, so
@@ -535,9 +533,9 @@ static int ctucan_rx_poll(struct napi_struct *napi, int quota)
*/
iec.u32 = 0;
iec.s.rbnei = 1;
clear_bit(CTUCAN_FLAG_RX_SCHED, &priv->drv_flags);
// clear_bit(CTUCAN_FLAG_RX_SCHED, &priv->drv_flags);
ctu_can_fd_int_clr(&priv->p, iec);
ctu_can_fd_int_ena_set(&priv->p, iec);
ctu_can_fd_int_mask_clr(&priv->p, iec);
}
}
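The completion check above now ties napi_complete_done() and the RBNEI re-enable to a freshly read, zero frame count (!framecnt) rather than to the quota comparison. A minimal userspace model of that decision follows; the names and the standalone program structure are assumptions for illustration, not the driver's code.

/* Minimal userspace model of the completion check; all names here are
 * hypothetical and only the !framecnt condition mirrors the hunk above.
 */
#include <stdbool.h>
#include <stdio.h>

static unsigned int rx_fifo = 6;   /* frames currently in the RX FIFO     */
static bool rbnei_masked = true;   /* RBNEI stays masked while NAPI polls */

/* One NAPI-style pass: consume up to quota frames, then decide whether
 * to complete (and re-enable the interrupt) based on the FIFO state.
 */
static int poll_pass(int quota)
{
	int work_done = 0;
	unsigned int framecnt = rx_fifo;

	while (framecnt && work_done < quota) {
		rx_fifo--;              /* stands in for ctucan_rx() */
		work_done++;
		framecnt = rx_fifo;     /* re-read the frame counter */
	}

	/* Complete only when the FIFO was observed empty, rather than
	 * inferring it from the quota comparison.
	 */
	if (!framecnt)
		rbnei_masked = false;   /* clear + unmask RBNEI here */

	return work_done;
}

int main(void)
{
	while (rbnei_masked) {
		int done = poll_pass(4);

		printf("pass: %d frames done, %u left, masked=%d\n",
		       done, rx_fifo, rbnei_masked);
	}
	return 0;
}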
@@ -627,13 +625,11 @@ static void ctucan_tx_interrupt(struct net_device *ndev)
/* do not clear nor wake */
return;
}
/* some_buffers_processed is still false */
goto clear;
}
priv->txb_tail++;
first = false;
some_buffers_processed = true;
/* Adjust priorities *before* marking the buffer
* as empty.
*/
@@ -641,11 +637,16 @@ static void ctucan_tx_interrupt(struct net_device *ndev)
ctu_can_fd_txt_set_empty(&priv->p, txb_idx);
}
clear:
/* Clear the interrupt again as not to receive it again for
* a buffer we already handled (possibly causing the bug log)
*/
ctu_can_fd_int_clr(&priv->p, icr);
/* If no buffers were processed this time, we cannot
* clear - that would introduce a race condition. */
if (some_buffers_processed) {
/* Clear the interrupt again so as not to receive it again
* for a buffer we already handled (possibly causing
* the bug log) */
ctu_can_fd_int_clr(&priv->p, icr);
}
} while (some_buffers_processed);
can_led_event(ndev, CAN_LED_EVENT_TX);
netif_wake_queue(ndev);
}
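The race the new comment warns about can be made concrete with a scripted, single-threaded userspace model; all names are hypothetical and the "late" completion is injected at a fixed point rather than truly racing. With an unconditional clear, a pass that processed nothing wipes the latch of a completion that arrived after the buffer scan; the conditional clear leaves that latch pending, so the handler is invoked again.

/* Scripted userspace model of the conditional re-clear; all names are
 * hypothetical and the "late" completion is injected, not truly racing.
 */
#include <stdbool.h>
#include <stdio.h>

static bool txbhci_latched;      /* latched TX-buffer-done interrupt    */
static unsigned int hw_done;     /* buffers the "hardware" has finished */
static unsigned int reaped;      /* buffers the handler has processed   */

static void hw_complete_one(void)
{
	hw_done++;
	txbhci_latched = true;
}

static void tx_handler(bool conditional_clear)
{
	bool some_buffers_processed;
	bool injected = false;

	do {
		some_buffers_processed = false;
		while (reaped < hw_done) {       /* scan finished buffers */
			reaped++;
			some_buffers_processed = true;
		}

		/* Inject a completion in the window between the (empty)
		 * final scan and the clear below.
		 */
		if (!some_buffers_processed && !injected) {
			hw_complete_one();
			injected = true;
		}

		if (!conditional_clear || some_buffers_processed)
			txbhci_latched = false;  /* re-clear the interrupt */
	} while (some_buffers_processed);
}

int main(void)
{
	hw_complete_one();
	tx_handler(false);       /* unconditional clear (old behaviour) */
	printf("unconditional: done=%u reaped=%u irq=%d (completion lost)\n",
	       hw_done, reaped, txbhci_latched);

	hw_done = 0;
	reaped = 0;
	txbhci_latched = false;
	hw_complete_one();
	tx_handler(true);        /* clear only if something was processed */
	printf("conditional:   done=%u reaped=%u irq=%d (will re-fire)\n",
	       hw_done, reaped, txbhci_latched);
	return 0;
}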
@@ -674,8 +675,8 @@ static irqreturn_t ctucan_interrupt(int irq, void *dev_id)
/* Get the interrupt status */
isr = ctu_can_fd_int_sts(&priv->p);
if (test_bit(CTUCAN_FLAG_RX_SCHED, &priv->drv_flags))
isr.s.rbnei = 0;
//if (test_bit(CTUCAN_FLAG_RX_SCHED, &priv->drv_flags))
// isr.s.rbnei = 0;
if (!isr.u32)
return irq_loops ? IRQ_HANDLED : IRQ_NONE;
@@ -685,19 +686,20 @@ static irqreturn_t ctucan_interrupt(int irq, void *dev_id)
netdev_dbg(ndev, "RXBNEI");
icr.u32 = 0;
icr.s.rbnei = 1;
/* Clear and disable RXBNEI, schedule NAPI */
/* Mask RXBNEI first, then clear the interrupt,
* then schedule NAPI. Even if another IRQ fires,
* isr.s.rbnei will always be 0 (masked).
*/
ctu_can_fd_int_mask_set(&priv->p, icr);
ctu_can_fd_int_clr(&priv->p, icr);
ctu_can_fd_int_ena_clr(&priv->p, icr);
set_bit(CTUCAN_FLAG_RX_SCHED, &priv->drv_flags);
// set_bit(CTUCAN_FLAG_RX_SCHED, &priv->drv_flags);
napi_schedule(&priv->napi);
}
/* TX Buffer HW Command Interrupt */
if (isr.s.txbhci) {
netdev_dbg(ndev, "TXBHCI");
icr.u32 = 0;
icr.s.txbhci = 1;
ctu_can_fd_int_clr(&priv->p, icr);
/* Cleared inside */
ctucan_tx_interrupt(ndev);
}
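The ordering described in the RXBNEI comment above can be modelled with a simplified status/mask register pair (an assumption for illustration, not the core's register map): masking before clearing means the level source's immediate reassertion stays latched but invisible to the CPU until the poll routine has drained the FIFO and unmasks it again.

/* Minimal userspace model of the mask-then-clear ordering for the
 * level-triggered RBNEI source; the status/mask register behaviour is a
 * simplified assumption, not the core's register map.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RBNEI 0x1u

static uint32_t status;        /* latched interrupt status                */
static uint32_t mask;          /* 1 = source masked, invisible to the CPU */
static unsigned int rx_fifo;   /* RBNEI re-asserts while this is non-zero */

static void relevel(void)
{
	if (rx_fifo)
		status |= RBNEI;   /* level source: reasserts immediately */
}

static bool cpu_sees_irq(void)
{
	return (status & ~mask) != 0;
}

static void isr_rbnei(void)
{
	/* Mask first, then clear: the immediate reassertion (the FIFO is
	 * still non-empty) is latched again but stays masked, so the ISR
	 * is not re-entered for RX while the poll is pending.
	 */
	mask |= RBNEI;
	status &= ~RBNEI;
	relevel();
	printf("isr:  poll scheduled, cpu_sees_irq=%d\n", cpu_sees_irq());
}

static void napi_poll(void)
{
	while (rx_fifo) {
		rx_fifo--;         /* stands in for ctucan_rx() */
		printf("poll: frame consumed, %u left\n", rx_fifo);
	}
	/* FIFO drained: clear the latched bit, then unmask RBNEI again. */
	status &= ~RBNEI;
	mask &= ~RBNEI;
	relevel();
	printf("poll: complete, cpu_sees_irq=%d\n", cpu_sees_irq());
}

int main(void)
{
	rx_fifo = 3;
	relevel();
	if (cpu_sees_irq())
		isr_rbnei();
	napi_poll();
	return 0;
}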
@@ -722,8 +724,8 @@ static irqreturn_t ctucan_interrupt(int irq, void *dev_id)
imask.u32 = 0xffffffff;
ival.u32 = 0;
ctu_can_fd_int_ena(&priv->p, imask, ival);
ctu_can_fd_int_mask(&priv->p, imask, ival);
ctu_can_fd_int_ena_clr(&priv->p, ival);
ctu_can_fd_int_mask_set(&priv->p, imask);
}
return IRQ_HANDLED;
@@ -739,16 +741,16 @@ static irqreturn_t ctucan_interrupt(int irq, void *dev_id)
static void ctucan_chip_stop(struct net_device *ndev)
{
struct ctucan_priv *priv = netdev_priv(ndev);
union ctu_can_fd_int_stat ena, mask;
union ctu_can_fd_int_stat mask;
netdev_dbg(ndev, "ctucan_chip_stop");
ena.u32 = 0;
mask.u32 = 0xffffffff;
/* Disable interrupts and disable can */
ctu_can_fd_int_ena(&priv->p, ena, mask);
ctu_can_fd_int_mask_set(&priv->p, mask);
ctu_can_fd_enable(&priv->p, false);
clear_bit(CTUCAN_FLAG_RX_SCHED, &priv->drv_flags);
// clear_bit(CTUCAN_FLAG_RX_SCHED, &priv->drv_flags);
priv->can.state = CAN_STATE_STOPPED;
}
......
@@ -370,6 +370,32 @@ void ctu_can_fd_int_ena(struct ctucanfd_priv *priv,
union ctu_can_fd_int_stat mask,
union ctu_can_fd_int_stat val);
/*
* Mask interrupts of CTU CAN FD Core.
*
* Arguments:
* priv Private info
* mask Mask of interrupts which should be masked.
*/
static inline void ctu_can_fd_int_mask_set(struct ctucanfd_priv *priv,
union ctu_can_fd_int_stat mask)
{
priv->write_reg(priv, CTU_CAN_FD_INT_MASK_SET, mask.u32);
}
/*
* Unmask interrupts of CTU CAN FD Core.
*
* Arguments:
* priv Private info
* mask Mask of interrupts which should be unmasked.
*/
static inline void ctu_can_fd_int_mask_clr(struct ctucanfd_priv *priv,
union ctu_can_fd_int_stat mask)
{
priv->write_reg(priv, CTU_CAN_FD_INT_MASK_CLR, mask.u32);
}
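For illustration, a standalone mock of the write-one-to-set / write-one-to-clear register style these two helpers rely on; the register offsets, struct layout and bit values below are invented for the mock. The point is that MASK_SET/MASK_CLR writes update individual mask bits without a read-modify-write of a combined mask register.

/* Standalone mock of the set/clear register style; offsets, struct layout
 * and bit values are invented for the mock.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

enum { MOCK_INT_MASK_SET = 0x24, MOCK_INT_MASK_CLR = 0x28 }; /* fake offsets */

struct mock_priv {
	uint32_t mask_shadow;   /* what the core's interrupt mask would hold */
};

static void mock_write_reg(struct mock_priv *priv, uint32_t reg, uint32_t val)
{
	/* SET/CLR register pairs update individual bits without a
	 * read-modify-write of a combined mask register.
	 */
	if (reg == MOCK_INT_MASK_SET)
		priv->mask_shadow |= val;
	else if (reg == MOCK_INT_MASK_CLR)
		priv->mask_shadow &= ~val;
	printf("write 0x%02" PRIx32 " <- 0x%08" PRIx32 ", mask now 0x%08" PRIx32 "\n",
	       reg, val, priv->mask_shadow);
}

static void mock_int_mask_set(struct mock_priv *priv, uint32_t mask)
{
	mock_write_reg(priv, MOCK_INT_MASK_SET, mask);
}

static void mock_int_mask_clr(struct mock_priv *priv, uint32_t mask)
{
	mock_write_reg(priv, MOCK_INT_MASK_CLR, mask);
}

int main(void)
{
	struct mock_priv priv = { 0 };
	uint32_t rbnei = 0x1, txbhci = 0x2;   /* arbitrary bit positions */

	mock_int_mask_set(&priv, rbnei);      /* mask RX-buffer-not-empty   */
	mock_int_mask_set(&priv, txbhci);     /* mask TX command interrupt  */
	mock_int_mask_clr(&priv, rbnei);      /* unmask RX-buffer-not-empty */
	return 0;
}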
/*
* Mask/Unmask interrupts of CTU CAN FD Core.
*
......