@@ -485,17 +485,18 @@ static void m_can_clean(struct net_device *net)
 {
	struct m_can_classdev *cdev = netdev_priv(net);
 
-	if (cdev->tx_skb) {
-		u32 putidx = 0;
+	if (cdev->tx_ops) {
+		for (int i = 0; i != cdev->tx_fifo_size; ++i) {
+			if (!cdev->tx_ops[i].skb)
+				continue;
 
-		net->stats.tx_errors++;
-		if (cdev->version > 30)
-			putidx = FIELD_GET(TXFQS_TFQPI_MASK,
-					   m_can_read(cdev, M_CAN_TXFQS));
-
-		can_free_echo_skb(cdev->net, putidx, NULL);
-		cdev->tx_skb = NULL;
+			net->stats.tx_errors++;
+			cdev->tx_ops[i].skb = NULL;
+		}
 	}
+
+	for (int i = 0; i != cdev->can.echo_skb_max; ++i)
+		can_free_echo_skb(cdev->net, i, NULL);
 }
 
 /* For peripherals, pass skb to rx-offload, which will push skb from
@@ -1685,8 +1686,9 @@ static int m_can_close(struct net_device *dev)
	m_can_clk_stop(cdev);
	free_irq(dev->irq, dev);
 
+	m_can_clean(dev);
+
	if (cdev->is_peripheral) {
-		cdev->tx_skb = NULL;
		destroy_workqueue(cdev->tx_wq);
		cdev->tx_wq = NULL;
		can_rx_offload_disable(&cdev->offload);
@@ -1713,20 +1715,18 @@ static int m_can_next_echo_skb_occupied(struct net_device *dev, u32 putidx)
	return !!cdev->can.echo_skb[next_idx];
 }
 
-static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev)
+static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev,
+				    struct sk_buff *skb)
 {
-	struct canfd_frame *cf = (struct canfd_frame *)cdev->tx_skb->data;
+	struct canfd_frame *cf = (struct canfd_frame *)skb->data;
	u8 len_padded = DIV_ROUND_UP(cf->len, 4);
	struct m_can_fifo_element fifo_element;
	struct net_device *dev = cdev->net;
-	struct sk_buff *skb = cdev->tx_skb;
	u32 cccr, fdflags;
	u32 txfqs;
	int err;
	u32 putidx;
 
-	cdev->tx_skb = NULL;
-
	/* Generate ID field for TX buffer Element */
	/* Common to all supported M_CAN versions */
	if (cf->can_id & CAN_EFF_FLAG) {
@@ -1850,10 +1850,31 @@ static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev)
 
 static void m_can_tx_work_queue(struct work_struct *ws)
 {
-	struct m_can_classdev *cdev = container_of(ws, struct m_can_classdev,
-						   tx_work);
+	struct m_can_tx_op *op = container_of(ws, struct m_can_tx_op, work);
+	struct m_can_classdev *cdev = op->cdev;
+	struct sk_buff *skb = op->skb;
 
-	m_can_tx_handler(cdev);
+	op->skb = NULL;
+	m_can_tx_handler(cdev, skb);
+}
+
+static void m_can_tx_queue_skb(struct m_can_classdev *cdev, struct sk_buff *skb)
+{
+	cdev->tx_ops[cdev->next_tx_op].skb = skb;
+	queue_work(cdev->tx_wq, &cdev->tx_ops[cdev->next_tx_op].work);
+
+	++cdev->next_tx_op;
+	if (cdev->next_tx_op >= cdev->tx_fifo_size)
+		cdev->next_tx_op = 0;
+}
+
+static netdev_tx_t m_can_start_peripheral_xmit(struct m_can_classdev *cdev,
+					       struct sk_buff *skb)
+{
+	netif_stop_queue(cdev->net);
+	m_can_tx_queue_skb(cdev, skb);
+
+	return NETDEV_TX_OK;
 }
 
 static netdev_tx_t m_can_start_xmit(struct sk_buff *skb,
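The work items above rely on a per-slot struct m_can_tx_op and on the tx_ops, next_tx_op and tx_fifo_size members of struct m_can_classdev, none of which are defined in the hunks shown here. Going only by the fields this hunk touches, the companion m_can.h change would look roughly like the sketch below; the exact layout and any extra members are an assumption, not part of this diff.

/* Sketch only -- inferred from the fields used above, not the actual header change. */
struct m_can_tx_op {
	struct m_can_classdev *cdev;	/* back-pointer used by m_can_tx_work_queue() */
	struct work_struct work;	/* one work item per TX FIFO slot */
	struct sk_buff *skb;		/* frame owned by this slot, NULL when idle */
};

/* Assumed additions to struct m_can_classdev: */
	struct m_can_tx_op *tx_ops;	/* array of tx_fifo_size slots (peripheral only) */
	int tx_fifo_size;		/* min(TX buffer, TX event FIFO) elements, at least 1 */
	int next_tx_op;			/* round-robin index into tx_ops */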
@@ -1864,30 +1885,15 @@ static netdev_tx_t m_can_start_xmit(struct sk_buff *skb,
	if (can_dev_dropped_skb(dev, skb))
		return NETDEV_TX_OK;
 
-	if (cdev->is_peripheral) {
-		if (cdev->tx_skb) {
-			netdev_err(dev, "hard_xmit called while tx busy\n");
-			return NETDEV_TX_BUSY;
-		}
-
-		if (cdev->can.state == CAN_STATE_BUS_OFF) {
-			m_can_clean(dev);
-		} else {
-			/* Need to stop the queue to avoid numerous requests
-			 * from being sent. Suggested improvement is to create
-			 * a queueing mechanism that will queue the skbs and
-			 * process them in order.
-			 */
-			cdev->tx_skb = skb;
-			netif_stop_queue(cdev->net);
-			queue_work(cdev->tx_wq, &cdev->tx_work);
-		}
-	} else {
-		cdev->tx_skb = skb;
-		return m_can_tx_handler(cdev);
+	if (cdev->can.state == CAN_STATE_BUS_OFF) {
+		m_can_clean(cdev->net);
+		return NETDEV_TX_OK;
	}
 
-	return NETDEV_TX_OK;
+	if (cdev->is_peripheral)
+		return m_can_start_peripheral_xmit(cdev, skb);
+	else
+		return m_can_tx_handler(cdev, skb);
 }
 
 static enum hrtimer_restart hrtimer_callback(struct hrtimer *timer)
@@ -1927,15 +1933,17 @@ static int m_can_open(struct net_device *dev)
 
	/* register interrupt handler */
	if (cdev->is_peripheral) {
-		cdev->tx_skb = NULL;
-		cdev->tx_wq = alloc_workqueue("mcan_wq",
-					      WQ_FREEZABLE | WQ_MEM_RECLAIM, 0);
+		cdev->tx_wq = alloc_ordered_workqueue("mcan_wq",
+						      WQ_FREEZABLE | WQ_MEM_RECLAIM);
		if (!cdev->tx_wq) {
			err = -ENOMEM;
			goto out_wq_fail;
		}
 
-		INIT_WORK(&cdev->tx_work, m_can_tx_work_queue);
+		for (int i = 0; i != cdev->tx_fifo_size; ++i) {
+			cdev->tx_ops[i].cdev = cdev;
+			INIT_WORK(&cdev->tx_ops[i].work, m_can_tx_work_queue);
+		}
 
		err = request_threaded_irq(dev->irq, NULL, m_can_isr,
					   IRQF_ONESHOT,
@@ -2228,6 +2236,19 @@ int m_can_class_register(struct m_can_classdev *cdev)
 {
	int ret;
 
+	cdev->tx_fifo_size = max(1, min(cdev->mcfg[MRAM_TXB].num,
+					cdev->mcfg[MRAM_TXE].num));
+	if (cdev->is_peripheral) {
+		cdev->tx_ops =
+			devm_kzalloc(cdev->dev,
+				     cdev->tx_fifo_size * sizeof(*cdev->tx_ops),
+				     GFP_KERNEL);
+		if (!cdev->tx_ops) {
+			dev_err(cdev->dev, "Failed to allocate tx_ops for workqueue\n");
+			return -ENOMEM;
+		}
+	}
+
	if (cdev->pm_clock_support) {
		ret = m_can_clk_start(cdev);
		if (ret)
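A note on the sizing above (an inference from this hunk, not text from the commit): tx_fifo_size is clamped to min(cdev->mcfg[MRAM_TXB].num, cdev->mcfg[MRAM_TXE].num) but never below 1, so a message-RAM layout with, say, 16 TX buffer elements and 8 TX event FIFO elements yields tx_fifo_size = 8, i.e. one m_can_tx_op slot per event-FIFO entry. Because m_can_open() now uses an ordered workqueue, the queued slots execute one at a time in submission order, so frames reach the hardware TX FIFO in the order m_can_start_xmit() accepted them.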