/linux-6.14.4/drivers/net/ethernet/intel/ice/ |
D | ice_txrx.c |
      142  static struct netdev_queue *txring_txq(const struct ice_tx_ring *ring)  in txring_txq() function
      184  netdev_tx_reset_queue(txring_txq(tx_ring));  in ice_clean_tx_ring()
      227  netdev_txq_bql_complete_prefetchw(txring_txq(tx_ring));  in ice_clean_tx_irq()
      316  netdev_tx_completed_queue(txring_txq(tx_ring), total_pkts, total_bytes);  in ice_clean_tx_irq()
      325  if (netif_tx_queue_stopped(txring_txq(tx_ring)) &&  in ice_clean_tx_irq()
      327  netif_tx_wake_queue(txring_txq(tx_ring));  in ice_clean_tx_irq()
     1618  netif_tx_stop_queue(txring_txq(tx_ring));  in __ice_maybe_stop_tx()
     1627  netif_tx_start_queue(txring_txq(tx_ring));  in __ice_maybe_stop_tx()
     1779  kick = __netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount,  in ice_tx_map()
     2385  netdev_txq_bql_enqueue_prefetchw(txring_txq(tx_ring));  in ice_xmit_frame_ring()
|
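Every driver in this listing defines its own txring_txq() helper that maps a driver Tx ring to the struct netdev_queue backing it; the BQL and queue start/stop calls listed here all operate on its return value. Below is a minimal sketch of how such a helper is typically written; the ring type and its field names (netdev, q_index) are illustrative assumptions, since each driver (ice, igb, igc, ...) uses its own ring structure, and fbnic passes the net_device explicitly rather than storing it in the ring.

    /*
     * Minimal sketch (not driver code): the shape of a txring_txq() helper.
     * The ring struct and its field names below are assumptions for
     * illustration only.
     */
    #include <linux/netdevice.h>

    struct example_tx_ring {                /* hypothetical stand-in for e.g. struct ice_tx_ring */
            struct net_device *netdev;      /* owning net_device */
            u16 q_index;                    /* Tx queue index on that device */
    };

    static inline struct netdev_queue *txring_txq(const struct example_tx_ring *ring)
    {
            /* resolve the software Tx queue (BQL state, xmit lock, ...) for this ring */
            return netdev_get_tx_queue(ring->netdev, ring->q_index);
    }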
/linux-6.14.4/drivers/net/ethernet/intel/igb/ |
D | igb.h |
      811  static inline struct netdev_queue *txring_txq(const struct igb_ring *tx_ring)  in txring_txq() function
      819  lockdep_assert_held(&txring_txq(ring)->_xmit_lock);  in igb_xdp_ring_update_tail()
|
D | igb_xsk.c |
      521  netdev_tx_sent_queue(txring_txq(tx_ring), total_bytes);  in igb_xmit_zc()
|
D | igb_main.c |
     2952  nq = txring_txq(tx_ring);  in igb_xdp_xmit_back()
     2989  nq = txring_txq(tx_ring);  in igb_xdp_xmit()
     4968  netdev_tx_reset_queue(txring_txq(tx_ring));  in igb_clean_tx_ring()
     6306  netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);  in igb_tx_map()
     6334  if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {  in igb_tx_map()
     6441  netdev_tx_sent_queue(txring_txq(tx_ring), tx_head->bytecount);  in igb_xmit_xdp_ring()
     6455  if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more())  in igb_xmit_xdp_ring()
     8382  netdev_tx_completed_queue(txring_txq(tx_ring),  in igb_clean_tx_irq()
     8400  nq = txring_txq(tx_ring);  in igb_clean_tx_irq()
     8949  nq = txring_txq(tx_ring);  in igb_finalize_xdp()
|
D | igb_ethtool.c |
     1877  netdev_tx_reset_queue(txring_txq(tx_ring));  in igb_clean_test_rings()
|
/linux-6.14.4/drivers/net/ethernet/intel/iavf/ |
D | iavf_txrx.h |
      400  static inline struct netdev_queue *txring_txq(const struct iavf_ring *ring)  in txring_txq() function
|
D | iavf_txrx.c |
       85  netdev_tx_reset_queue(txring_txq(tx_ring));  in iavf_clean_tx_ring()
      327  netdev_tx_completed_queue(txring_txq(tx_ring),  in iavf_clean_tx_irq()
     1998  netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);  in iavf_tx_map()
     2027  if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {  in iavf_tx_map()
|
/linux-6.14.4/drivers/net/ethernet/intel/i40e/ |
D | i40e_txrx.h |
      561  static inline struct netdev_queue *txring_txq(const struct i40e_ring *ring)  in txring_txq() function
|
D | i40e_txrx.c |
      813  netdev_tx_reset_queue(txring_txq(tx_ring));  in i40e_clean_tx_ring()
     1030  netdev_tx_completed_queue(txring_txq(tx_ring),  in i40e_clean_tx_irq()
     3657  netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);  in i40e_tx_map()
     3698  if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {  in i40e_tx_map()
|
/linux-6.14.4/drivers/net/ethernet/intel/fm10k/ |
D | fm10k_main.c |
     1006  netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);  in fm10k_tx_map()
     1029  if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {  in fm10k_tx_map()
     1306  netdev_tx_completed_queue(txring_txq(tx_ring),  in fm10k_clean_tx_irq()
|
D | fm10k.h |
      167  static inline struct netdev_queue *txring_txq(const struct fm10k_ring *ring)  in txring_txq() function
|
D | fm10k_netdev.c |
      187  netdev_tx_reset_queue(txring_txq(tx_ring));  in fm10k_clean_tx_ring()
|
/linux-6.14.4/drivers/net/ethernet/meta/fbnic/ |
D | fbnic_txrx.c |
      101  static struct netdev_queue *txring_txq(const struct net_device *dev,  in txring_txq() function
      111  struct netdev_queue *txq = txring_txq(dev, ring);  in fbnic_maybe_stop_tx()
      122  struct netdev_queue *dev_queue = txring_txq(skb->dev, ring);  in fbnic_tx_sent_queue()
      442  txq = txring_txq(nv->napi.dev, ring);  in fbnic_clean_twq0()
|
/linux-6.14.4/drivers/net/ethernet/intel/igc/ |
D | igc_main.c |
      255  netdev_tx_reset_queue(txring_txq(tx_ring));  in igc_clean_tx_ring()
     1129  netdev_tx_sent_queue(txring_txq(ring), skb->len);  in igc_init_tx_empty_descriptor()
     1388  netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);  in igc_tx_map()
     1416  if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {  in igc_tx_map()
     2437  netdev_tx_sent_queue(txring_txq(ring), head->bytecount);  in igc_xdp_init_tx_descriptor()
     2492  nq = txring_txq(ring);  in igc_xdp_xmit_back()
     2569  nq = txring_txq(ring);  in igc_finalize_xdp()
     3021  struct netdev_queue *nq = txring_txq(ring);  in igc_xdp_xmit_zc()
     3090  netdev_tx_sent_queue(txring_txq(ring), xdp_desc.len);  in igc_xdp_xmit_zc()
     3211  netdev_tx_completed_queue(txring_txq(tx_ring),  in igc_clean_tx_irq()
     [all …]
|
D | igc.h |
      678  static inline struct netdev_queue *txring_txq(const struct igc_ring *tx_ring)  in txring_txq() function
|
/linux-6.14.4/drivers/net/ethernet/intel/ixgbe/ |
D | ixgbe.h |
     1025  static inline struct netdev_queue *txring_txq(const struct ixgbe_ring *ring)  in txring_txq() function
|
D | ixgbe_main.c |
     6454  netdev_tx_reset_queue(txring_txq(tx_ring));  in ixgbe_clean_tx_ring()
     8767  netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);  in ixgbe_tx_map()
     8795  if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {  in ixgbe_tx_map()
|
D | ixgbe_ethtool.c |
     2087  netdev_tx_reset_queue(txring_txq(tx_ring));  in ixgbe_clean_test_rings()
|
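Taken together, the call sites above follow the standard byte queue limits (BQL) lifecycle around txring_txq(): queued bytes are accounted with netdev_tx_sent_queue() (or __netdev_tx_sent_queue()) when descriptors are mapped, reported back with netdev_tx_completed_queue() from the Tx completion NAPI path, and cleared with netdev_tx_reset_queue() when a ring is torn down, while netif_xmit_stopped()/netdev_xmit_more() decide whether the tail register must be written immediately. The sketch below, reusing the hypothetical example_tx_ring/txring_txq() from earlier, shows that pattern in one place; the my_* function names are invented for illustration, and only the netdev_*/netif_* calls are the real kernel API.

    /* enqueue path: account queued bytes, then decide whether to ring the doorbell */
    static void my_tx_map(struct example_tx_ring *ring, unsigned int bytecount)
    {
            netdev_tx_sent_queue(txring_txq(ring), bytecount);

            if (netif_xmit_stopped(txring_txq(ring)) || !netdev_xmit_more()) {
                    /* write the hardware tail register here */
            }
    }

    /* completion path (NAPI): report finished work so BQL can adapt its limit */
    static void my_clean_tx_irq(struct example_tx_ring *ring,
                                unsigned int total_pkts, unsigned int total_bytes)
    {
            netdev_tx_completed_queue(txring_txq(ring), total_pkts, total_bytes);
    }

    /* ring teardown: clear the BQL state along with the descriptors */
    static void my_clean_tx_ring(struct example_tx_ring *ring)
    {
            netdev_tx_reset_queue(txring_txq(ring));
    }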