Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/ethernet/marvell/mvneta.c              10
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c    10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c   22
3 files changed, 36 insertions, 6 deletions
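
The diff below converts the XDP TX completion paths of mvneta, mvpp2 and mlx5 from per-frame xdp_return_frame() to the bulk return helpers from <net/xdp.h> (xdp_frame_bulk_init(), xdp_return_frame_bulk(), xdp_flush_frame_bulk()), so completed frames can be handed back to their memory allocator in batches rather than one at a time, and it takes rcu_read_lock() around the bulk calls where the completion path does not already run under RCU. The following is a minimal sketch of the pattern the hunks apply; every foo_* name is a hypothetical driver invented for illustration, and only struct xdp_frame_bulk and its helpers come from the kernel headers:

#include <linux/netdevice.h>
#include <linux/rcupdate.h>
#include <linux/skbuff.h>
#include <net/xdp.h>

/* Illustrative per-descriptor bookkeeping; not taken from any real driver. */
struct foo_tx_buf {
	bool is_xdp;
	struct sk_buff *skb;
	struct xdp_frame *xdpf;
};

struct foo_txq {
	struct foo_tx_buf *buf;
};

static void foo_txq_bufs_free(struct foo_txq *txq, int num)
{
	struct xdp_frame_bulk bq;
	int i;

	xdp_frame_bulk_init(&bq);

	rcu_read_lock(); /* xdp_return_frame_bulk() relies on RCU-protected state */

	for (i = 0; i < num; i++) {
		struct foo_tx_buf *buf = &txq->buf[i];

		if (buf->is_xdp)
			xdp_return_frame_bulk(buf->xdpf, &bq);
		else
			dev_kfree_skb_any(buf->skb);
	}

	/* Return any frames still batched in bq before leaving the RCU section. */
	xdp_flush_frame_bulk(&bq);

	rcu_read_unlock();
}
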
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 54b0bf574c05..183530ed4d1d 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -1834,8 +1834,13 @@ static void mvneta_txq_bufs_free(struct mvneta_port *pp,
struct netdev_queue *nq, bool napi)
{
unsigned int bytes_compl = 0, pkts_compl = 0;
+ struct xdp_frame_bulk bq;
int i;
+ xdp_frame_bulk_init(&bq);
+
+ rcu_read_lock(); /* need for xdp_return_frame_bulk */
+
for (i = 0; i < num; i++) {
struct mvneta_tx_buf *buf = &txq->buf[txq->txq_get_index];
struct mvneta_tx_desc *tx_desc = txq->descs +
@@ -1857,9 +1862,12 @@ static void mvneta_txq_bufs_free(struct mvneta_port *pp,
if (napi && buf->type == MVNETA_TYPE_XDP_TX)
xdp_return_frame_rx_napi(buf->xdpf);
else
- xdp_return_frame(buf->xdpf);
+ xdp_return_frame_bulk(buf->xdpf, &bq);
}
}
+ xdp_flush_frame_bulk(&bq);
+
+ rcu_read_unlock();
netdev_tx_completed_queue(nq, pkts_compl, bytes_compl);
}
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index f6616c8933ca..3069e192d773 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -2440,8 +2440,13 @@ static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
struct mvpp2_tx_queue *txq,
struct mvpp2_txq_pcpu *txq_pcpu, int num)
{
+ struct xdp_frame_bulk bq;
int i;
+ xdp_frame_bulk_init(&bq);
+
+ rcu_read_lock(); /* need for xdp_return_frame_bulk */
+
for (i = 0; i < num; i++) {
struct mvpp2_txq_pcpu_buf *tx_buf =
txq_pcpu->buffs + txq_pcpu->txq_get_index;
@@ -2454,10 +2459,13 @@ static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
dev_kfree_skb_any(tx_buf->skb);
else if (tx_buf->type == MVPP2_TYPE_XDP_TX ||
tx_buf->type == MVPP2_TYPE_XDP_NDO)
- xdp_return_frame(tx_buf->xdpf);
+ xdp_return_frame_bulk(tx_buf->xdpf, &bq);
mvpp2_txq_inc_get(txq_pcpu);
}
+ xdp_flush_frame_bulk(&bq);
+
+ rcu_read_unlock();
}
static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
index ae90d533a350..2e3e78b0f333 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
@@ -366,7 +366,8 @@ mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
static void mlx5e_free_xdpsq_desc(struct mlx5e_xdpsq *sq,
struct mlx5e_xdp_wqe_info *wi,
u32 *xsk_frames,
- bool recycle)
+ bool recycle,
+ struct xdp_frame_bulk *bq)
{
struct mlx5e_xdp_info_fifo *xdpi_fifo = &sq->db.xdpi_fifo;
u16 i;
@@ -379,7 +380,7 @@ static void mlx5e_free_xdpsq_desc(struct mlx5e_xdpsq *sq,
/* XDP_TX from the XSK RQ and XDP_REDIRECT */
dma_unmap_single(sq->pdev, xdpi.frame.dma_addr,
xdpi.frame.xdpf->len, DMA_TO_DEVICE);
- xdp_return_frame(xdpi.frame.xdpf);
+ xdp_return_frame_bulk(xdpi.frame.xdpf, bq);
break;
case MLX5E_XDP_XMIT_MODE_PAGE:
/* XDP_TX from the regular RQ */
@@ -397,12 +398,15 @@ static void mlx5e_free_xdpsq_desc(struct mlx5e_xdpsq *sq,
bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq)
{
+ struct xdp_frame_bulk bq;
struct mlx5e_xdpsq *sq;
struct mlx5_cqe64 *cqe;
u32 xsk_frames = 0;
u16 sqcc;
int i;
+ xdp_frame_bulk_init(&bq);
+
sq = container_of(cq, struct mlx5e_xdpsq, cq);
if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
@@ -434,7 +438,7 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq)
sqcc += wi->num_wqebbs;
- mlx5e_free_xdpsq_desc(sq, wi, &xsk_frames, true);
+ mlx5e_free_xdpsq_desc(sq, wi, &xsk_frames, true, &bq);
} while (!last_wqe);
if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) {
@@ -447,6 +451,8 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq)
}
} while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));
+ xdp_flush_frame_bulk(&bq);
+
if (xsk_frames)
xsk_tx_completed(sq->xsk_pool, xsk_frames);
@@ -463,8 +469,13 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq)
void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq)
{
+ struct xdp_frame_bulk bq;
u32 xsk_frames = 0;
+ xdp_frame_bulk_init(&bq);
+
+ rcu_read_lock(); /* need for xdp_return_frame_bulk */
+
while (sq->cc != sq->pc) {
struct mlx5e_xdp_wqe_info *wi;
u16 ci;
@@ -474,9 +485,12 @@ void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq)
sq->cc += wi->num_wqebbs;
- mlx5e_free_xdpsq_desc(sq, wi, &xsk_frames, false);
+ mlx5e_free_xdpsq_desc(sq, wi, &xsk_frames, false, &bq);
}
+ xdp_flush_frame_bulk(&bq);
+ rcu_read_unlock();
+
if (xsk_frames)
xsk_tx_completed(sq->xsk_pool, xsk_frames);
}
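
Note the mlx5 variant of the same pattern: the bulk queue is declared on the caller's stack and threaded through mlx5e_free_xdpsq_desc() as an extra argument, so mlx5e_poll_xdpsq_cq() and mlx5e_free_xdpsq_descs() each flush once after their completion loop rather than per WQE. Only mlx5e_free_xdpsq_descs() adds an explicit rcu_read_lock()/rcu_read_unlock() pair; the CQ poll path presumably already satisfies the RCU requirement by running from NAPI context.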