author    Maciej Fijalkowski <maciej.fijalkowski@intel.com>  2022-09-01 12:40:40 +0200
committer Tony Nguyen <anthony.l.nguyen@intel.com>           2022-09-27 09:01:01 -0700
commit    b3056ae2b57858b02b376b3fed6077040caf14b4 (patch)
tree      12bed1da7950ccc5cda023358170b6747097d668
parent    29322791bc8b4f42fc65734840826e3ddc30921e (diff)
ice: xsk: drop power of 2 ring size restriction for AF_XDP
Multiple customers have reported over the past months that commit
296f13ff3854 ("ice: xsk: Force rings to be sized to power of 2") makes
them unable to use a ring size of 8160 in conjunction with AF_XDP.
Remove this restriction.

Fixes: 296f13ff3854 ("ice: xsk: Force rings to be sized to power of 2")
CC: Alasdair McWilliam <alasdair.mcwilliam@outlook.com>
Signed-off-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
Tested-by: George Kuruvinakunnel <george.kuruvinakunnel@intel.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
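The ring size in question, 8160, is the ice driver's maximum descriptor
count (ICE_MAX_NUM_DESC at the time of this commit) and is not a power of
two, which is why the removed check rejected it. A minimal userspace
sketch of that rejection, restating the n & (n - 1) idiom that the
kernel's is_power_of_2() is built on; is_pow2() is a hypothetical
stand-in, not the kernel helper:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Userspace restatement of the is_power_of_2() idiom: a nonzero n is a
 * power of two iff exactly one bit is set. */
static bool is_pow2(uint32_t n)
{
	return n != 0 && (n & (n - 1)) == 0;
}

int main(void)
{
	assert(is_pow2(4096));  /* sizes like this passed the removed check */
	assert(!is_pow2(8160)); /* 8160 = 0x1fe0: rejected before this patch */
	return 0;
}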
-rw-r--r--	drivers/net/ethernet/intel/ice/ice_xsk.c | 20 +++++++-------------
1 file changed, 7 insertions(+), 13 deletions(-)
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index 8833b66b4e54ad..056c904b83ccb6 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -392,13 +392,6 @@ int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
 		goto failure;
 	}
 
-	if (!is_power_of_2(vsi->rx_rings[qid]->count) ||
-	    !is_power_of_2(vsi->tx_rings[qid]->count)) {
-		netdev_err(vsi->netdev, "Please align ring sizes to power of 2\n");
-		pool_failure = -EINVAL;
-		goto failure;
-	}
-
 	if_running = netif_running(vsi->netdev) && ice_is_xdp_ena_vsi(vsi);
 
 	if (if_running) {
@@ -534,11 +527,10 @@ exit:
 bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
 {
 	u16 rx_thresh = ICE_RING_QUARTER(rx_ring);
-	u16 batched, leftover, i, tail_bumps;
+	u16 leftover, i, tail_bumps;
 
-	batched = ALIGN_DOWN(count, rx_thresh);
-	tail_bumps = batched / rx_thresh;
-	leftover = count & (rx_thresh - 1);
+	tail_bumps = count / rx_thresh;
+	leftover = count - (tail_bumps * rx_thresh);
 
 	for (i = 0; i < tail_bumps; i++)
 		if (!__ice_alloc_rx_bufs_zc(rx_ring, rx_thresh))
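The hunk above is the core of the allocation-path fix: the old code
computed the leftover with count & (rx_thresh - 1), a remainder idiom
that is only correct when rx_thresh is a power of two, while the new
code uses plain division and subtraction, which holds for any rx_thresh
(for an 8160-entry ring, ICE_RING_QUARTER() yields 2040). A minimal
userspace sketch of the difference; leftover_mask() and leftover_div()
are hypothetical names for the two idioms, not driver functions:

#include <assert.h>
#include <stdint.h>

/* Old idiom: mask-based remainder, valid only for power-of-two thresholds. */
static uint16_t leftover_mask(uint16_t count, uint16_t thresh)
{
	return count & (thresh - 1);
}

/* New idiom: division-based remainder, valid for any nonzero threshold. */
static uint16_t leftover_div(uint16_t count, uint16_t thresh)
{
	return count - (count / thresh) * thresh;
}

int main(void)
{
	/* Power-of-two threshold: the two forms agree. */
	assert(leftover_mask(100, 64) == 36);
	assert(leftover_div(100, 64) == 36);

	/* Non-power-of-two threshold, e.g. 8160 / 4 = 2040: only the
	 * division form yields the true remainder. */
	assert(leftover_div(4100, 2040) == 20);
	assert(leftover_mask(4100, 2040) == 4); /* 0x1004 & 0x7f7, wrong */
	return 0;
}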
@@ -1037,14 +1029,16 @@ bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi)
  */
 void ice_xsk_clean_rx_ring(struct ice_rx_ring *rx_ring)
 {
-	u16 count_mask = rx_ring->count - 1;
 	u16 ntc = rx_ring->next_to_clean;
 	u16 ntu = rx_ring->next_to_use;
 
-	for ( ; ntc != ntu; ntc = (ntc + 1) & count_mask) {
+	while (ntc != ntu) {
 		struct xdp_buff *xdp = *ice_xdp_buf(rx_ring, ntc);
 
 		xsk_buff_free(xdp);
+		ntc++;
+		if (ntc >= rx_ring->count)
+			ntc = 0;
 	}
 }
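The final hunk applies the same idea to the cleanup path's ring index:
the old (ntc + 1) & count_mask advance relies on the ring count being a
power of two, while the explicit compare-and-reset wrap works for any
ring size. A minimal userspace sketch; advance_mask() and advance_cmp()
are hypothetical names for the two idioms, not driver functions:

#include <assert.h>
#include <stdint.h>

/* Old idiom: bitmask wraparound, correct only for power-of-two rings. */
static uint16_t advance_mask(uint16_t ntc, uint16_t count)
{
	return (ntc + 1) & (count - 1);
}

/* New idiom: compare-and-reset wraparound, correct for any ring size. */
static uint16_t advance_cmp(uint16_t ntc, uint16_t count)
{
	ntc++;
	if (ntc >= count)
		ntc = 0;
	return ntc;
}

int main(void)
{
	/* 4096-entry ring: both forms wrap 4095 -> 0. */
	assert(advance_mask(4095, 4096) == 0);
	assert(advance_cmp(4095, 4096) == 0);

	/* 8160-entry ring: the mask form misses the wrap entirely. */
	assert(advance_cmp(8159, 8160) == 0);
	assert(advance_mask(8159, 8160) == 8128); /* 8160 & 8159, not 0 */
	return 0;
}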