#endif /* !CONFIG_DYNAMIC_DEBUG */
 
 static DEFINE_IDA(ice_aux_ida);
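+/* enabled when there are fewer XDP Tx rings than CPUs; shared rings must
+ * then be serialized with the per-ring tx_lock
+ */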
+DEFINE_STATIC_KEY_FALSE(ice_xdp_locking_key);
+EXPORT_SYMBOL(ice_xdp_locking_key);
 
 static struct workqueue_struct *ice_wq;
 static const struct net_device_ops ice_netdev_safe_mode_ops;
                        goto free_xdp_rings;
                ice_set_ring_xdp(xdp_ring);
                xdp_ring->xsk_pool = ice_tx_xsk_pool(xdp_ring);
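+               /* protects the ring when it is shared by multiple CPUs */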
+               spin_lock_init(&xdp_ring->tx_lock);
                for (j = 0; j < xdp_ring->count; j++) {
                        tx_desc = ICE_TX_DESC(xdp_ring, j);
                        tx_desc->cmd_type_offset_bsz = cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE);
                }
        }
 
-       ice_for_each_rxq(vsi, i)
-               vsi->rx_rings[i]->xdp_ring = vsi->xdp_rings[i];
+       ice_for_each_rxq(vsi, i) {
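+               /* with fewer XDP Tx rings than CPUs the rings are shared,
+                * so wrap the Rx queue to XDP ring mapping round-robin
+                */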
+               if (static_key_enabled(&ice_xdp_locking_key))
+                       vsi->rx_rings[i]->xdp_ring = vsi->xdp_rings[i % vsi->num_xdp_txq];
+               else
+                       vsi->rx_rings[i]->xdp_ring = vsi->xdp_rings[i];
+       }
 
        return 0;
 
        if (__ice_vsi_get_qs(&xdp_qs_cfg))
                goto err_map_xdp;
 
+       if (static_key_enabled(&ice_xdp_locking_key))
+               netdev_warn(vsi->netdev,
+                           "Could not allocate one XDP Tx ring per CPU, XDP_TX/XDP_REDIRECT actions will be slower\n");
+
        if (ice_xdp_alloc_setup_rings(vsi))
                goto clear_xdp_rings;
 
        devm_kfree(ice_pf_to_dev(pf), vsi->xdp_rings);
        vsi->xdp_rings = NULL;
 
+       if (static_key_enabled(&ice_xdp_locking_key))
+               static_branch_dec(&ice_xdp_locking_key);
+
        if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
                return 0;
 
        }
 }
 
+/**
+ * ice_vsi_determine_xdp_res - figure out how many Tx queues XDP can have
+ * @vsi: VSI to determine the count of XDP Tx queues for
+ *
+ * Returns 0 if the available Tx queue count is at least half the number of
+ * possible CPUs, -ENOMEM otherwise.
+ */
+int ice_vsi_determine_xdp_res(struct ice_vsi *vsi)
+{
+       u16 avail = ice_get_avail_txq_count(vsi->back);
+       u16 cpus = num_possible_cpus();
+
+       if (avail < cpus / 2)
+               return -ENOMEM;
+
+       vsi->num_xdp_txq = min_t(u16, avail, cpus);
+
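+       /* fewer XDP Tx rings than CPUs means rings get shared between
+        * CPUs; enable the locking key so the Tx paths take tx_lock
+        */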
+       if (vsi->num_xdp_txq < cpus)
+               static_branch_inc(&ice_xdp_locking_key);
+
+       return 0;
+}
+
 /**
  * ice_xdp_setup_prog - Add or remove XDP eBPF program
  * @vsi: VSI to setup XDP for
        }
 
        if (!ice_is_xdp_ena_vsi(vsi) && prog) {
-               vsi->num_xdp_txq = num_possible_cpus();
-               xdp_ring_err = ice_prepare_xdp_rings(vsi, prog);
-               if (xdp_ring_err)
-                       NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed");
+               xdp_ring_err = ice_vsi_determine_xdp_res(vsi);
+               if (xdp_ring_err) {
+                       NL_SET_ERR_MSG_MOD(extack, "Not enough Tx resources for XDP");
+               } else {
+                       xdp_ring_err = ice_prepare_xdp_rings(vsi, prog);
+                       if (xdp_ring_err)
+                               NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed");
+               }
        } else if (ice_is_xdp_ena_vsi(vsi) && !prog) {
                xdp_ring_err = ice_destroy_xdp_rings(vsi);
                if (xdp_ring_err)
 
        case XDP_PASS:
                return ICE_XDP_PASS;
        case XDP_TX:
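+               /* a ring shared between CPUs must be locked for XDP_TX */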
+               if (static_branch_unlikely(&ice_xdp_locking_key))
+                       spin_lock(&xdp_ring->tx_lock);
                err = ice_xmit_xdp_ring(xdp->data, xdp->data_end - xdp->data, xdp_ring);
+               if (static_branch_unlikely(&ice_xdp_locking_key))
+                       spin_unlock(&xdp_ring->tx_lock);
                if (err == ICE_XDP_CONSUMED)
                        goto out_failure;
                return err;
        if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
                return -EINVAL;
 
-       xdp_ring = vsi->xdp_rings[queue_index];
+       if (static_branch_unlikely(&ice_xdp_locking_key)) {
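+               /* rings are shared: fold queue_index onto the available
+                * XDP Tx rings and serialize with tx_lock
+                */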
+               queue_index %= vsi->num_xdp_txq;
+               xdp_ring = vsi->xdp_rings[queue_index];
+               spin_lock(&xdp_ring->tx_lock);
+       } else {
+               xdp_ring = vsi->xdp_rings[queue_index];
+       }
+
        for (i = 0; i < n; i++) {
                struct xdp_frame *xdpf = frames[i];
                int err;
        if (unlikely(flags & XDP_XMIT_FLUSH))
                ice_xdp_ring_update_tail(xdp_ring);
 
+       if (static_branch_unlikely(&ice_xdp_locking_key))
+               spin_unlock(&xdp_ring->tx_lock);
+
        return nxmit;
 }