dpif-netdev: Preserve inner offloads on recirculation.
Rather than drop all pending Tx offloads on recirculation,
save inner offload offsets and Tx offloads, then restore them
after miniflow_extract().

Fixes: c6538b4 ("dpif-netdev: Fix crash due to tunnel offloading on recirculation.")
Fixes: 084c808 ("userspace: Support VXLAN and GENEVE TSO.")
Signed-off-by: David Marchand <[email protected]>
david-marchand committed Feb 11, 2025
1 parent b9be729 commit 6a19f3b
Showing 2 changed files with 49 additions and 54 deletions.
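
In short, dfc_processing() now snapshots the packet's Tx offload flags and inner L3/L4 offsets before a recirculated packet is re-parsed, then writes them back afterwards, instead of stripping the tunnel state up front in dp_netdev_recirculate(). The standalone sketch below illustrates only that save/restore pattern; struct fake_packet, FAKE_TX_MASK and reparse() are invented stand-ins for struct dp_packet, DP_PACKET_OL_TX_MASK and miniflow_extract(), not the real OVS API.

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for struct dp_packet: only the fields that matter here. */
    struct fake_packet {
        uint64_t ol_flags;      /* Rx + Tx offload bits. */
        uint16_t inner_l3_ofs;  /* Inner L3 offset, UINT16_MAX when unset. */
        uint16_t inner_l4_ofs;  /* Inner L4 offset, UINT16_MAX when unset. */
    };

    /* Pretend the high 32 bits are the Tx offload bits. */
    #define FAKE_TX_MASK 0xffffffff00000000ULL

    /* Stand-in for miniflow_extract(): re-parsing wipes Tx state and offsets. */
    static void
    reparse(struct fake_packet *p)
    {
        p->ol_flags &= ~FAKE_TX_MASK;
        p->inner_l3_ofs = UINT16_MAX;
        p->inner_l4_ofs = UINT16_MAX;
    }

    int
    main(void)
    {
        struct fake_packet p = {
            .ol_flags = 0x0000000300000005ULL,  /* Some Tx bits + some Rx bits. */
            .inner_l3_ofs = 50,
            .inner_l4_ofs = 70,
        };

        /* Save, as the new code does at the top of the per-packet loop. */
        uint64_t tx_ol_flags = p.ol_flags & FAKE_TX_MASK;
        uint16_t inner_l3_ofs = p.inner_l3_ofs;
        uint16_t inner_l4_ofs = p.inner_l4_ofs;

        reparse(&p);

        /* Restore, as the new code does after parsing. */
        p.ol_flags = (p.ol_flags & ~FAKE_TX_MASK) | tx_ol_flags;
        p.inner_l3_ofs = inner_l3_ofs;
        p.inner_l4_ofs = inner_l4_ofs;

        printf("flags=%#llx l3=%u l4=%u\n", (unsigned long long) p.ol_flags,
               (unsigned) p.inner_l3_ofs, (unsigned) p.inner_l4_ofs);
        return 0;
    }

The same restore sequence appears three times in the diff below because a recirculated packet can be classified through the hardware-offload, simple-match, or miniflow path.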
39 changes: 16 additions & 23 deletions lib/dp-packet.h
@@ -111,26 +111,28 @@ enum dp_packet_offload_mask {
     /* Adding new field requires adding to DP_PACKET_OL_SUPPORTED_MASK. */
 };
 
+#define DP_PACKET_OL_TX_MASK (DP_PACKET_OL_TX_TCP_SEG | \
+                              DP_PACKET_OL_TX_IPV4 | \
+                              DP_PACKET_OL_TX_IPV6 | \
+                              DP_PACKET_OL_TX_TCP_CKSUM | \
+                              DP_PACKET_OL_TX_UDP_CKSUM | \
+                              DP_PACKET_OL_TX_SCTP_CKSUM | \
+                              DP_PACKET_OL_TX_IP_CKSUM | \
+                              DP_PACKET_OL_TX_TUNNEL_GENEVE | \
+                              DP_PACKET_OL_TX_TUNNEL_VXLAN | \
+                              DP_PACKET_OL_TX_TUNNEL_GRE | \
+                              DP_PACKET_OL_TX_OUTER_IPV4 | \
+                              DP_PACKET_OL_TX_OUTER_IP_CKSUM | \
+                              DP_PACKET_OL_TX_OUTER_UDP_CKSUM | \
+                              DP_PACKET_OL_TX_OUTER_IPV6)
+
 #define DP_PACKET_OL_SUPPORTED_MASK (DP_PACKET_OL_RSS_HASH | \
                                      DP_PACKET_OL_FLOW_MARK | \
                                      DP_PACKET_OL_RX_L4_CKSUM_BAD | \
                                      DP_PACKET_OL_RX_IP_CKSUM_BAD | \
                                      DP_PACKET_OL_RX_L4_CKSUM_GOOD | \
                                      DP_PACKET_OL_RX_IP_CKSUM_GOOD | \
-                                     DP_PACKET_OL_TX_TCP_SEG | \
-                                     DP_PACKET_OL_TX_IPV4 | \
-                                     DP_PACKET_OL_TX_IPV6 | \
-                                     DP_PACKET_OL_TX_TCP_CKSUM | \
-                                     DP_PACKET_OL_TX_UDP_CKSUM | \
-                                     DP_PACKET_OL_TX_SCTP_CKSUM | \
-                                     DP_PACKET_OL_TX_IP_CKSUM | \
-                                     DP_PACKET_OL_TX_TUNNEL_GENEVE | \
-                                     DP_PACKET_OL_TX_TUNNEL_VXLAN | \
-                                     DP_PACKET_OL_TX_TUNNEL_GRE | \
-                                     DP_PACKET_OL_TX_OUTER_IPV4 | \
-                                     DP_PACKET_OL_TX_OUTER_IP_CKSUM | \
-                                     DP_PACKET_OL_TX_OUTER_UDP_CKSUM | \
-                                     DP_PACKET_OL_TX_OUTER_IPV6)
+                                     DP_PACKET_OL_TX_MASK)
 
 #define DP_PACKET_OL_TX_L4_MASK (DP_PACKET_OL_TX_TCP_CKSUM | \
                                  DP_PACKET_OL_TX_UDP_CKSUM | \
@@ -1316,15 +1318,6 @@ dp_packet_hwol_set_tunnel_gre(struct dp_packet *b)
     *dp_packet_ol_flags_ptr(b) |= DP_PACKET_OL_TX_TUNNEL_GRE;
 }
 
-/* Clears tunnel offloading marks. */
-static inline void
-dp_packet_hwol_reset_tunnel(struct dp_packet *b)
-{
-    *dp_packet_ol_flags_ptr(b) &= ~(DP_PACKET_OL_TX_TUNNEL_VXLAN |
-                                    DP_PACKET_OL_TX_TUNNEL_GRE |
-                                    DP_PACKET_OL_TX_TUNNEL_GENEVE);
-}
-
 /* Mark packet 'b' as a tunnel packet with outer IPv4 header. */
 static inline void
 dp_packet_hwol_set_tx_outer_ipv4(struct dp_packet *b)
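
The dp-packet.h change is a straight refactor: the Tx offload bits that were previously spelled out twice are grouped into the new DP_PACKET_OL_TX_MASK, and DP_PACKET_OL_SUPPORTED_MASK is rewritten in terms of it. A minimal sketch of why the two spellings are equivalent, using invented EX_* bit values rather than the real enum dp_packet_offload_mask entries:

    #include <assert.h>
    #include <stdint.h>

    /* Illustrative bit values only; the real flags live in
     * enum dp_packet_offload_mask in lib/dp-packet.h. */
    #define EX_RX_RSS_HASH     (UINT64_C(1) << 0)
    #define EX_RX_FLOW_MARK    (UINT64_C(1) << 1)
    #define EX_TX_TCP_SEG      (UINT64_C(1) << 2)
    #define EX_TX_IP_CKSUM     (UINT64_C(1) << 3)
    #define EX_TX_TUNNEL_VXLAN (UINT64_C(1) << 4)

    /* New style: one mask covering every Tx offload bit. */
    #define EX_TX_MASK (EX_TX_TCP_SEG | EX_TX_IP_CKSUM | EX_TX_TUNNEL_VXLAN)

    /* Old style: every Tx bit repeated inside the supported mask. */
    #define EX_SUPPORTED_OLD (EX_RX_RSS_HASH | EX_RX_FLOW_MARK | \
                              EX_TX_TCP_SEG | EX_TX_IP_CKSUM | EX_TX_TUNNEL_VXLAN)

    /* New style: the supported mask expressed through EX_TX_MASK. */
    #define EX_SUPPORTED_NEW (EX_RX_RSS_HASH | EX_RX_FLOW_MARK | EX_TX_MASK)

    int
    main(void)
    {
        /* The refactor keeps exactly the same bit set. */
        assert(EX_SUPPORTED_OLD == EX_SUPPORTED_NEW);
        return 0;
    }

Having a single mask is also what lets the dpif-netdev.c hunks below clear and restore every Tx offload with one "&= ~DP_PACKET_OL_TX_MASK" / "|= tx_ol_flags" pair.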
64 changes: 33 additions & 31 deletions lib/dpif-netdev.c
@@ -115,7 +115,6 @@ COVERAGE_DEFINE(datapath_drop_lock_error);
 COVERAGE_DEFINE(datapath_drop_userspace_action_error);
 COVERAGE_DEFINE(datapath_drop_tunnel_push_error);
 COVERAGE_DEFINE(datapath_drop_tunnel_pop_error);
-COVERAGE_DEFINE(datapath_drop_tunnel_tso_recirc);
 COVERAGE_DEFINE(datapath_drop_recirc_error);
 COVERAGE_DEFINE(datapath_drop_invalid_port);
 COVERAGE_DEFINE(datapath_drop_invalid_bond);
@@ -8523,6 +8522,9 @@ dfc_processing(struct dp_netdev_pmd_thread *pmd,
     struct dp_packet *packet;
     size_t map_cnt = 0;
     bool batch_enable = true;
+    uint16_t inner_l3_ofs = UINT16_MAX;
+    uint16_t inner_l4_ofs = UINT16_MAX;
+    uint64_t tx_ol_flags = 0;
 
     const bool simple_match_enabled =
         !md_is_valid && dp_netdev_simple_match_enabled(pmd, port_no);
@@ -8555,6 +8557,11 @@ dfc_processing(struct dp_netdev_pmd_thread *pmd,
 
         if (!md_is_valid) {
             pkt_metadata_init(&packet->md, port_no);
+        } else if (dp_packet_hwol_is_tunnel(packet)) {
+            tx_ol_flags = *dp_packet_ol_flags_ptr(packet);
+            tx_ol_flags &= DP_PACKET_OL_TX_MASK;
+            inner_l3_ofs = packet->inner_l3_ofs;
+            inner_l4_ofs = packet->inner_l4_ofs;
         }
 
         if (netdev_flow_api && recirc_depth == 0) {
@@ -8566,6 +8573,13 @@ dfc_processing(struct dp_netdev_pmd_thread *pmd,
             }
             if (OVS_LIKELY(flow)) {
                 tcp_flags = parse_tcp_flags(packet, NULL, NULL, NULL);
+                if (md_is_valid && dp_packet_hwol_is_tunnel(packet)) {
+                    /* Restore previous Tx offloads (for tunneling). */
+                    *dp_packet_ol_flags_ptr(packet) &= ~DP_PACKET_OL_TX_MASK;
+                    *dp_packet_ol_flags_ptr(packet) |= tx_ol_flags;
+                    packet->inner_l3_ofs = inner_l3_ofs;
+                    packet->inner_l4_ofs = inner_l4_ofs;
+                }
                 n_phwol_hit++;
                 dfc_processing_enqueue_classified_packet(
                     packet, flow, tcp_flags, batch_enable,
@@ -8579,6 +8593,13 @@ dfc_processing(struct dp_netdev_pmd_thread *pmd,
             uint8_t nw_frag = 0;
 
             tcp_flags = parse_tcp_flags(packet, &dl_type, &nw_frag, &vlan_tci);
+            if (md_is_valid && dp_packet_hwol_is_tunnel(packet)) {
+                /* Restore previous Tx offloads (for tunneling). */
+                *dp_packet_ol_flags_ptr(packet) &= ~DP_PACKET_OL_TX_MASK;
+                *dp_packet_ol_flags_ptr(packet) |= tx_ol_flags;
+                packet->inner_l3_ofs = inner_l3_ofs;
+                packet->inner_l4_ofs = inner_l4_ofs;
+            }
             flow = dp_netdev_simple_match_lookup(pmd, port_no, dl_type,
                                                  nw_frag, vlan_tci);
             if (OVS_LIKELY(flow)) {
@@ -8592,10 +8613,17 @@ dfc_processing(struct dp_netdev_pmd_thread *pmd,
 
         miniflow_extract(packet, &key->mf);
         key->len = 0; /* Not computed yet. */
-        key->hash =
-            (md_is_valid == false)
-            ? dpif_netdev_packet_get_rss_hash_orig_pkt(packet, &key->mf)
-            : dpif_netdev_packet_get_rss_hash(packet, &key->mf);
+        if (!md_is_valid) {
+            key->hash =
+                dpif_netdev_packet_get_rss_hash_orig_pkt(packet, &key->mf);
+        } else {
+            key->hash = dpif_netdev_packet_get_rss_hash(packet, &key->mf);
+            /* Restore previous Tx offloads (for tunneling). */
+            *dp_packet_ol_flags_ptr(packet) &= ~DP_PACKET_OL_TX_MASK;
+            *dp_packet_ol_flags_ptr(packet) |= tx_ol_flags;
+            packet->inner_l3_ofs = inner_l3_ofs;
+            packet->inner_l4_ofs = inner_l4_ofs;
+        }
 
         /* If EMC is disabled skip emc_lookup */
         flow = (cur_min != 0) ? emc_lookup(&cache->emc_cache, key) : NULL;
@@ -8923,32 +8951,6 @@ static void
 dp_netdev_recirculate(struct dp_netdev_pmd_thread *pmd,
                       struct dp_packet_batch *packets)
 {
-    static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
-    size_t i, size = dp_packet_batch_size(packets);
-    struct dp_packet *packet;
-
-    DP_PACKET_BATCH_REFILL_FOR_EACH (i, size, packet, packets) {
-        if (dp_packet_hwol_is_tunnel(packet)) {
-            if (dp_packet_hwol_is_tso(packet)) {
-                /* Can't perform GSO in the middle of a pipeline. */
-                COVERAGE_INC(datapath_drop_tunnel_tso_recirc);
-                dp_packet_delete(packet);
-                VLOG_WARN_RL(&rl, "Recirculating tunnel packets with "
-                                  "TSO is not supported");
-                continue;
-            }
-            /* Have to fix all the checksums before re-parsing, because the
-             * packet will be treated as having a single set of headers. */
-            dp_packet_ol_send_prepare(packet, 0);
-            /* This packet must not be marked with anything tunnel-related. */
-            dp_packet_hwol_reset_tunnel(packet);
-            /* Clear inner offsets. Other ones are collateral, but they will
-             * be re-initialized on re-parsing. */
-            dp_packet_reset_offsets(packet);
-        }
-        dp_packet_batch_refill(packets, packet, i);
-    }
-
     dp_netdev_input__(pmd, packets, true, 0);
 }
 
