diff --git a/.jenkins/pr_verify.sh b/.jenkins/pr_verify.sh
index 74c887244..b4b4f07ab 100644
--- a/.jenkins/pr_verify.sh
+++ b/.jenkins/pr_verify.sh
@@ -20,7 +20,11 @@ echo "Build all profiles using SDE ${SDE_P4C_DOCKER_IMG}..."
 # Pull first to avoid pulling multiple times in parallel by the make jobs
 docker pull "${SDE_P4C_DOCKER_IMG}"
 # Jenkins uses 8 cores 15G VM
-make -j8 all
+# We commented out the 'all' target because we exceeded the 45 min limit on Jenkins.
+# TODO: revert once the PTF test execution time is optimized
+# make -j8 all
+make -j8 fabric-int
+make -j8 fabric-spgw-int
 
 echo "Build and verify Java pipeconf"
 make constants pipeconf MVN_FLAGS="-Pci-verify -Pcoverage"
diff --git a/p4src/include/control/hasher.p4 b/p4src/include/control/hasher.p4
index b3cf8939c..73c46a856 100644
--- a/p4src/include/control/hasher.p4
+++ b/p4src/include/control/hasher.p4
@@ -8,7 +8,6 @@
 struct gtp_flow_t {
     bit<32> ipv4_src;
     bit<32> ipv4_dst;
-    bit<8> ip_proto;
     teid_t gtpu_teid;
 }
 
@@ -21,34 +20,42 @@ control Hasher(
     Hash<flow_hash_t>(HashAlgorithm_t.CRC32) non_ip_hasher;
 
     apply {
+        gtp_flow_t to_hash;
+        flow_hash_t inner_hash;
+        bool calc_gtp_hash = false;
+
+        // checks if inner header is IPv4
         if (fabric_md.acl_lkp.is_ipv4) {
-            // we always need to calculate hash from inner headers for the INT reporter
-            fabric_md.bridged.base.int_hash = ip_hasher.get(fabric_md.acl_lkp);
+            // we always need to calculate hash from the inner IPv4 header for the INT reporter.
+            inner_hash = ip_hasher.get(fabric_md.acl_lkp);
 
-            // if not a GTP flow, use hash calculated from inner headers
-            fabric_md.flow_hash = fabric_md.bridged.base.int_hash;
-#ifdef WITH_SPGW
-            gtp_flow_t to_hash;
-            bool calc_gtp_hash = false;
+            // use inner hash by default
+            fabric_md.ecmp_hash = inner_hash;
+
+            // if an outer GTP header exists, use it to perform GTP-aware ECMP
             if (hdr.gtpu.isValid()) {
-                // for GTP-encapsulated IPv4 packet use outer IPv4 header for hashing
                 to_hash.ipv4_src = hdr.ipv4.src_addr;
                 to_hash.ipv4_dst = hdr.ipv4.dst_addr;
-                to_hash.ip_proto = hdr.ipv4.protocol;
                 to_hash.gtpu_teid = hdr.gtpu.teid;
                 calc_gtp_hash = true;
-            } else if (fabric_md.bridged.spgw.needs_gtpu_encap) {
+            }
+
+#ifdef WITH_SPGW
+            // enable GTP-aware ECMP for downlink packets.
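+            // These packets are not yet GTP-encapsulated at this point, so we
+            // hash on the tunnel fields carried in the SPGW bridged metadata.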
+            if (fabric_md.bridged.spgw.needs_gtpu_encap) {
                 to_hash.ipv4_src = fabric_md.bridged.spgw.gtpu_tunnel_sip;
                 to_hash.ipv4_dst = fabric_md.bridged.spgw.gtpu_tunnel_dip;
-                // we assume UDP protocol
-                to_hash.ip_proto = PROTO_UDP;
                 to_hash.gtpu_teid = fabric_md.bridged.spgw.gtpu_teid;
                 calc_gtp_hash = true;
             }
+#endif // WITH_SPGW
+
             if (calc_gtp_hash) {
-                fabric_md.flow_hash = gtp_flow_hasher.get(to_hash);
+                fabric_md.ecmp_hash = gtp_flow_hasher.get(to_hash);
             }
-#endif // WITH_SPGW
+
         }
         // FIXME: remove ipv6 support or test it
         // https://github.com/stratum/fabric-tna/pull/227
@@ -62,12 +69,17 @@ control Hasher(
         //     });
         // }
         else {
-            // Not an IP packet
-            fabric_md.flow_hash = non_ip_hasher.get({
+            // Inner header is not an IPv4 packet
+            inner_hash = non_ip_hasher.get({
                 hdr.ethernet.dst_addr,
                 hdr.ethernet.src_addr,
                 hdr.eth_type.value
             });
         }
+
+#ifdef WITH_INT
+        fabric_md.bridged.int_bmd.inner_hash = inner_hash;
+#endif // WITH_INT
+
     }
 }
diff --git a/p4src/include/control/int.p4 b/p4src/include/control/int.p4
index bb5c869aa..81d6b3846 100644
--- a/p4src/include/control/int.p4
+++ b/p4src/include/control/int.p4
@@ -62,11 +62,11 @@ control FlowReportFilter(
             fabric_md.bridged.base.ig_port,
             eg_intr_md.egress_port,
             fabric_md.int_md.hop_latency,
-            fabric_md.bridged.base.int_hash,
+            fabric_md.bridged.int_bmd.inner_hash,
             fabric_md.int_md.timestamp
         });
-        flag = filter_get_and_set1.execute(fabric_md.bridged.base.int_hash[31:16]);
-        flag = flag | filter_get_and_set2.execute(fabric_md.bridged.base.int_hash[15:0]);
+        flag = filter_get_and_set1.execute(fabric_md.bridged.int_bmd.inner_hash[31:16]);
+        flag = flag | filter_get_and_set2.execute(fabric_md.bridged.int_bmd.inner_hash[15:0]);
         // Generate report only when ALL register actions detect a change.
         if (flag == 1) {
             eg_dprsr_md.mirror_type = (bit<3>)FabricMirrorType_t.INVALID;
@@ -187,7 +187,7 @@ control IntIngress (
         fabric_md.int_mirror_md.ip_eth_type = fabric_md.bridged.base.ip_eth_type;
         fabric_md.int_mirror_md.eg_port = (bit<16>)ig_tm_md.ucast_egress_port;
         fabric_md.int_mirror_md.queue_id = (bit<8>)ig_tm_md.qid;
-        fabric_md.int_mirror_md.flow_hash = fabric_md.bridged.base.int_hash;
+        fabric_md.int_mirror_md.flow_hash = fabric_md.bridged.int_bmd.inner_hash;
         ig_dprsr_md.drop_ctl = 1;
 #ifdef WITH_DEBUG
         drop_report_counter.count();
@@ -406,7 +406,7 @@ control IntEgress (
         fabric_md.int_mirror_md.ig_tstamp = fabric_md.bridged.base.ig_tstamp[31:0];
         fabric_md.int_mirror_md.eg_tstamp = eg_prsr_md.global_tstamp[31:0];
         fabric_md.int_mirror_md.ip_eth_type = fabric_md.bridged.base.ip_eth_type;
-        fabric_md.int_mirror_md.flow_hash = fabric_md.bridged.base.int_hash;
+        fabric_md.int_mirror_md.flow_hash = fabric_md.bridged.int_bmd.inner_hash;
         // fabric_md.int_mirror_md.vlan_stripped set by egress_vlan table
         // fabric_md.int_mirror_md.strip_gtpu will be initialized by the parser
     }
diff --git a/p4src/include/control/next.p4 b/p4src/include/control/next.p4
index a9c52b4b5..6da1c6d0c 100644
--- a/p4src/include/control/next.p4
+++ b/p4src/include/control/next.p4
@@ -130,7 +130,7 @@ control Next (inout parsed_headers_t hdr,
     table hashed {
         key = {
             fabric_md.next_id : exact @name("next_id");
-            fabric_md.flow_hash : selector;
+            fabric_md.ecmp_hash : selector;
         }
         actions = {
             output_hashed;
diff --git a/p4src/include/header.p4 b/p4src/include/header.p4
index 6e5a3e1ec..c340d5a86 100644
--- a/p4src/include/header.p4
+++ b/p4src/include/header.p4
@@ -237,6 +237,8 @@ struct int_bridged_metadata_t {
     IntReportType_t report_type;
     @padding bit<6> _pad1;
     MirrorId_t mirror_session_id;
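+    // Flow hash bridged to egress, used by the INT report filter and mirrors.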
+    flow_hash_t inner_hash;
 }
 
 struct int_metadata_t {
@@ -262,7 +264,6 @@ struct bridged_metadata_base_t {
     bit<8> mpls_ttl;
     bit<48> ig_tstamp;
     bit<16> ip_eth_type;
-    flow_hash_t int_hash;
 #ifdef WITH_DOUBLE_VLAN_TERMINATION
     @padding bit<7> _pad1;
     bool push_double_vlan;
@@ -303,7 +304,7 @@ struct acl_lookup_t {
 @pa_auto_init_metadata
 struct fabric_ingress_metadata_t {
     bridged_metadata_t bridged;
-    flow_hash_t flow_hash;
+    flow_hash_t ecmp_hash;
     acl_lookup_t acl_lkp;
     bit<32> routing_ipv4_dst; // Outermost
     bool skip_forwarding;
diff --git a/ptf/tests/ptf/fabric.ptf/test.py b/ptf/tests/ptf/fabric.ptf/test.py
index a5c9b4755..d84d05e50 100644
--- a/ptf/tests/ptf/fabric.ptf/test.py
+++ b/ptf/tests/ptf/fabric.ptf/test.py
@@ -1024,20 +1024,19 @@ def runTest(self):
         self.verify_no_other_packets()
 
 
-@group("spgw")
-class FabricGTPUnicastGroupTestAllPortTEID(FabricTest):
+class FabricGTPUnicastECMPbasedOnTEID(FabricTest):
     """
     This test case verifies if the GTP encapsulated traffic
-    is distributed over next hops by the fabric.
+    is distributed over next hops by hashing on the TEID.
     """
 
     @tvsetup
     @autocleanup
-    def GTPUnicastGroupTestAllPortTEID(self, pkt_type):
+    def doRunTest(self, pkt_type):
         # In this test we check that packets are forwarded to all ports when we
-        # change one of the 5-tuple header values and we have an ECMP-like
+        # change one of the values used for hash calculation and we have an ECMP-like
         # distribution.
-        # In this case TEID for GTP-encapsulated packets
+        # In this case, we change the TEID for GTP-encapsulated packets
         vlan_id = 10
         self.set_ingress_port_vlan(self.port1, False, 0, vlan_id)
         self.set_forwarding_type(
@@ -1122,21 +1121,21 @@ def GTPUnicastGroupTestAllPortTEID(self, pkt_type):
         # In this assertion we are verifying:
         # 1) all ports of the same group are used almost once
         # 2) consistency of the forwarding decision, i.e. packets with the
-        # same 5-tuple fields are always forwarded out of the same port
+        # same hashed fields are always forwarded out of the same port
         self.verify_each_packet_on_each_port(
             [exp_pkt_to2, exp_pkt_to3], [self.port2, self.port3]
         )
 
     def runTest(self):
         for pkt_type in ["tcp", "udp", "icmp"]:
-            self.GTPUnicastGroupTestAllPortTEID(pkt_type)
+            self.doRunTest(pkt_type)
 
 @group("spgw")
-class FabricSpgwDownlinkLoadBalancingTest(SpgwSimpleTest):
+class FabricSpgwDownlinkECMPTest(SpgwSimpleTest):
     """
-    This test case verifies if from PDN to UE (downlink) is
-    distrubted over next hops.
+    This test case verifies if traffic from PDN to UEs (downlink) served by the same
+    base station is distributed over next hops using GTP-aware load balancing.
     """
 
     @tvsetup
     @autocleanup
@@ -1168,7 +1167,6 @@ def doRunTest(self, pkt_type):
         ue_ipv4_toport = [None, None]
         # teid_toport list is used to learn the teid
         # assigned by SPGW for a downlink packet.
-        # This teid causes the packet to be forwarded for each port.
         teid_toport = [None, None]
         for i in range(50):
             ue_ipv4 = "10.0.0." + str(i)
diff --git a/ptf/tests/ptf/fabric_test.py b/ptf/tests/ptf/fabric_test.py
index c820caca7..cba38b38f 100644
--- a/ptf/tests/ptf/fabric_test.py
+++ b/ptf/tests/ptf/fabric_test.py
@@ -217,7 +217,7 @@
 if testutils.test_param_get("profile") == "fabric-spgw-int":
     BMD_BYTES = 43
 elif testutils.test_param_get("profile") == "fabric-spgw":
-    BMD_BYTES = 40
+    BMD_BYTES = 36
 elif testutils.test_param_get("profile") == "fabric-int":
     BMD_BYTES = 24
 else: