diff --git a/AUTHORS.rst b/AUTHORS.rst
index d3747f8d1..5d60088e4 100644
--- a/AUTHORS.rst
+++ b/AUTHORS.rst
@@ -395,6 +395,7 @@ Vishal Deep Ajmera vishal.deep.ajmera@ericsson.com
Vivien Bernet-Rollande vbr@soprive.net
Vladislav Odintsov odivlad@gmail.com
wangqianyu wang.qianyu@zte.com.cn
+wangchuanlei wangchuanlei@inspur.com
Wang Sheng-Hui shhuiw@gmail.com
Wang Zhike wangzhike@jd.com
Wei Li liw@dtdream.com
diff --git a/Documentation/conf.py b/Documentation/conf.py
index d89c64e77..f7eceaec8 100644
--- a/Documentation/conf.py
+++ b/Documentation/conf.py
@@ -58,7 +58,7 @@ author = u'The Open Virtual Network (OVN) Development Community'
# The full version, including alpha/beta/rc tags.
release = None
filename = "../configure.ac"
-with open(filename, 'rU') as f:
+with open(filename, 'r') as f:
for line in f:
if 'AC_INIT' in line:
# Parse "AC_INIT(openvswitch, 2.7.90, bugs@openvswitch.org)":
diff --git a/Documentation/tutorials/ovn-ipsec.rst b/Documentation/tutorials/ovn-ipsec.rst
index 305dd566d..aea7aa309 100644
--- a/Documentation/tutorials/ovn-ipsec.rst
+++ b/Documentation/tutorials/ovn-ipsec.rst
@@ -93,6 +93,29 @@ database to false::
# systemctl enable firewalld
# firewall-cmd --permanent --add-service ipsec
+Enforcing IPsec NAT-T UDP encapsulation
+---------------------------------------
+
+In specific situations, it may be required to enforce NAT-T (RFC3948) UDP
+encapsulation unconditionally and to bypass the normal NAT detection mechanism.
+For example, this may be required in environments where firewalls drop ESP
+traffic, but where NAT-T detection (RFC3947) fails because packets otherwise
+are not subject to NAT.
+In such scenarios, UDP encapsulation can be enforced with the following.
+
+For libreswan backends::
+
+ $ ovn-nbctl set nb_global . options:ipsec_encapsulation=true
+
+For strongswan backends::
+
+ $ ovn-nbctl set nb_global . options:ipsec_forceencaps=true
+
+.. note::
+
+   Support for this feature is only available when OVN is used together with
+ OVS releases that accept IPsec custom tunnel options.
+
Troubleshooting
---------------
@@ -119,6 +142,7 @@ For example::
Remote name: host_2
CA cert: /path/to/cacert.pem
PSK: None
+ Custom Options: {'encapsulation': 'yes'} <---- Whether NAT-T is enforced
Ofport: 2 <--- Whether ovs-vswitchd has assigned Ofport
number to this Tunnel Port
CFM state: Disabled <--- Whether CFM declared this tunnel healthy
diff --git a/NEWS b/NEWS
index e335f64c2..f92be139a 100644
--- a/NEWS
+++ b/NEWS
@@ -1,3 +1,10 @@
+OVN v22.06.1 - xx xxx xxxx
+--------------------------
+ - Added nb_global IPsec options ipsec_encapsulation=true (libreswan)
+ and ipsec_forceencaps=true (strongswan) to unconditionally enforce
+ NAT-T UDP encapsulation. Requires OVS support for IPsec custom tunnel
+ options (which will be available in OVS 3.0).
+
OVN v22.06.0 - 03 Jun 2022
--------------------------
- Support IGMP and MLD snooping on transit logical switches that connect
@@ -24,6 +31,8 @@ OVN v22.06.0 - 03 Jun 2022
- Added support for setting the Next server IP in the DHCP header
using the private DHCP option - 253 in native OVN DHCPv4 responder.
- Support list of chassis for Logical_Switch_Port:options:requested-chassis.
+ - Support Logical_Switch_Port:options:activation-strategy for live migration
+ scenarios.
OVN v22.03.0 - 11 Mar 2022
--------------------------
diff --git a/configure.ac b/configure.ac
index b649441bc..739e0295e 100644
--- a/configure.ac
+++ b/configure.ac
@@ -13,7 +13,7 @@
# limitations under the License.
AC_PREREQ(2.63)
-AC_INIT(ovn, 22.06.0, bugs@openvswitch.org)
+AC_INIT(ovn, 22.06.1, bugs@openvswitch.org)
AC_CONFIG_MACRO_DIR([m4])
AC_CONFIG_AUX_DIR([build-aux])
AC_CONFIG_HEADERS([config.h])
diff --git a/controller/binding.c b/controller/binding.c
index 2279570f9..9025681db 100644
--- a/controller/binding.c
+++ b/controller/binding.c
@@ -386,6 +386,23 @@ update_ld_external_ports(const struct sbrec_port_binding *binding_rec,
}
}
+static void
+update_ld_multichassis_ports(const struct sbrec_port_binding *binding_rec,
+ struct hmap *local_datapaths)
+{
+ struct local_datapath *ld = get_local_datapath(
+ local_datapaths, binding_rec->datapath->tunnel_key);
+ if (!ld) {
+ return;
+ }
+ if (binding_rec->additional_chassis) {
+ add_local_datapath_multichassis_port(ld, binding_rec->logical_port,
+ binding_rec);
+ } else {
+ remove_local_datapath_multichassis_port(ld, binding_rec->logical_port);
+ }
+}
+
static void
update_ld_localnet_port(const struct sbrec_port_binding *binding_rec,
struct shash *bridge_mappings,
@@ -1752,6 +1769,8 @@ binding_run(struct binding_ctx_in *b_ctx_in, struct binding_ctx_out *b_ctx_out)
struct ovs_list localnet_lports = OVS_LIST_INITIALIZER(&localnet_lports);
struct ovs_list external_lports = OVS_LIST_INITIALIZER(&external_lports);
+ struct ovs_list multichassis_ports = OVS_LIST_INITIALIZER(
+ &multichassis_ports);
struct lport {
struct ovs_list list_node;
@@ -1787,6 +1806,13 @@ binding_run(struct binding_ctx_in *b_ctx_in, struct binding_ctx_out *b_ctx_out)
case LP_VIF:
consider_vif_lport(pb, b_ctx_in, b_ctx_out, NULL, qos_map_ptr);
+ if (pb->additional_chassis) {
+ struct lport *multichassis_lport = xmalloc(
+ sizeof *multichassis_lport);
+ multichassis_lport->pb = pb;
+ ovs_list_push_back(&multichassis_ports,
+ &multichassis_lport->list_node);
+ }
break;
case LP_CONTAINER:
@@ -1862,6 +1888,16 @@ binding_run(struct binding_ctx_in *b_ctx_in, struct binding_ctx_out *b_ctx_out)
free(ext_lport);
}
+ /* Run through multichassis lport list to see if these are ports
+ * on local datapaths discovered from above loop, and update the
+ * corresponding local datapath accordingly. */
+ struct lport *multichassis_lport;
+ LIST_FOR_EACH_POP (multichassis_lport, list_node, &multichassis_ports) {
+ update_ld_multichassis_ports(multichassis_lport->pb,
+ b_ctx_out->local_datapaths);
+ free(multichassis_lport);
+ }
+
shash_destroy(&bridge_mappings);
if (!sset_is_empty(b_ctx_out->egress_ifaces)
@@ -1934,6 +1970,7 @@ remove_pb_from_local_datapath(const struct sbrec_port_binding *pb,
} else if (!strcmp(pb->type, "external")) {
remove_local_datapath_external_port(ld, pb->logical_port);
}
+ remove_local_datapath_multichassis_port(ld, pb->logical_port);
}
static void
@@ -2677,6 +2714,7 @@ delete_done:
case LP_VIF:
case LP_CONTAINER:
case LP_VIRTUAL:
+ update_ld_multichassis_ports(pb, b_ctx_out->local_datapaths);
handled = handle_updated_vif_lport(pb, lport_type, b_ctx_in,
b_ctx_out, qos_map_ptr);
break;
diff --git a/controller/encaps.c b/controller/encaps.c
index ed01b1368..476da05af 100644
--- a/controller/encaps.c
+++ b/controller/encaps.c
@@ -199,6 +199,21 @@ tunnel_add(struct tunnel_ctx *tc, const struct sbrec_sb_global *sbg,
if (sbg->ipsec) {
set_local_ip = true;
smap_add(&options, "remote_name", new_chassis_id);
+
+ /* Force NAT-T traversal via configuration */
+ /* Two ipsec backends are supported: libreswan and strongswan */
+ /* libreswan param: encapsulation; strongswan param: forceencaps */
+ bool encapsulation;
+ bool forceencaps;
+ encapsulation = smap_get_bool(&sbg->options, "ipsec_encapsulation",
+ false);
+ forceencaps = smap_get_bool(&sbg->options, "ipsec_forceencaps", false);
+ if (encapsulation) {
+ smap_add(&options, "ipsec_encapsulation", "yes");
+ }
+ if (forceencaps) {
+ smap_add(&options, "ipsec_forceencaps", "yes");
+ }
}
if (set_local_ip) {
diff --git a/controller/lflow.c b/controller/lflow.c
index 934b23698..6055097b5 100644
--- a/controller/lflow.c
+++ b/controller/lflow.c
@@ -1775,6 +1775,7 @@ add_lb_vip_hairpin_reply_action(struct in6_addr *vip6, ovs_be32 vip,
uint64_t cookie, struct ofpbuf *ofpacts)
{
struct match match = MATCH_CATCHALL_INITIALIZER;
+ size_t ol_offset = ofpacts->size;
struct ofpact_learn *ol = ofpact_put_LEARN(ofpacts);
struct ofpact_learn_spec *ol_spec;
unsigned int imm_bytes;
@@ -1928,6 +1929,8 @@ add_lb_vip_hairpin_reply_action(struct in6_addr *vip6, ovs_be32 vip,
src_imm = ofpbuf_put_zeros(ofpacts, OFPACT_ALIGN(imm_bytes));
memcpy(src_imm, &imm_reg_value, imm_bytes);
+ /* Reload ol pointer since ofpacts buffer can be reallocated. */
+ ol = ofpbuf_at_assert(ofpacts, ol_offset, sizeof *ol);
ofpact_finish_LEARN(ofpacts, &ol);
}
diff --git a/controller/local_data.c b/controller/local_data.c
index 98445902b..7f874fc19 100644
--- a/controller/local_data.c
+++ b/controller/local_data.c
@@ -72,6 +72,7 @@ local_datapath_alloc(const struct sbrec_datapath_binding *dp)
ld->is_switch = datapath_is_switch(dp);
ld->is_transit_switch = datapath_is_transit_switch(dp);
shash_init(&ld->external_ports);
+ shash_init(&ld->multichassis_ports);
/* memory accounting - common part. */
local_datapath_usage += sizeof *ld;
@@ -97,13 +98,20 @@ local_datapath_destroy(struct local_datapath *ld)
SHASH_FOR_EACH (node, &ld->external_ports) {
local_datapath_usage -= strlen(node->name);
}
- local_datapath_usage -= shash_count(&ld->external_ports) * sizeof *node;
+ SHASH_FOR_EACH (node, &ld->multichassis_ports) {
+ local_datapath_usage -= strlen(node->name);
+ }
+ local_datapath_usage -= (shash_count(&ld->external_ports)
+ * sizeof *node);
+ local_datapath_usage -= (shash_count(&ld->multichassis_ports)
+ * sizeof *node);
local_datapath_usage -= sizeof *ld;
local_datapath_usage -=
ld->n_allocated_peer_ports * sizeof *ld->peer_ports;
free(ld->peer_ports);
shash_destroy(&ld->external_ports);
+ shash_destroy(&ld->multichassis_ports);
free(ld);
}
@@ -274,6 +282,26 @@ remove_local_datapath_external_port(struct local_datapath *ld,
}
}
+void
+add_local_datapath_multichassis_port(struct local_datapath *ld,
+ char *logical_port, const void *data)
+{
+ if (!shash_replace(&ld->multichassis_ports, logical_port, data)) {
+ local_datapath_usage += sizeof(struct shash_node) +
+ strlen(logical_port);
+ }
+}
+
+void
+remove_local_datapath_multichassis_port(struct local_datapath *ld,
+ char *logical_port)
+{
+ if (shash_find_and_delete(&ld->multichassis_ports, logical_port)) {
+ local_datapath_usage -= sizeof(struct shash_node) +
+ strlen(logical_port);
+ }
+}
+
void
local_datapath_memory_usage(struct simap *usage)
{
diff --git a/controller/local_data.h b/controller/local_data.h
index 9306ddf15..d898c8aa5 100644
--- a/controller/local_data.h
+++ b/controller/local_data.h
@@ -58,6 +58,7 @@ struct local_datapath {
size_t n_allocated_peer_ports;
struct shash external_ports;
+ struct shash multichassis_ports;
};
struct local_datapath *local_datapath_alloc(
@@ -155,5 +156,10 @@ void add_local_datapath_external_port(struct local_datapath *ld,
char *logical_port, const void *data);
void remove_local_datapath_external_port(struct local_datapath *ld,
char *logical_port);
+void add_local_datapath_multichassis_port(struct local_datapath *ld,
+ char *logical_port,
+ const void *data);
+void remove_local_datapath_multichassis_port(struct local_datapath *ld,
+ char *logical_port);
#endif /* controller/local_data.h */
diff --git a/controller/lport.c b/controller/lport.c
index bf55d83f2..add7e91aa 100644
--- a/controller/lport.c
+++ b/controller/lport.c
@@ -197,3 +197,25 @@ get_peer_lport(const struct sbrec_port_binding *pb,
peer_name);
return (peer && peer->datapath) ? peer : NULL;
}
+
+bool
+lport_is_activated_by_activation_strategy(const struct sbrec_port_binding *pb,
+ const struct sbrec_chassis *chassis)
+{
+ const char *activated_chassis = smap_get(&pb->options,
+ "additional-chassis-activated");
+ if (activated_chassis) {
+ char *save_ptr;
+ char *tokstr = xstrdup(activated_chassis);
+ for (const char *chassis_name = strtok_r(tokstr, ",", &save_ptr);
+ chassis_name != NULL;
+ chassis_name = strtok_r(NULL, ",", &save_ptr)) {
+ if (!strcmp(chassis_name, chassis->name)) {
+ free(tokstr);
+ return true;
+ }
+ }
+ free(tokstr);
+ }
+ return false;
+}
diff --git a/controller/lport.h b/controller/lport.h
index 115881655..644c67255 100644
--- a/controller/lport.h
+++ b/controller/lport.h
@@ -70,4 +70,7 @@ const struct sbrec_port_binding *lport_get_peer(
const struct sbrec_port_binding *lport_get_l3gw_peer(
const struct sbrec_port_binding *,
struct ovsdb_idl_index *sbrec_port_binding_by_name);
+bool
+lport_is_activated_by_activation_strategy(const struct sbrec_port_binding *pb,
+ const struct sbrec_chassis *chassis);
#endif /* controller/lport.h */
diff --git a/controller/ovn-controller.c b/controller/ovn-controller.c
index 2793c8687..2e9138036 100644
--- a/controller/ovn-controller.c
+++ b/controller/ovn-controller.c
@@ -1128,6 +1128,53 @@ ovs_interface_shadow_ovs_interface_handler(struct engine_node *node,
return true;
}
+struct ed_type_activated_ports {
+ struct ovs_list *activated_ports;
+};
+
+static void *
+en_activated_ports_init(struct engine_node *node OVS_UNUSED,
+ struct engine_arg *arg OVS_UNUSED)
+{
+ struct ed_type_activated_ports *data = xzalloc(sizeof *data);
+ data->activated_ports = NULL;
+ return data;
+}
+
+static void
+en_activated_ports_cleanup(void *data_)
+{
+ struct ed_type_activated_ports *data = data_;
+ if (!data->activated_ports) {
+ return;
+ }
+
+ struct activated_port *pp;
+ LIST_FOR_EACH_POP (pp, list, data->activated_ports) {
+ free(pp);
+ }
+ free(data->activated_ports);
+ data->activated_ports = NULL;
+}
+
+static void
+en_activated_ports_clear_tracked_data(void *data)
+{
+ en_activated_ports_cleanup(data);
+}
+
+static void
+en_activated_ports_run(struct engine_node *node, void *data_)
+{
+ struct ed_type_activated_ports *data = data_;
+ enum engine_node_state state = EN_UNCHANGED;
+ data->activated_ports = get_ports_to_activate_in_engine();
+ if (data->activated_ports) {
+ state = EN_UPDATED;
+ }
+ engine_set_node_state(node, state);
+}
+
struct ed_type_runtime_data {
/* Contains "struct local_datapath" nodes. */
struct hmap local_datapaths;
@@ -2953,6 +3000,11 @@ static void init_physical_ctx(struct engine_node *node,
engine_get_input("SB_port_binding", node),
"name");
+ struct ovsdb_idl_index *sbrec_port_binding_by_datapath =
+ engine_ovsdb_node_get_index(
+ engine_get_input("SB_port_binding", node),
+ "datapath");
+
struct sbrec_multicast_group_table *multicast_group_table =
(struct sbrec_multicast_group_table *)EN_OVSDB_GET(
engine_get_input("SB_multicast_group", node));
@@ -2992,6 +3044,7 @@ static void init_physical_ctx(struct engine_node *node,
struct simap *ct_zones = &ct_zones_data->current;
p_ctx->sbrec_port_binding_by_name = sbrec_port_binding_by_name;
+ p_ctx->sbrec_port_binding_by_datapath = sbrec_port_binding_by_datapath;
p_ctx->port_binding_table = port_binding_table;
p_ctx->mc_group_table = multicast_group_table;
p_ctx->br_int = br_int;
@@ -3164,6 +3217,49 @@ pflow_output_ct_zones_handler(struct engine_node *node OVS_UNUSED,
return !ct_zones_data->recomputed;
}
+static bool
+pflow_output_activated_ports_handler(struct engine_node *node, void *data)
+{
+ struct ed_type_activated_ports *ap =
+ engine_get_input_data("activated_ports", node);
+ if (!ap->activated_ports) {
+ return true;
+ }
+
+ struct ed_type_pflow_output *pfo = data;
+ struct ed_type_runtime_data *rt_data =
+ engine_get_input_data("runtime_data", node);
+ struct ed_type_non_vif_data *non_vif_data =
+ engine_get_input_data("non_vif_data", node);
+
+ struct physical_ctx p_ctx;
+ init_physical_ctx(node, rt_data, non_vif_data, &p_ctx);
+
+ struct activated_port *pp;
+ LIST_FOR_EACH (pp, list, ap->activated_ports) {
+ struct ovsdb_idl_index *sbrec_datapath_binding_by_key =
+ engine_ovsdb_node_get_index(
+ engine_get_input("SB_datapath_binding", node),
+ "key");
+ struct ovsdb_idl_index *sbrec_port_binding_by_key =
+ engine_ovsdb_node_get_index(
+ engine_get_input("SB_port_binding", node),
+ "key");
+ const struct sbrec_port_binding *pb = lport_lookup_by_key(
+ sbrec_datapath_binding_by_key, sbrec_port_binding_by_key,
+ pp->dp_key, pp->port_key);
+ if (pb) {
+ if (!physical_handle_flows_for_lport(pb, false, &p_ctx,
+ &pfo->flow_table)) {
+ return false;
+ }
+ tag_port_as_activated_in_engine(pp);
+ }
+ }
+ engine_set_node_state(node, EN_UPDATED);
+ return true;
+}
+
static void *
en_flow_output_init(struct engine_node *node OVS_UNUSED,
struct engine_arg *arg OVS_UNUSED)
@@ -3445,6 +3541,7 @@ main(int argc, char *argv[])
ENGINE_NODE(non_vif_data, "non_vif_data");
ENGINE_NODE(mff_ovn_geneve, "mff_ovn_geneve");
ENGINE_NODE(ofctrl_is_connected, "ofctrl_is_connected");
+ ENGINE_NODE_WITH_CLEAR_TRACK_DATA(activated_ports, "activated_ports");
ENGINE_NODE(pflow_output, "physical_flow_output");
ENGINE_NODE_WITH_CLEAR_TRACK_DATA(lflow_output, "logical_flow_output");
ENGINE_NODE(flow_output, "flow_output");
@@ -3492,6 +3589,14 @@ main(int argc, char *argv[])
engine_add_input(&en_pflow_output, &en_sb_multicast_group,
pflow_output_sb_multicast_group_handler);
+ /* pflow_output needs to access the SB datapath binding and hence a noop
+ * handler.
+ */
+ engine_add_input(&en_pflow_output, &en_sb_datapath_binding,
+ engine_noop_handler);
+ engine_add_input(&en_pflow_output, &en_activated_ports,
+ pflow_output_activated_ports_handler);
+
engine_add_input(&en_pflow_output, &en_runtime_data,
pflow_output_runtime_data_handler);
engine_add_input(&en_pflow_output, &en_sb_encap, NULL);
diff --git a/controller/physical.c b/controller/physical.c
index 24de86f24..816a557e7 100644
--- a/controller/physical.c
+++ b/controller/physical.c
@@ -40,7 +40,9 @@
#include "lib/mcast-group-index.h"
#include "lib/ovn-sb-idl.h"
#include "lib/ovn-util.h"
+#include "ovn/actions.h"
#include "physical.h"
+#include "pinctrl.h"
#include "openvswitch/shash.h"
#include "simap.h"
#include "smap.h"
@@ -984,6 +986,155 @@ enum access_type {
PORT_HA_REMOTE,
};
+static void
+setup_rarp_activation_strategy(const struct sbrec_port_binding *binding,
+ ofp_port_t ofport, struct zone_ids *zone_ids,
+ struct ovn_desired_flow_table *flow_table,
+ struct ofpbuf *ofpacts_p)
+{
+ struct match match = MATCH_CATCHALL_INITIALIZER;
+
+ /* Unblock the port on ingress RARP. */
+ match_set_dl_type(&match, htons(ETH_TYPE_RARP));
+ match_set_in_port(&match, ofport);
+ ofpbuf_clear(ofpacts_p);
+
+ load_logical_ingress_metadata(binding, zone_ids, ofpacts_p);
+
+ size_t ofs = ofpacts_p->size;
+ struct ofpact_controller *oc = ofpact_put_CONTROLLER(ofpacts_p);
+ oc->max_len = UINT16_MAX;
+ oc->reason = OFPR_ACTION;
+
+ struct action_header ah = {
+ .opcode = htonl(ACTION_OPCODE_ACTIVATION_STRATEGY_RARP)
+ };
+ ofpbuf_put(ofpacts_p, &ah, sizeof ah);
+
+ ofpacts_p->header = oc;
+ oc->userdata_len = ofpacts_p->size - (ofs + sizeof *oc);
+ ofpact_finish_CONTROLLER(ofpacts_p, &oc);
+ put_resubmit(OFTABLE_LOG_INGRESS_PIPELINE, ofpacts_p);
+
+ ofctrl_add_flow(flow_table, OFTABLE_PHY_TO_LOG, 1010,
+ binding->header_.uuid.parts[0],
+ &match, ofpacts_p, &binding->header_.uuid);
+ ofpbuf_clear(ofpacts_p);
+
+ /* Block all non-RARP traffic for the port, both directions. */
+ match_init_catchall(&match);
+ match_set_in_port(&match, ofport);
+
+ ofctrl_add_flow(flow_table, OFTABLE_PHY_TO_LOG, 1000,
+ binding->header_.uuid.parts[0],
+ &match, ofpacts_p, &binding->header_.uuid);
+
+ match_init_catchall(&match);
+ uint32_t dp_key = binding->datapath->tunnel_key;
+ uint32_t port_key = binding->tunnel_key;
+ match_set_metadata(&match, htonll(dp_key));
+ match_set_reg(&match, MFF_LOG_OUTPORT - MFF_REG0, port_key);
+
+ ofctrl_add_flow(flow_table, OFTABLE_LOG_TO_PHY, 1000,
+ binding->header_.uuid.parts[0],
+ &match, ofpacts_p, &binding->header_.uuid);
+}
+
+static void
+setup_activation_strategy(const struct sbrec_port_binding *binding,
+ const struct sbrec_chassis *chassis,
+ uint32_t dp_key, uint32_t port_key,
+ ofp_port_t ofport, struct zone_ids *zone_ids,
+ struct ovn_desired_flow_table *flow_table,
+ struct ofpbuf *ofpacts_p)
+{
+ for (size_t i = 0; i < binding->n_additional_chassis; i++) {
+ static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);
+ if (binding->additional_chassis[i] == chassis) {
+ const char *strategy = smap_get(&binding->options,
+ "activation-strategy");
+ if (strategy
+ && !lport_is_activated_by_activation_strategy(binding,
+ chassis)
+ && !pinctrl_is_port_activated(dp_key, port_key)) {
+ if (!strcmp(strategy, "rarp")) {
+ setup_rarp_activation_strategy(binding, ofport,
+ zone_ids, flow_table,
+ ofpacts_p);
+ } else {
+ VLOG_WARN_RL(&rl,
+ "Unknown activation strategy defined for "
+ "port %s: %s",
+ binding->logical_port, strategy);
+ return;
+ }
+ }
+ return;
+ }
+ }
+}
+
+static void
+enforce_tunneling_for_multichassis_ports(
+ struct local_datapath *ld,
+ const struct sbrec_port_binding *binding,
+ const struct sbrec_chassis *chassis,
+ const struct hmap *chassis_tunnels,
+ enum mf_field_id mff_ovn_geneve,
+ struct ovn_desired_flow_table *flow_table)
+{
+ if (shash_is_empty(&ld->multichassis_ports)) {
+ return;
+ }
+
+ struct ovs_list *tuns = get_remote_tunnels(binding, chassis,
+ chassis_tunnels);
+ if (ovs_list_is_empty(tuns)) {
+ free(tuns);
+ return;
+ }
+
+ uint32_t dp_key = binding->datapath->tunnel_key;
+ uint32_t port_key = binding->tunnel_key;
+
+ struct shash_node *node;
+ SHASH_FOR_EACH (node, &ld->multichassis_ports) {
+ const struct sbrec_port_binding *mcp = node->data;
+
+ struct ofpbuf ofpacts;
+ ofpbuf_init(&ofpacts, 0);
+
+ bool is_vtep_port = !strcmp(binding->type, "vtep");
+ /* rewrite MFF_IN_PORT to bypass OpenFlow loopback check for ARP/ND
+ * responder in L3 networks. */
+ if (is_vtep_port) {
+ put_load(ofp_to_u16(OFPP_NONE), MFF_IN_PORT, 0, 16, &ofpacts);
+ }
+
+ struct match match;
+ match_outport_dp_and_port_keys(&match, dp_key, port_key);
+ match_set_reg(&match, MFF_LOG_INPORT - MFF_REG0, mcp->tunnel_key);
+
+ struct tunnel *tun;
+ LIST_FOR_EACH (tun, list_node, tuns) {
+ put_encapsulation(mff_ovn_geneve, tun->tun,
+ binding->datapath, port_key, is_vtep_port,
+ &ofpacts);
+ ofpact_put_OUTPUT(&ofpacts)->port = tun->tun->ofport;
+ }
+ ofctrl_add_flow(flow_table, OFTABLE_REMOTE_OUTPUT, 110,
+ binding->header_.uuid.parts[0], &match, &ofpacts,
+ &binding->header_.uuid);
+ ofpbuf_uninit(&ofpacts);
+ }
+
+ struct tunnel *tun_elem;
+ LIST_FOR_EACH_POP (tun_elem, list_node, tuns) {
+ free(tun_elem);
+ }
+ free(tuns);
+}
+
static void
consider_port_binding(struct ovsdb_idl_index *sbrec_port_binding_by_name,
enum mf_field_id mff_ovn_geneve,
@@ -1239,6 +1390,10 @@ consider_port_binding(struct ovsdb_idl_index *sbrec_port_binding_by_name,
}
}
+ setup_activation_strategy(binding, chassis, dp_key, port_key,
+ ofport, &zone_ids, flow_table,
+ ofpacts_p);
+
/* Remember the size with just strip vlan added so far,
* as we're going to remove this with ofpbuf_pull() later. */
uint32_t ofpacts_orig_size = ofpacts_p->size;
@@ -1415,6 +1570,9 @@ consider_port_binding(struct ovsdb_idl_index *sbrec_port_binding_by_name,
binding->header_.uuid.parts[0],
&match, ofpacts_p, &binding->header_.uuid);
+ enforce_tunneling_for_multichassis_ports(
+ ld, binding, chassis, chassis_tunnels, mff_ovn_geneve, flow_table);
+
/* No more tunneling to set up. */
goto out;
}
@@ -1733,20 +1891,49 @@ physical_handle_flows_for_lport(const struct sbrec_port_binding *pb,
ofctrl_remove_flows(flow_table, &pb->header_.uuid);
+ struct local_datapath *ldp =
+ get_local_datapath(p_ctx->local_datapaths,
+ pb->datapath->tunnel_key);
if (!strcmp(pb->type, "external")) {
/* External lports have a dependency on the localnet port.
* We need to remove the flows of the localnet port as well
* and re-consider adding the flows for it.
*/
- struct local_datapath *ldp =
- get_local_datapath(p_ctx->local_datapaths,
- pb->datapath->tunnel_key);
if (ldp && ldp->localnet_port) {
ofctrl_remove_flows(flow_table, &ldp->localnet_port->header_.uuid);
physical_eval_port_binding(p_ctx, ldp->localnet_port, flow_table);
}
}
+ if (ldp) {
+ bool multichassis_state_changed = (
+ !!pb->additional_chassis ==
+ !!shash_find(&ldp->multichassis_ports, pb->logical_port)
+ );
+ if (multichassis_state_changed) {
+ if (pb->additional_chassis) {
+ add_local_datapath_multichassis_port(
+ ldp, pb->logical_port, pb);
+ } else {
+ remove_local_datapath_multichassis_port(
+ ldp, pb->logical_port);
+ }
+
+ struct sbrec_port_binding *target =
+ sbrec_port_binding_index_init_row(
+ p_ctx->sbrec_port_binding_by_datapath);
+ sbrec_port_binding_index_set_datapath(target, ldp->datapath);
+
+ const struct sbrec_port_binding *port;
+ SBREC_PORT_BINDING_FOR_EACH_EQUAL (
+ port, target, p_ctx->sbrec_port_binding_by_datapath) {
+ ofctrl_remove_flows(flow_table, &port->header_.uuid);
+ physical_eval_port_binding(p_ctx, port, flow_table);
+ }
+ sbrec_port_binding_index_destroy_row(target);
+ }
+ }
+
if (!removed) {
physical_eval_port_binding(p_ctx, pb, flow_table);
if (!strcmp(pb->type, "patch")) {
diff --git a/controller/physical.h b/controller/physical.h
index ee4b1ae1f..1b8f1ea55 100644
--- a/controller/physical.h
+++ b/controller/physical.h
@@ -45,6 +45,7 @@ struct local_nonvif_data;
struct physical_ctx {
struct ovsdb_idl_index *sbrec_port_binding_by_name;
+ struct ovsdb_idl_index *sbrec_port_binding_by_datapath;
const struct sbrec_port_binding_table *port_binding_table;
const struct sbrec_multicast_group_table *mc_group_table;
const struct ovsrec_bridge *br_int;
diff --git a/controller/pinctrl.c b/controller/pinctrl.c
index 428863293..2fcf91bc9 100644
--- a/controller/pinctrl.c
+++ b/controller/pinctrl.c
@@ -29,10 +29,12 @@
#include "lport.h"
#include "mac-learn.h"
#include "nx-match.h"
+#include "ofctrl.h"
#include "latch.h"
#include "lib/packets.h"
#include "lib/sset.h"
#include "openvswitch/ofp-actions.h"
+#include "openvswitch/ofp-flow.h"
#include "openvswitch/ofp-msgs.h"
#include "openvswitch/ofp-packet.h"
#include "openvswitch/ofp-print.h"
@@ -152,8 +154,8 @@ VLOG_DEFINE_THIS_MODULE(pinctrl);
* and pinctrl_run().
* 'pinctrl_handler_seq' is used by pinctrl_run() to
* wake up pinctrl_handler thread from poll_block() if any changes happened
- * in 'send_garp_rarp_data', 'ipv6_ras' and 'buffered_mac_bindings'
- * structures.
+ * in 'send_garp_rarp_data', 'ipv6_ras', 'ports_to_activate_in_db' and
+ * 'buffered_mac_bindings' structures.
*
* 'pinctrl_main_seq' is used by pinctrl_handler() thread to wake up
* the main thread from poll_block() when mac bindings/igmp groups need to
@@ -198,6 +200,17 @@ static void wait_put_mac_bindings(struct ovsdb_idl_txn *ovnsb_idl_txn);
static void send_mac_binding_buffered_pkts(struct rconn *swconn)
OVS_REQUIRES(pinctrl_mutex);
+static void pinctrl_rarp_activation_strategy_handler(const struct match *md);
+
+static void init_activated_ports(void);
+static void destroy_activated_ports(void);
+static void wait_activated_ports(void);
+static void run_activated_ports(
+ struct ovsdb_idl_txn *ovnsb_idl_txn,
+ struct ovsdb_idl_index *sbrec_datapath_binding_by_key,
+ struct ovsdb_idl_index *sbrec_port_binding_by_name,
+ const struct sbrec_chassis *chassis);
+
static void init_send_garps_rarps(void);
static void destroy_send_garps_rarps(void);
static void send_garp_rarp_wait(long long int send_garp_rarp_time);
@@ -522,6 +535,7 @@ pinctrl_init(void)
init_ipv6_ras();
init_ipv6_prefixd();
init_buffered_packets_map();
+ init_activated_ports();
init_event_table();
ip_mcast_snoop_init();
init_put_vport_bindings();
@@ -3269,6 +3283,12 @@ process_packet_in(struct rconn *swconn, const struct ofp_header *msg)
ovs_mutex_unlock(&pinctrl_mutex);
break;
+ case ACTION_OPCODE_ACTIVATION_STRATEGY_RARP:
+ ovs_mutex_lock(&pinctrl_mutex);
+ pinctrl_rarp_activation_strategy_handler(&pin.flow_metadata);
+ ovs_mutex_unlock(&pinctrl_mutex);
+ break;
+
default:
VLOG_WARN_RL(&rl, "unrecognized packet-in opcode %"PRIu32,
ntohl(ah->opcode));
@@ -3434,11 +3454,11 @@ pinctrl_handler(void *arg_)
ip_mcast_querier_run(swconn, &send_mcast_query_time);
}
- }
- ovs_mutex_lock(&pinctrl_mutex);
- svc_monitors_run(swconn, &svc_monitors_next_run_time);
- ovs_mutex_unlock(&pinctrl_mutex);
+ ovs_mutex_lock(&pinctrl_mutex);
+ svc_monitors_run(swconn, &svc_monitors_next_run_time);
+ ovs_mutex_unlock(&pinctrl_mutex);
+ }
rconn_run_wait(swconn);
rconn_recv_wait(swconn);
@@ -3533,6 +3553,8 @@ pinctrl_run(struct ovsdb_idl_txn *ovnsb_idl_txn,
bfd_monitor_run(ovnsb_idl_txn, bfd_table, sbrec_port_binding_by_name,
chassis, active_tunnels);
run_put_fdbs(ovnsb_idl_txn, sbrec_fdb_by_dp_key_mac);
+ run_activated_ports(ovnsb_idl_txn, sbrec_datapath_binding_by_key,
+ sbrec_port_binding_by_key, chassis);
ovs_mutex_unlock(&pinctrl_mutex);
}
@@ -4030,12 +4052,15 @@ prepare_ipv6_ras(const struct shash *local_active_ports_ras,
void
pinctrl_wait(struct ovsdb_idl_txn *ovnsb_idl_txn)
{
+ ovs_mutex_lock(&pinctrl_mutex);
wait_put_mac_bindings(ovnsb_idl_txn);
wait_controller_event(ovnsb_idl_txn);
wait_put_vport_bindings(ovnsb_idl_txn);
int64_t new_seq = seq_read(pinctrl_main_seq);
seq_wait(pinctrl_main_seq, new_seq);
wait_put_fdbs(ovnsb_idl_txn);
+ wait_activated_ports();
+ ovs_mutex_unlock(&pinctrl_mutex);
}
/* Called by ovn-controller. */
@@ -4050,6 +4075,7 @@ pinctrl_destroy(void)
destroy_ipv6_ras();
destroy_ipv6_prefixd();
destroy_buffered_packets_map();
+ destroy_activated_ports();
event_table_destroy();
destroy_put_mac_bindings();
destroy_put_vport_bindings();
@@ -7727,6 +7753,152 @@ pinctrl_handle_svc_check(struct rconn *swconn, const struct flow *ip_flow,
}
}
+static struct ovs_list ports_to_activate_in_db = OVS_LIST_INITIALIZER(
+ &ports_to_activate_in_db);
+static struct ovs_list ports_to_activate_in_engine = OVS_LIST_INITIALIZER(
+ &ports_to_activate_in_engine);
+
+struct ovs_list *
+get_ports_to_activate_in_engine(void)
+{
+ ovs_mutex_lock(&pinctrl_mutex);
+ if (ovs_list_is_empty(&ports_to_activate_in_engine)) {
+ ovs_mutex_unlock(&pinctrl_mutex);
+ return NULL;
+ }
+
+ struct ovs_list *ap = xmalloc(sizeof *ap);
+ ovs_list_init(ap);
+ struct activated_port *pp;
+ LIST_FOR_EACH (pp, list, &ports_to_activate_in_engine) {
+ struct activated_port *new = xmalloc(sizeof *new);
+ new->dp_key = pp->dp_key;
+ new->port_key = pp->port_key;
+ ovs_list_push_front(ap, &new->list);
+ }
+ ovs_mutex_unlock(&pinctrl_mutex);
+ return ap;
+}
+
+static void
+init_activated_ports(void)
+ OVS_REQUIRES(pinctrl_mutex)
+{
+ ovs_list_init(&ports_to_activate_in_db);
+ ovs_list_init(&ports_to_activate_in_engine);
+}
+
+static void
+destroy_activated_ports(void)
+ OVS_REQUIRES(pinctrl_mutex)
+{
+ struct activated_port *pp;
+ LIST_FOR_EACH_POP (pp, list, &ports_to_activate_in_db) {
+ free(pp);
+ }
+ LIST_FOR_EACH_POP (pp, list, &ports_to_activate_in_engine) {
+ free(pp);
+ }
+}
+
+static void
+wait_activated_ports(void)
+ OVS_REQUIRES(pinctrl_mutex)
+{
+ if (!ovs_list_is_empty(&ports_to_activate_in_engine)) {
+ poll_immediate_wake();
+ }
+}
+
+bool pinctrl_is_port_activated(int64_t dp_key, int64_t port_key)
+{
+ const struct activated_port *pp;
+ ovs_mutex_lock(&pinctrl_mutex);
+ LIST_FOR_EACH (pp, list, &ports_to_activate_in_db) {
+ if (pp->dp_key == dp_key && pp->port_key == port_key) {
+ ovs_mutex_unlock(&pinctrl_mutex);
+ return true;
+ }
+ }
+ LIST_FOR_EACH (pp, list, &ports_to_activate_in_engine) {
+ if (pp->dp_key == dp_key && pp->port_key == port_key) {
+ ovs_mutex_unlock(&pinctrl_mutex);
+ return true;
+ }
+ }
+ ovs_mutex_unlock(&pinctrl_mutex);
+ return false;
+}
+
+static void
+run_activated_ports(struct ovsdb_idl_txn *ovnsb_idl_txn,
+ struct ovsdb_idl_index *sbrec_datapath_binding_by_key,
+ struct ovsdb_idl_index *sbrec_port_binding_by_key,
+ const struct sbrec_chassis *chassis)
+ OVS_REQUIRES(pinctrl_mutex)
+{
+ if (!ovnsb_idl_txn) {
+ return;
+ }
+
+ struct activated_port *pp;
+ LIST_FOR_EACH_SAFE (pp, list, &ports_to_activate_in_db) {
+ const struct sbrec_port_binding *pb = lport_lookup_by_key(
+ sbrec_datapath_binding_by_key, sbrec_port_binding_by_key,
+ pp->dp_key, pp->port_key);
+ if (!pb || lport_is_activated_by_activation_strategy(pb, chassis)) {
+ ovs_list_remove(&pp->list);
+ free(pp);
+ continue;
+ }
+ const char *activated_chassis = smap_get(
+ &pb->options, "additional-chassis-activated");
+ char *activated_str;
+ if (activated_chassis) {
+ activated_str = xasprintf(
+ "%s,%s", activated_chassis, chassis->name);
+ sbrec_port_binding_update_options_setkey(
+ pb, "additional-chassis-activated", activated_str);
+ free(activated_str);
+ } else {
+ sbrec_port_binding_update_options_setkey(
+ pb, "additional-chassis-activated", chassis->name);
+ }
+ }
+}
+
+void
+tag_port_as_activated_in_engine(struct activated_port *ap) {
+ ovs_mutex_lock(&pinctrl_mutex);
+ struct activated_port *pp;
+ LIST_FOR_EACH_SAFE (pp, list, &ports_to_activate_in_engine) {
+ if (pp->dp_key == ap->dp_key && pp->port_key == ap->port_key) {
+ ovs_list_remove(&pp->list);
+ free(pp);
+ }
+ }
+ ovs_mutex_unlock(&pinctrl_mutex);
+}
+
+static void
+pinctrl_rarp_activation_strategy_handler(const struct match *md)
+ OVS_REQUIRES(pinctrl_mutex)
+{
+ /* Tag the port as activated in-memory. */
+ struct activated_port *pp = xmalloc(sizeof *pp);
+ pp->port_key = md->flow.regs[MFF_LOG_INPORT - MFF_REG0];
+ pp->dp_key = ntohll(md->flow.metadata);
+ ovs_list_push_front(&ports_to_activate_in_db, &pp->list);
+
+ pp = xmalloc(sizeof *pp);
+ pp->port_key = md->flow.regs[MFF_LOG_INPORT - MFF_REG0];
+ pp->dp_key = ntohll(md->flow.metadata);
+ ovs_list_push_front(&ports_to_activate_in_engine, &pp->list);
+
+ /* Notify main thread on pending additional-chassis-activated updates. */
+ notify_pinctrl_main();
+}
+
static struct hmap put_fdbs;
/* MAC learning (fdb) related functions. Runs within the main
diff --git a/controller/pinctrl.h b/controller/pinctrl.h
index 88f18e983..d4f52e94d 100644
--- a/controller/pinctrl.h
+++ b/controller/pinctrl.h
@@ -20,6 +20,7 @@
#include <stdint.h>
#include "lib/sset.h"
+#include "openvswitch/list.h"
#include "openvswitch/meta-flow.h"
struct hmap;
@@ -33,6 +34,7 @@ struct sbrec_dns_table;
struct sbrec_controller_event_table;
struct sbrec_service_monitor_table;
struct sbrec_bfd_table;
+struct sbrec_port_binding;
void pinctrl_init(void);
void pinctrl_run(struct ovsdb_idl_txn *ovnsb_idl_txn,
@@ -56,4 +58,14 @@ void pinctrl_run(struct ovsdb_idl_txn *ovnsb_idl_txn,
void pinctrl_wait(struct ovsdb_idl_txn *ovnsb_idl_txn);
void pinctrl_destroy(void);
void pinctrl_set_br_int_name(char *br_int_name);
+
+struct activated_port {
+ uint32_t dp_key;
+ uint32_t port_key;
+ struct ovs_list list;
+};
+
+void tag_port_as_activated_in_engine(struct activated_port *ap);
+struct ovs_list *get_ports_to_activate_in_engine(void);
+bool pinctrl_is_port_activated(int64_t dp_key, int64_t port_key);
#endif /* controller/pinctrl.h */
diff --git a/debian/changelog b/debian/changelog
index caef68452..1c2de53bd 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,9 @@
+ovn (22.06.1-1) unstable; urgency=low
+ [ OVN team ]
+ * New upstream version
+
+ -- OVN team <dev@openvswitch.org> Fri, 03 Jun 2022 11:54:08 -0400
+
ovn (22.06.0-1) unstable; urgency=low
* New upstream version
diff --git a/ic/ovn-ic.c b/ic/ovn-ic.c
index 8511cb9ac..95a5ff0de 100644
--- a/ic/ovn-ic.c
+++ b/ic/ovn-ic.c
@@ -925,7 +925,12 @@ parse_route(const char *s_prefix, const char *s_nexthop,
}
unsigned int nlen;
- return ip46_parse_cidr(s_nexthop, nexthop, &nlen);
+ if (!ip46_parse_cidr(s_nexthop, nexthop, &nlen)) {
+ return false;
+ }
+
+ /* Do not learn routes with link-local next hop. */
+ return !in6_is_lla(nexthop);
}
/* Return false if can't be added due to bad format. */
diff --git a/include/ovn/actions.h b/include/ovn/actions.h
index 1ae496960..33c319f1c 100644
--- a/include/ovn/actions.h
+++ b/include/ovn/actions.h
@@ -683,6 +683,9 @@ enum action_opcode {
/* put_fdb(inport, eth.src).
*/
ACTION_OPCODE_PUT_FDB,
+
+ /* activation_strategy_rarp() */
+ ACTION_OPCODE_ACTIVATION_STRATEGY_RARP,
};
/* Header. */
diff --git a/lib/extend-table.c b/lib/extend-table.c
index 4c3c4fac2..ebb1a054c 100644
--- a/lib/extend-table.c
+++ b/lib/extend-table.c
@@ -40,13 +40,17 @@ ovn_extend_table_init(struct ovn_extend_table *table)
}
static struct ovn_extend_table_info *
-ovn_extend_table_info_alloc(const char *name, uint32_t id, bool is_new_id,
+ovn_extend_table_info_alloc(const char *name, uint32_t id,
+ struct ovn_extend_table_info *peer,
uint32_t hash)
{
struct ovn_extend_table_info *e = xmalloc(sizeof *e);
e->name = xstrdup(name);
e->table_id = id;
- e->new_table_id = is_new_id;
+ e->peer = peer;
+ if (peer) {
+ peer->peer = e;
+ }
e->hmap_node.hash = hash;
hmap_init(&e->references);
return e;
@@ -184,9 +188,10 @@ ovn_extend_table_clear(struct ovn_extend_table *table, bool existing)
/* Clear the target table. */
HMAP_FOR_EACH_SAFE (g, hmap_node, target) {
hmap_remove(target, &g->hmap_node);
- /* Don't unset bitmap for desired group_info if the group_id
- * was not freshly reserved. */
- if (existing || g->new_table_id) {
+ if (g->peer) {
+ g->peer->peer = NULL;
+ } else {
+ /* Unset the bitmap because the peer is deleted already. */
bitmap_set0(table->table_ids, g->table_id);
}
ovn_extend_table_info_destroy(g);
@@ -209,11 +214,15 @@ void
ovn_extend_table_remove_existing(struct ovn_extend_table *table,
struct ovn_extend_table_info *existing)
{
- /* Remove 'existing' from 'groups->existing' */
+ /* Remove 'existing' from 'table->existing' */
hmap_remove(&table->existing, &existing->hmap_node);
- /* Dealloc group_id. */
- bitmap_set0(table->table_ids, existing->table_id);
+ if (existing->peer) {
+ existing->peer->peer = NULL;
+ } else {
+ /* Dealloc the ID. */
+ bitmap_set0(table->table_ids, existing->table_id);
+ }
ovn_extend_table_info_destroy(existing);
}
@@ -230,7 +239,9 @@ ovn_extend_table_delete_desired(struct ovn_extend_table *table,
VLOG_DBG("%s: %s, "UUID_FMT, __func__,
e->name, UUID_ARGS(&l->lflow_uuid));
hmap_remove(&table->desired, &e->hmap_node);
- if (e->new_table_id) {
+ if (e->peer) {
+ e->peer->peer = NULL;
+ } else {
bitmap_set0(table->table_ids, e->table_id);
}
ovn_extend_table_info_destroy(e);
@@ -254,17 +265,6 @@ ovn_extend_table_remove_desired(struct ovn_extend_table *table,
ovn_extend_table_delete_desired(table, l);
}
-static struct ovn_extend_table_info*
-ovn_extend_info_clone(struct ovn_extend_table_info *source)
-{
- struct ovn_extend_table_info *clone =
- ovn_extend_table_info_alloc(source->name,
- source->table_id,
- source->new_table_id,
- source->hmap_node.hash);
- return clone;
-}
-
void
ovn_extend_table_sync(struct ovn_extend_table *table)
{
@@ -273,11 +273,13 @@ ovn_extend_table_sync(struct ovn_extend_table *table)
/* Copy the contents of desired to existing. */
HMAP_FOR_EACH_SAFE (desired, hmap_node, &table->desired) {
if (!ovn_extend_table_lookup(&table->existing, desired)) {
- desired->new_table_id = false;
- struct ovn_extend_table_info *clone =
- ovn_extend_info_clone(desired);
- hmap_insert(&table->existing, &clone->hmap_node,
- clone->hmap_node.hash);
+ struct ovn_extend_table_info *existing =
+ ovn_extend_table_info_alloc(desired->name,
+ desired->table_id,
+ desired,
+ desired->hmap_node.hash);
+ hmap_insert(&table->existing, &existing->hmap_node,
+ existing->hmap_node.hash);
}
}
}
@@ -289,7 +291,7 @@ ovn_extend_table_assign_id(struct ovn_extend_table *table, const char *name,
struct uuid lflow_uuid)
{
uint32_t table_id = 0, hash;
- struct ovn_extend_table_info *table_info;
+ struct ovn_extend_table_info *table_info, *existing_info;
hash = hash_string(name, 0);
@@ -307,17 +309,18 @@ ovn_extend_table_assign_id(struct ovn_extend_table *table, const char *name,
/* Check whether we already have an installed entry for this
* combination. */
+ existing_info = NULL;
HMAP_FOR_EACH_WITH_HASH (table_info, hmap_node, hash, &table->existing) {
if (!strcmp(table_info->name, name)) {
- table_id = table_info->table_id;
+ existing_info = table_info;
+ table_id = existing_info->table_id;
+ break;
}
}
- bool new_table_id = false;
- if (!table_id) {
- /* Reserve a new group_id. */
+ if (!existing_info) {
+ /* Reserve a new id. */
table_id = bitmap_scan(table->table_ids, 0, 1, MAX_EXT_TABLE_ID + 1);
- new_table_id = true;
}
if (table_id == MAX_EXT_TABLE_ID + 1) {
@@ -327,7 +330,7 @@ ovn_extend_table_assign_id(struct ovn_extend_table *table, const char *name,
}
bitmap_set1(table->table_ids, table_id);
- table_info = ovn_extend_table_info_alloc(name, table_id, new_table_id,
+ table_info = ovn_extend_table_info_alloc(name, table_id, existing_info,
hash);
hmap_insert(&table->desired,
diff --git a/lib/extend-table.h b/lib/extend-table.h
index 6d81877af..b43a146b4 100644
--- a/lib/extend-table.h
+++ b/lib/extend-table.h
@@ -28,8 +28,12 @@
* such as the Group Table or Meter Table. */
struct ovn_extend_table {
unsigned long *table_ids; /* Used as a bitmap with value set
- * for allocated group ids in either
- * desired or existing. */
+ * for allocated ids in either desired or
+ * existing (or both). If the same "name"
+ * exists in both desired and existing tables,
+ * they must share the same ID. The "peer"
+ * pointer would tell if the ID is still used by
+ * the same item in the peer table. */
struct hmap desired;
struct hmap lflow_to_desired; /* Index for looking up desired table
* items from given lflow uuid, with
@@ -48,8 +52,13 @@ struct ovn_extend_table_info {
struct hmap_node hmap_node;
char *name; /* Name for the table entity. */
uint32_t table_id;
- bool new_table_id; /* 'True' if 'table_id' was reserved from
- * ovn_extend_table's 'table_ids' bitmap. */
+ struct ovn_extend_table_info *peer; /* The extend tables exist as pairs,
+ one for desired items and one for
+ existing items. "peer" maintains the
+ link between a pair of items in
+ these tables. If "peer" is NULL, it
+ means the counterpart is not created
+ yet or deleted already. */
struct hmap references; /* The lflows that are using this item, with
* ovn_extend_table_lflow_ref nodes. Only useful
* for items in ovn_extend_table.desired. */
diff --git a/northd/northd.c b/northd/northd.c
index f120c2366..f6b84c318 100644
--- a/northd/northd.c
+++ b/northd/northd.c
@@ -3470,6 +3470,16 @@ ovn_port_update_sbrec(struct northd_input *input_data,
smap_add(&options, "vlan-passthru", "true");
}
+ /* Retain activated chassis flags. */
+ if (op->sb->requested_additional_chassis) {
+ const char *activated_str = smap_get(
+ &op->sb->options, "additional-chassis-activated");
+ if (activated_str) {
+ smap_add(&options, "additional-chassis-activated",
+ activated_str);
+ }
+ }
+
sbrec_port_binding_set_options(op->sb, &options);
smap_destroy(&options);
if (ovn_is_known_nb_lsp_type(op->nbsp->type)) {
@@ -3832,6 +3842,13 @@ ovn_lb_svc_create(struct ovsdb_idl_txn *ovnsb_txn, struct ovn_northd_lb *lb,
backend_nb->svc_mon_src_ip);
}
+ if ((!op->sb->n_up || !op->sb->up[0])
+ && mon_info->sbrec_mon->status
+ && !strcmp(mon_info->sbrec_mon->status, "online")) {
+ sbrec_service_monitor_set_status(mon_info->sbrec_mon,
+ "offline");
+ }
+
backend_nb->sbrec_monitor = mon_info->sbrec_mon;
mon_info->required = true;
}
@@ -10976,6 +10993,9 @@ build_neigh_learning_flows_for_lrouter(
copp_meter_get(COPP_ARP, od->nbr->copp,
meter_groups));
+ ovn_lflow_add(lflows, od, S_ROUTER_IN_LEARN_NEIGHBOR, 95,
+ "nd_ns && (ip6.src == 0 || nd.sll == 0)", "next;");
+
ovn_lflow_metered(lflows, od, S_ROUTER_IN_LEARN_NEIGHBOR, 95,
"nd_na && nd.tll == 0",
"put_nd(inport, nd.target, eth.src); next;",
@@ -12021,6 +12041,7 @@ build_gateway_redirect_flows_for_lrouter(
}
for (size_t i = 0; i < od->n_l3dgw_ports; i++) {
const struct ovsdb_idl_row *stage_hint = NULL;
+ bool add_def_flow = true;
if (od->l3dgw_ports[i]->nbrp) {
stage_hint = &od->l3dgw_ports[i]->nbrp->header_;
@@ -12039,22 +12060,42 @@ build_gateway_redirect_flows_for_lrouter(
ovn_lflow_add_with_hint(lflows, od, S_ROUTER_IN_GW_REDIRECT, 50,
ds_cstr(match), ds_cstr(actions),
stage_hint);
- }
+ for (int j = 0; j < od->n_nat_entries; j++) {
+ const struct ovn_nat *nat = &od->nat_entries[j];
- for (int i = 0; i < od->n_nat_entries; i++) {
- const struct ovn_nat *nat = &od->nat_entries[i];
+ if (!lrouter_nat_is_stateless(nat->nb) ||
+ strcmp(nat->nb->type, "dnat_and_snat") ||
+ (!nat->nb->allowed_ext_ips && !nat->nb->exempted_ext_ips)) {
+ continue;
+ }
- if (!lrouter_nat_is_stateless(nat->nb) ||
- strcmp(nat->nb->type, "dnat_and_snat")) {
- continue;
- }
+ struct ds match_ext = DS_EMPTY_INITIALIZER;
+ struct nbrec_address_set *as = nat->nb->allowed_ext_ips
+ ? nat->nb->allowed_ext_ips : nat->nb->exempted_ext_ips;
+ ds_put_format(&match_ext, "%s && ip%s.src == $%s",
+ ds_cstr(match), nat_entry_is_v6(nat) ? "6" : "4",
+ as->name);
- ds_clear(match);
- ds_put_format(match, "ip && ip%s.dst == %s",
- nat_entry_is_v6(nat) ? "6" : "4",
- nat->nb->external_ip);
- ovn_lflow_add(lflows, od, S_ROUTER_IN_GW_REDIRECT, 100,
- ds_cstr(match), "drop;");
+ if (nat->nb->allowed_ext_ips) {
+ ovn_lflow_add_with_hint(lflows, od, S_ROUTER_IN_GW_REDIRECT,
+ 75, ds_cstr(&match_ext),
+ ds_cstr(actions), stage_hint);
+ if (add_def_flow) {
+ ds_clear(&match_ext);
+ ds_put_format(&match_ext, "ip && ip%s.dst == %s",
+ nat_entry_is_v6(nat) ? "6" : "4",
+ nat->nb->external_ip);
+ ovn_lflow_add(lflows, od, S_ROUTER_IN_GW_REDIRECT, 70,
+ ds_cstr(&match_ext), "drop;");
+ add_def_flow = false;
+ }
+ } else if (nat->nb->exempted_ext_ips) {
+ ovn_lflow_add_with_hint(lflows, od, S_ROUTER_IN_GW_REDIRECT,
+ 75, ds_cstr(&match_ext), "drop;",
+ stage_hint);
+ }
+ ds_destroy(&match_ext);
+ }
}
/* Packets are allowed by default. */
diff --git a/northd/ovn-northd.8.xml b/northd/ovn-northd.8.xml
index 1f7022490..32858c0b4 100644
--- a/northd/ovn-northd.8.xml
+++ b/northd/ovn-northd.8.xml
@@ -2323,6 +2323,12 @@ next;
to learn the neighbor.
</li>
+ <li>
+ A priority-95 flow with the match <code>nd_ns &&
+ (ip6.src == 0 || nd.sll == 0)</code> and applies the action
+ <code>next;</code>
+ </li>
+
<li>
A priority-90 flow with the match <code>arp</code> and
applies the action
@@ -3018,8 +3024,7 @@ icmp6 {
<code>ip && ip6.dst == <var>B</var></code>
with an action <code>ct_snat; </code>. If the NAT rule is of type
dnat_and_snat and has <code>stateless=true</code> in the
- options, then the action would be <code>ip4/6.dst=
- (<var>B</var>)</code>.
+ options, then the action would be <code>next;</code>.
</p>
<p>
@@ -3059,7 +3064,7 @@ icmp6 {
action <code>ct_snat_in_czone;</code> to unSNAT in the common
zone. If the NAT rule is of type dnat_and_snat and has
<code>stateless=true</code> in the options, then the action
- would be <code>ip4/6.dst=(<var>B</var>)</code>.
+ would be <code>next;</code>.
</p>
<p>
@@ -4217,6 +4222,26 @@ icmp6 {
external ip and <var>D</var> is NAT external mac.
</li>
+ <li>
+ For each <code>dnat_and_snat</code> NAT rule with
+ <code>stateless=true</code> and <code>allowed_ext_ips</code>
+ configured, a priority-75 flow is programmed with match
+ <code>ip4.dst == <var>B</var></code> and action
+ <code>outport = <var>CR</var>; next;</code> where <var>B</var>
+ is the NAT rule external IP and <var>CR</var> is the
+ <code>chassisredirect</code> port representing the instance
+ of the logical router distributed gateway port on the
+ gateway chassis. Moreover, a priority-70 flow is programmed
+ with the same match and action <code>drop;</code>.
+ For each <code>dnat_and_snat</code> NAT rule with
+ <code>stateless=true</code> and <code>exempted_ext_ips</code>
+ configured, a priority-75 flow is programmed with match
+ <code>ip4.dst == <var>B</var></code> and action
+ <code>drop;</code> where <var>B</var> is the NAT rule
+ external IP.
+ A similar flow is added for IPv6 traffic.
+ </li>
+
<li>
For each NAT rule in the OVN Northbound database that can
be handled in a distributed manner, a priority-80 logical flow
@@ -4415,8 +4440,7 @@ nd_ns {
is the logical router gateway port, with an action
<code>ct_dnat_in_czone;</code>. If the NAT rule is of type
dnat_and_snat and has <code>stateless=true</code> in the
- options, then the action would be <code>ip4/6.src=
- (<var>B</var>)</code>.
+ options, then the action would be <code>next;</code>.
</p>
<p>
diff --git a/northd/ovn-northd.c b/northd/ovn-northd.c
index e4e980720..ab28756af 100644
--- a/northd/ovn-northd.c
+++ b/northd/ovn-northd.c
@@ -107,7 +107,10 @@ static const char *rbac_port_binding_auth[] =
static const char *rbac_port_binding_update[] =
{"chassis", "additional_chassis",
"encap", "additional_encap",
- "up", "virtual_parent"};
+ "up", "virtual_parent",
+ /* NOTE: we only need to update the additional-chassis-activated key,
+ * but RBAC_Role doesn't support mutate operation for subkeys. */
+ "options"};
static const char *rbac_mac_binding_auth[] =
{""};
diff --git a/ovn-nb.xml b/ovn-nb.xml
index c197f431f..47fd5a544 100644
--- a/ovn-nb.xml
+++ b/ovn-nb.xml
@@ -610,6 +610,7 @@
Determines whether unregistered multicast traffic should be flooded
or not. Only applicable if
<ref column="other_config" key="mcast_snoop"/> is enabled.
+ Default: <code>false</code>.
</column>
<column name="other_config" key="mcast_table_size"
type='{"type": "integer", "minInteger": 1, "maxInteger": 32766}'>
@@ -1045,6 +1046,17 @@
</p>
</column>
+ <column name="options" key="activation-strategy">
+ If used with multiple chassis set in
+ <ref column="requested-chassis"/>, specifies an activation strategy
+ for all additional chassis. By default, no activation strategy is
+ used, meaning additional port locations are immediately available for
+ use. When set to "rarp", the port is blocked for ingress and egress
+ communication until a RARP packet is sent from a new location. The
+ "rarp" strategy is useful in live migration scenarios for virtual
+ machines.
+ </column>
+
<column name="options" key="iface-id-ver">
If set, this port will be bound by <code>ovn-controller</code>
only if this same key and value is configured in the
@@ -1129,12 +1141,13 @@
type='{"type": "boolean"}'>
If set to <code>true</code>, multicast packets (except reports) are
unconditionally forwarded to the specific port.
+ Default: <code>false</code>.
</column>
<column name="options" key="mcast_flood_reports"
type='{"type": "boolean"}'>
If set to <code>true</code>, multicast reports are unconditionally
- forwarded to the specific port.
+ forwarded to the specific port. Default: <code>false</code>.
</column>
</group>
@@ -2900,6 +2913,10 @@
has <ref table="Logical_Router" column="options"/>:mcast_relay set
to <code>true</code>.
</p>
+
+ <p>
+ Default: <code>false</code>.
+ </p>
</column>
<column name="options" key="requested-tnl-key"
diff --git a/ovn-sb.xml b/ovn-sb.xml
index 9f47a037e..49e851e2a 100644
--- a/ovn-sb.xml
+++ b/ovn-sb.xml
@@ -3374,6 +3374,21 @@ tcp.flags = RST;
</p>
</column>
+ <column name="options" key="activation-strategy">
+ If used with multiple chassis set in <ref column="requested-chassis"/>,
+ specifies an activation strategy for all additional chassis. By
+ default, no activation strategy is used, meaning additional port
+ locations are immediately available for use. When set to "rarp", the
+ port is blocked for ingress and egress communication until a RARP
+ packet is sent from a new location. The "rarp" strategy is useful
+ in live migration scenarios for virtual machines.
+ </column>
+
+ <column name="options" key="additional-chassis-activated">
+ When <ref column="activation-strategy"/> is set, this option indicates
+ that the port was activated using the strategy specified.
+ </column>
+
<column name="options" key="iface-id-ver">
If set, this port will be bound by <code>ovn-controller</code>
only if this same key and value is configured in the
diff --git a/tests/ovn-controller.at b/tests/ovn-controller.at
index b8db342b9..f71977291 100644
--- a/tests/ovn-controller.at
+++ b/tests/ovn-controller.at
@@ -2121,9 +2121,20 @@ check ovs-vsctl -- add-port br-int hv1-vif1 -- \
check ovn-nbctl ls-add ls1
-check ovn-nbctl --wait=hv lsp-add ls1 ls1-lp1 \
+check ovn-nbctl lsp-add ls1 ls1-lp1 \
-- lsp-set-addresses ls1-lp1 "f0:00:00:00:00:01 10.1.2.3"
+check ovn-nbctl lb-add lb1 1.1.1.1 10.1.2.3 \
+-- ls-lb-add ls1 lb1
+
+check ovn-nbctl lb-add lb2 2.2.2.2 10.1.2.4 \
+-- ls-lb-add ls1 lb2
+
+check ovn-nbctl --wait=hv sync
+# There should be 2 group IDs allocated
+AT_CHECK([ovn-appctl -t ovn-controller group-table-list | awk '{print $2}' | sort | uniq | wc -l], [0], [2
+])
+
# Set 5 seconds wait time before clearing OVS flows.
check ovs-vsctl set open . external_ids:ovn-ofctrl-wait-before-clear=5000
@@ -2156,6 +2167,19 @@ lflow_run_2=$(ovn-appctl -t ovn-controller coverage/read-counter lflow_run)
AT_CHECK_UNQUOTED([echo $lflow_run_1], [0], [$lflow_run_2
])
+# Restart OVS this time, and wait until flows are reinstalled
+OVS_APP_EXIT_AND_WAIT([ovs-vswitchd])
+start_daemon ovs-vswitchd --enable-dummy=system -vvconn -vofproto_dpif -vunixctl
+OVS_WAIT_UNTIL([ovs-ofctl dump-flows br-int | grep 10.1.2.4], [0], [ignore])
+
+check ovn-nbctl --wait=hv lb-add lb3 2.2.2.2 10.1.2.5 \
+-- ls-lb-add ls1 lb3
+
+# There should be 3 group IDs allocated (this is to ensure the group ID
+# allocation is correct after ofctrl state reset).
+AT_CHECK([ovn-appctl -t ovn-controller group-table-list | awk '{print $2}' | sort | uniq | wc -l], [0], [3
+])
+
OVN_CLEANUP([hv1])
AT_CLEANUP
diff --git a/tests/ovn-ic.at b/tests/ovn-ic.at
index 05bd3e9a6..89f223562 100644
--- a/tests/ovn-ic.at
+++ b/tests/ovn-ic.at
@@ -492,6 +492,56 @@ OVN_CLEANUP_IC([az1], [az2])
AT_CLEANUP
])
+OVN_FOR_EACH_NORTHD([
+AT_SETUP([ovn-ic -- route sync -- IPv6 route tables])
+AT_KEYWORDS([IPv6-route-sync])
+
+ovn_init_ic_db
+ovn-ic-nbctl ts-add ts1
+
+for i in 1 2; do
+ ovn_start az$i
+ ovn_as az$i
+
+ # Enable route learning at AZ level
+ ovn-nbctl set nb_global . options:ic-route-learn=true
+ # Enable route advertising at AZ level
+ ovn-nbctl set nb_global . options:ic-route-adv=true
+
+ # Create LRP and connect to TS
+ ovn-nbctl lr-add lr$i
+ ovn-nbctl lrp-add lr$i lrp-lr$i-ts1 aa:aa:aa:aa:aa:0$i 2001:db8:1::$i/64
+ ovn-nbctl lsp-add ts1 lsp-ts1-lr$i \
+ -- lsp-set-addresses lsp-ts1-lr$i router \
+ -- lsp-set-type lsp-ts1-lr$i router \
+ -- lsp-set-options lsp-ts1-lr$i router-port=lrp-lr$i-ts1
+
+ ovn-nbctl lrp-add lr$i lrp-lr$i-p$i 00:00:00:00:00:0$i 2002:db8:1::$i/64
+done
+
+for i in 1 2; do
+ OVS_WAIT_UNTIL([ovn_as az$i ovn-nbctl lr-route-list lr$i | grep learned])
+done
+
+AT_CHECK([ovn_as az1 ovn-nbctl lr-route-list lr1 | awk '/learned/{print $1, $2}'], [0], [dnl
+2002:db8:1::/64 2001:db8:1::2
+])
+
+# Do not learn routes from link-local nexthops
+for i in 1 2; do
+ ovn_as az$i
+ ovn-nbctl lrp-del lrp-lr$i-ts1
+ ovn-nbctl lrp-add lr$i lrp-lr$i-ts1 aa:aa:aa:aa:aa:0$i 169.254.100.$i/24
+done
+
+OVS_WAIT_WHILE([ovn_as az1 ovn-nbctl lr-route-list lr1 | grep learned])
+AT_CHECK([ovn_as az1 ovn-nbctl lr-route-list lr1 | grep -q learned], [1])
+
+OVN_CLEANUP_IC([az1], [az2])
+
+AT_CLEANUP
+])
+
OVN_FOR_EACH_NORTHD([
AT_SETUP([ovn-ic -- route sync -- route tables])
diff --git a/tests/ovn-ipsec.at b/tests/ovn-ipsec.at
index 4c600a9f2..10ef97878 100644
--- a/tests/ovn-ipsec.at
+++ b/tests/ovn-ipsec.at
@@ -44,15 +44,18 @@ ovs-vsctl \
# Enable IPsec
ovn-nbctl set nb_global . ipsec=true
+ovn-nbctl set nb_global . options:ipsec_encapsulation=true
check ovn-nbctl --wait=hv sync
AT_CHECK([as hv2 ovs-vsctl get Interface ovn-hv1-0 options:remote_ip | tr -d '"\n'], [0], [192.168.0.1])
AT_CHECK([as hv2 ovs-vsctl get Interface ovn-hv1-0 options:local_ip | tr -d '"\n'], [0], [192.168.0.2])
AT_CHECK([as hv2 ovs-vsctl get Interface ovn-hv1-0 options:remote_name | tr -d '\n'], [0], [hv1])
+AT_CHECK([as hv2 ovs-vsctl get Interface ovn-hv1-0 options:ipsec_encapsulation | tr -d '\n'], [0], [yes])
AT_CHECK([as hv1 ovs-vsctl get Interface ovn-hv2-0 options:remote_ip | tr -d '"\n'], [0], [192.168.0.2])
AT_CHECK([as hv1 ovs-vsctl get Interface ovn-hv2-0 options:local_ip | tr -d '"\n'], [0], [192.168.0.1])
AT_CHECK([as hv1 ovs-vsctl get Interface ovn-hv2-0 options:remote_name | tr -d '\n'], [0], [hv2])
+AT_CHECK([as hv1 ovs-vsctl get Interface ovn-hv2-0 options:ipsec_encapsulation | tr -d '\n'], [0], [yes])
AT_CLEANUP
diff --git a/tests/ovn-northd.at b/tests/ovn-northd.at
index a071b3689..773904f95 100644
--- a/tests/ovn-northd.at
+++ b/tests/ovn-northd.at
@@ -1213,6 +1213,13 @@ ovn-nbctl ls-add sw1
ovn-nbctl --wait=sb lsp-add sw1 sw1-p1 -- lsp-set-addresses sw1-p1 \
"02:00:00:00:00:03 20.0.0.3"
+# service_monitors state online requires corresponding port_binding to be "up"
+ovn-sbctl chassis-add hv1 geneve 127.0.0.1
+ovn-sbctl lsp-bind sw0-p1 hv1
+ovn-sbctl lsp-bind sw1-p1 hv1
+wait_row_count nb:Logical_Switch_Port 1 name=sw0-p1 'up=true'
+wait_row_count nb:Logical_Switch_Port 1 name=sw1-p1 'up=true'
+
wait_row_count Service_Monitor 0
ovn-nbctl --wait=sb set load_balancer . ip_port_mappings:10.0.0.3=sw0-p1:10.0.0.2
@@ -1226,7 +1233,7 @@ check ovn-nbctl --wait=sb ls-lb-add sw0 lb1
AT_CAPTURE_FILE([sbflows])
OVS_WAIT_FOR_OUTPUT(
[ovn-sbctl dump-flows sw0 | tee sbflows | grep 'priority=120.*backends' | sed 's/table=..//'], 0, [dnl
- (ls_in_lb ), priority=120 , match=(ct.new && ip4.dst == 10.0.0.10 && tcp.dst == 80), action=(reg0[[1]] = 0; reg1 = 10.0.0.10; reg2[[0..15]] = 80; ct_lb_mark(backends=10.0.0.3:80,20.0.0.3:80);)
+ (ls_in_lb ), priority=120 , match=(ct.new && ip4.dst == 10.0.0.10 && tcp.dst == 80), action=(reg0[[1]] = 0; reg1 = 10.0.0.10; reg2[[0..15]] = 80; ct_lb(backends=10.0.0.3:80,20.0.0.3:80);)
])
AS_BOX([Delete the Load_Balancer_Health_Check])
@@ -1236,7 +1243,7 @@ wait_row_count Service_Monitor 0
AT_CAPTURE_FILE([sbflows2])
OVS_WAIT_FOR_OUTPUT(
[ovn-sbctl dump-flows sw0 | tee sbflows2 | grep 'priority=120.*backends' | sed 's/table=..//'], [0],
-[ (ls_in_lb ), priority=120 , match=(ct.new && ip4.dst == 10.0.0.10 && tcp.dst == 80), action=(reg0[[1]] = 0; reg1 = 10.0.0.10; reg2[[0..15]] = 80; ct_lb_mark(backends=10.0.0.3:80,20.0.0.3:80);)
+[ (ls_in_lb ), priority=120 , match=(ct.new && ip4.dst == 10.0.0.10 && tcp.dst == 80), action=(reg0[[1]] = 0; reg1 = 10.0.0.10; reg2[[0..15]] = 80; ct_lb(backends=10.0.0.3:80,20.0.0.3:80);)
])
AS_BOX([Create the Load_Balancer_Health_Check again.])
@@ -1248,7 +1255,7 @@ check ovn-nbctl --wait=sb sync
ovn-sbctl dump-flows sw0 | grep backends | grep priority=120 > lflows.txt
AT_CHECK([cat lflows.txt | sed 's/table=..//'], [0], [dnl
- (ls_in_lb ), priority=120 , match=(ct.new && ip4.dst == 10.0.0.10 && tcp.dst == 80), action=(reg0[[1]] = 0; reg1 = 10.0.0.10; reg2[[0..15]] = 80; ct_lb_mark(backends=10.0.0.3:80,20.0.0.3:80);)
+ (ls_in_lb ), priority=120 , match=(ct.new && ip4.dst == 10.0.0.10 && tcp.dst == 80), action=(reg0[[1]] = 0; reg1 = 10.0.0.10; reg2[[0..15]] = 80; ct_lb(backends=10.0.0.3:80,20.0.0.3:80);)
])
AS_BOX([Get the uuid of both the service_monitor])
@@ -1258,7 +1265,7 @@ sm_sw1_p1=$(fetch_column Service_Monitor _uuid logical_port=sw1-p1)
AT_CAPTURE_FILE([sbflows3])
OVS_WAIT_FOR_OUTPUT(
[ovn-sbctl dump-flows sw0 | tee sbflows 3 | grep 'priority=120.*backends' | sed 's/table=..//'], [0],
-[ (ls_in_lb ), priority=120 , match=(ct.new && ip4.dst == 10.0.0.10 && tcp.dst == 80), action=(reg0[[1]] = 0; reg1 = 10.0.0.10; reg2[[0..15]] = 80; ct_lb_mark(backends=10.0.0.3:80,20.0.0.3:80);)
+[ (ls_in_lb ), priority=120 , match=(ct.new && ip4.dst == 10.0.0.10 && tcp.dst == 80), action=(reg0[[1]] = 0; reg1 = 10.0.0.10; reg2[[0..15]] = 80; ct_lb(backends=10.0.0.3:80,20.0.0.3:80);)
])
AS_BOX([Set the service monitor for sw1-p1 to offline])
@@ -1269,7 +1276,7 @@ check ovn-nbctl --wait=sb sync
AT_CAPTURE_FILE([sbflows4])
OVS_WAIT_FOR_OUTPUT(
[ovn-sbctl dump-flows sw0 | tee sbflows4 | grep 'priority=120.*backends' | sed 's/table=..//'], [0],
-[ (ls_in_lb ), priority=120 , match=(ct.new && ip4.dst == 10.0.0.10 && tcp.dst == 80), action=(reg0[[1]] = 0; reg1 = 10.0.0.10; reg2[[0..15]] = 80; ct_lb_mark(backends=10.0.0.3:80);)
+[ (ls_in_lb ), priority=120 , match=(ct.new && ip4.dst == 10.0.0.10 && tcp.dst == 80), action=(reg0[[1]] = 0; reg1 = 10.0.0.10; reg2[[0..15]] = 80; ct_lb(backends=10.0.0.3:80);)
])
AS_BOX([Set the service monitor for sw0-p1 to offline])
@@ -1298,7 +1305,7 @@ check ovn-nbctl --wait=sb sync
AT_CAPTURE_FILE([sbflows7])
OVS_WAIT_FOR_OUTPUT(
[ovn-sbctl dump-flows sw0 | tee sbflows7 | grep backends | grep priority=120 | sed 's/table=..//'], 0,
-[ (ls_in_lb ), priority=120 , match=(ct.new && ip4.dst == 10.0.0.10 && tcp.dst == 80), action=(reg0[[1]] = 0; reg1 = 10.0.0.10; reg2[[0..15]] = 80; ct_lb_mark(backends=10.0.0.3:80,20.0.0.3:80);)
+[ (ls_in_lb ), priority=120 , match=(ct.new && ip4.dst == 10.0.0.10 && tcp.dst == 80), action=(reg0[[1]] = 0; reg1 = 10.0.0.10; reg2[[0..15]] = 80; ct_lb(backends=10.0.0.3:80,20.0.0.3:80);)
])
AS_BOX([Set the service monitor for sw1-p1 to error])
@@ -1309,7 +1316,7 @@ check ovn-nbctl --wait=sb sync
ovn-sbctl dump-flows sw0 | grep "ip4.dst == 10.0.0.10 && tcp.dst == 80" \
| grep priority=120 > lflows.txt
AT_CHECK([cat lflows.txt | sed 's/table=..//'], [0], [dnl
- (ls_in_lb ), priority=120 , match=(ct.new && ip4.dst == 10.0.0.10 && tcp.dst == 80), action=(reg0[[1]] = 0; reg1 = 10.0.0.10; reg2[[0..15]] = 80; ct_lb_mark(backends=10.0.0.3:80);)
+ (ls_in_lb ), priority=120 , match=(ct.new && ip4.dst == 10.0.0.10 && tcp.dst == 80), action=(reg0[[1]] = 0; reg1 = 10.0.0.10; reg2[[0..15]] = 80; ct_lb(backends=10.0.0.3:80);)
])
AS_BOX([Add one more vip to lb1])
@@ -1335,8 +1342,8 @@ AT_CAPTURE_FILE([sbflows9])
OVS_WAIT_FOR_OUTPUT(
[ovn-sbctl dump-flows sw0 | tee sbflows9 | grep backends | grep priority=120 | sed 's/table=..//' | sort],
0,
-[ (ls_in_lb ), priority=120 , match=(ct.new && ip4.dst == 10.0.0.10 && tcp.dst == 80), action=(reg0[[1]] = 0; reg1 = 10.0.0.10; reg2[[0..15]] = 80; ct_lb_mark(backends=10.0.0.3:80);)
- (ls_in_lb ), priority=120 , match=(ct.new && ip4.dst == 10.0.0.40 && tcp.dst == 1000), action=(reg0[[1]] = 0; reg1 = 10.0.0.40; reg2[[0..15]] = 1000; ct_lb_mark(backends=10.0.0.3:1000);)
+[ (ls_in_lb ), priority=120 , match=(ct.new && ip4.dst == 10.0.0.10 && tcp.dst == 80), action=(reg0[[1]] = 0; reg1 = 10.0.0.10; reg2[[0..15]] = 80; ct_lb(backends=10.0.0.3:80);)
+ (ls_in_lb ), priority=120 , match=(ct.new && ip4.dst == 10.0.0.40 && tcp.dst == 1000), action=(reg0[[1]] = 0; reg1 = 10.0.0.40; reg2[[0..15]] = 1000; ct_lb(backends=10.0.0.3:1000);)
])
AS_BOX([Set the service monitor for sw1-p1 to online])
@@ -1349,8 +1356,8 @@ AT_CAPTURE_FILE([sbflows10])
OVS_WAIT_FOR_OUTPUT(
[ovn-sbctl dump-flows sw0 | tee sbflows10 | grep backends | grep priority=120 | sed 's/table=..//' | sort],
0,
-[ (ls_in_lb ), priority=120 , match=(ct.new && ip4.dst == 10.0.0.10 && tcp.dst == 80), action=(reg0[[1]] = 0; reg1 = 10.0.0.10; reg2[[0..15]] = 80; ct_lb_mark(backends=10.0.0.3:80,20.0.0.3:80);)
- (ls_in_lb ), priority=120 , match=(ct.new && ip4.dst == 10.0.0.40 && tcp.dst == 1000), action=(reg0[[1]] = 0; reg1 = 10.0.0.40; reg2[[0..15]] = 1000; ct_lb_mark(backends=10.0.0.3:1000,20.0.0.3:80);)
+[ (ls_in_lb ), priority=120 , match=(ct.new && ip4.dst == 10.0.0.10 && tcp.dst == 80), action=(reg0[[1]] = 0; reg1 = 10.0.0.10; reg2[[0..15]] = 80; ct_lb(backends=10.0.0.3:80,20.0.0.3:80);)
+ (ls_in_lb ), priority=120 , match=(ct.new && ip4.dst == 10.0.0.40 && tcp.dst == 1000), action=(reg0[[1]] = 0; reg1 = 10.0.0.40; reg2[[0..15]] = 1000; ct_lb(backends=10.0.0.3:1000,20.0.0.3:80);)
])
AS_BOX([Associate lb1 to sw1])
@@ -1359,8 +1366,8 @@ AT_CAPTURE_FILE([sbflows11])
OVS_WAIT_FOR_OUTPUT(
[ovn-sbctl dump-flows sw1 | tee sbflows11 | grep backends | grep priority=120 | sed 's/table=..//' | sort],
0, [dnl
- (ls_in_lb ), priority=120 , match=(ct.new && ip4.dst == 10.0.0.10 && tcp.dst == 80), action=(reg0[[1]] = 0; reg1 = 10.0.0.10; reg2[[0..15]] = 80; ct_lb_mark(backends=10.0.0.3:80,20.0.0.3:80);)
- (ls_in_lb ), priority=120 , match=(ct.new && ip4.dst == 10.0.0.40 && tcp.dst == 1000), action=(reg0[[1]] = 0; reg1 = 10.0.0.40; reg2[[0..15]] = 1000; ct_lb_mark(backends=10.0.0.3:1000,20.0.0.3:80);)
+ (ls_in_lb ), priority=120 , match=(ct.new && ip4.dst == 10.0.0.10 && tcp.dst == 80), action=(reg0[[1]] = 0; reg1 = 10.0.0.10; reg2[[0..15]] = 80; ct_lb(backends=10.0.0.3:80,20.0.0.3:80);)
+ (ls_in_lb ), priority=120 , match=(ct.new && ip4.dst == 10.0.0.40 && tcp.dst == 1000), action=(reg0[[1]] = 0; reg1 = 10.0.0.40; reg2[[0..15]] = 1000; ct_lb(backends=10.0.0.3:1000,20.0.0.3:80);)
])
AS_BOX([Now create lb2 same as lb1 but udp protocol.])
@@ -6734,6 +6741,7 @@ AT_CHECK([cat lrflows | grep -e lr_in_lookup_neighbor -e lr_in_learn_neighbor |
table=2 (lr_in_learn_neighbor), priority=90 , match=(nd_na), action=(put_nd(inport, nd.target, nd.tll); next;)
table=2 (lr_in_learn_neighbor), priority=90 , match=(nd_ns), action=(put_nd(inport, ip6.src, nd.sll); next;)
table=2 (lr_in_learn_neighbor), priority=95 , match=(nd_na && nd.tll == 0), action=(put_nd(inport, nd.target, eth.src); next;)
+ table=2 (lr_in_learn_neighbor), priority=95 , match=(nd_ns && (ip6.src == 0 || nd.sll == 0)), action=(next;)
])
AT_CLEANUP
diff --git a/tests/ovn.at b/tests/ovn.at
index 3c079e0fb..fd064b999 100644
--- a/tests/ovn.at
+++ b/tests/ovn.at
@@ -7432,7 +7432,7 @@ ovs-vsctl -- add-port br-int vif2 -- \
# Allow some time for ovn-northd and ovn-controller to catch up.
wait_for_ports_up
check ovn-nbctl --wait=hv sync
-ovn-nbctl dump-flows > sbflows
+ovn-sbctl dump-flows > sbflows
AT_CAPTURE_FILE([sbflows])
for i in 1 2; do
@@ -8037,7 +8037,7 @@ wait_for_ports_up
check ovn-nbctl --wait=hv sync
sleep 1
-ovn-nbctl dump-flows > sbflows
+ovn-sbctl dump-flows > sbflows
AT_CAPTURE_FILE([sbflows])
for i in 1 2; do
@@ -14015,6 +14015,7 @@ AT_CLEANUP
OVN_FOR_EACH_NORTHD([
AT_SETUP([options:multiple requested-chassis for logical port])
+AT_KEYWORDS([multi-chassis])
ovn_start
net_add n1
@@ -14104,6 +14105,7 @@ AT_CLEANUP
OVN_FOR_EACH_NORTHD([
AT_SETUP([options:multiple requested-chassis for logical port: change chassis role])
+AT_KEYWORDS([multi-chassis])
ovn_start
net_add n1
@@ -14153,6 +14155,7 @@ AT_CLEANUP
OVN_FOR_EACH_NORTHD([
AT_SETUP([options:multiple requested-chassis for logical port: unclaimed behavior])
+AT_KEYWORDS([multi-chassis])
ovn_start
net_add n1
@@ -14233,6 +14236,7 @@ AT_CLEANUP
OVN_FOR_EACH_NORTHD([
AT_SETUP([basic connectivity with multiple requested-chassis])
+AT_KEYWORDS([multi-chassis])
ovn_start
net_add n1
@@ -14567,6 +14571,7 @@ AT_CLEANUP
OVN_FOR_EACH_NORTHD([
AT_SETUP([localnet connectivity with multiple requested-chassis])
+AT_KEYWORDS([multi-chassis])
ovn_start
net_add n1
@@ -14660,6 +14665,14 @@ reset_env() {
for port in hv1/migrator hv2/migrator hv1/first hv2/second hv3/third; do
: > $port.expected
done
+
+ for hv in hv1 hv2 hv3; do
+ : > $hv/n1.expected
+ done
+
+ reset_pcap_file hv1 br-phys_n1 hv1/br-phys_n1
+ reset_pcap_file hv2 br-phys_n1 hv2/br-phys_n1
+ reset_pcap_file hv3 br-phys_n1 hv3/br-phys_n1
}
check_packets() {
@@ -14670,6 +14683,10 @@ check_packets() {
OVN_CHECK_PACKETS_CONTAIN([hv1/first-tx.pcap], [hv1/first.expected])
OVN_CHECK_PACKETS_CONTAIN([hv2/second-tx.pcap], [hv2/second.expected])
OVN_CHECK_PACKETS_CONTAIN([hv3/third-tx.pcap], [hv3/third.expected])
+
+ OVN_CHECK_PACKETS_CONTAIN([hv1/br-phys_n1-tx.pcap], [hv1/n1.expected])
+ OVN_CHECK_PACKETS_CONTAIN([hv2/br-phys_n1-tx.pcap], [hv2/n1.expected])
+ OVN_CHECK_PACKETS_CONTAIN([hv3/br-phys_n1-tx.pcap], [hv3/n1.expected])
}
migrator_tpa=$(ip_to_hex 10 0 0 100)
@@ -14694,10 +14711,10 @@ wait_column "" Port_Binding requested_additional_chassis logical_port=migrator
wait_for_ports_up
# advertise location of ports through localnet port
-send_garp hv1 migrator 0000000000ff ffffffffffff $migrator_spa $migrator_tpa
-send_garp hv1 first 000000000001 ffffffffffff $first_spa $first_tpa
-send_garp hv2 second 000000000002 ffffffffffff $second_spa $second_tpa
-send_garp hv3 third 000000000003 ffffffffffff $third_spa $third_tpa
+send_garp hv1 migrator 0000000000ff ffffffffffff $migrator_tpa $migrator_tpa
+send_garp hv1 first 000000000001 ffffffffffff $first_spa $first_spa
+send_garp hv2 second 000000000002 ffffffffffff $second_spa $second_spa
+send_garp hv3 third 000000000003 ffffffffffff $third_spa $third_spa
reset_env
# check that...
@@ -14717,6 +14734,7 @@ echo $request >> hv3/third.expected
# unicast from Second doesn't arrive to hv2:Migrator
request=$(send_arp hv2 second 000000000002 0000000000ff $second_spa $migrator_tpa)
echo $request >> hv1/migrator.expected
+echo $request >> hv2/n1.expected
# mcast from Second arrives to hv1:Migrator
# mcast from Second doesn't arrive to hv2:Migrator
@@ -14724,11 +14742,13 @@ request=$(send_arp hv2 second 000000000002 ffffffffffff $second_spa $migrator_tp
echo $request >> hv1/migrator.expected
echo $request >> hv1/first.expected
echo $request >> hv3/third.expected
+echo $request >> hv2/n1.expected
# unicast from Third arrives to hv1:Migrator
# unicast from Third doesn't arrive to hv2:Migrator
request=$(send_arp hv3 third 000000000003 0000000000ff $third_spa $migrator_tpa)
echo $request >> hv1/migrator.expected
+echo $request >> hv3/n1.expected
# mcast from Third arrives to hv1:Migrator
# mcast from Third doesn't arrive to hv2:Migrator
@@ -14736,14 +14756,17 @@ request=$(send_arp hv3 third 000000000003 ffffffffffff $third_spa $migrator_tpa)
echo $request >> hv1/migrator.expected
echo $request >> hv1/first.expected
echo $request >> hv2/second.expected
+echo $request >> hv3/n1.expected
# unicast from hv1:Migrator arrives to First, Second, and Third
request=$(send_arp hv1 migrator 0000000000ff 000000000001 $migrator_tpa $first_spa)
echo $request >> hv1/first.expected
request=$(send_arp hv1 migrator 0000000000ff 000000000002 $migrator_tpa $second_spa)
echo $request >> hv2/second.expected
+echo $request >> hv1/n1.expected
request=$(send_arp hv1 migrator 0000000000ff 000000000003 $migrator_tpa $third_spa)
echo $request >> hv3/third.expected
+echo $request >> hv1/n1.expected
# unicast from hv2:Migrator doesn't arrive to First, Second, or Third
request=$(send_arp hv2 migrator 0000000000ff 000000000001 $migrator_tpa $first_spa)
@@ -14755,6 +14778,7 @@ request=$(send_arp hv1 migrator 0000000000ff ffffffffffff $migrator_tpa $first_s
echo $request >> hv1/first.expected
echo $request >> hv2/second.expected
echo $request >> hv3/third.expected
+echo $request >> hv1/n1.expected
# mcast from hv2:Migrator doesn't arrive to First, Second, or Third
request=$(send_arp hv2 migrator 0000000000ff ffffffffffff $migrator_tpa $first_spa)
@@ -14841,8 +14865,18 @@ echo $request >> hv1/first.expected
echo $request >> hv2/second.expected
echo $request >> hv3/third.expected
+# unicast from Second arrives to Third through localnet port
+request=$(send_arp hv2 second 000000000002 000000000003 $second_spa $third_spa)
+echo $request >> hv2/n1.expected
+
check_packets
+# Wait for the MAC address of migrator to appear on the hv1-facing port of the main switch.
+# Hence the MAC will not migrate back unexpectedly later.
+p1=$(as main ovs-ofctl show n1 | grep hv1_br-phys | awk '{print int($1)}')
+p2=$(as main ovs-ofctl show n1 | grep hv2_br-phys | awk '{print int($1)}')
+OVS_WAIT_UNTIL([test x`as main ovs-appctl fdb/show n1 | grep 00:00:00:00:00:ff | awk '{print $1}'` = x$p1])
+
# Complete migration: destination is bound
check ovn-nbctl lsp-set-options migrator requested-chassis=hv2
wait_column "$hv2_uuid" Port_Binding chassis logical_port=migrator
@@ -14852,17 +14886,22 @@ wait_column "" Port_Binding requested_additional_chassis logical_port=migrator
wait_for_ports_up
check ovn-nbctl --wait=hv sync
-sleep 1
+OVS_WAIT_UNTIL([test `as hv2 ovs-vsctl get Interface migrator external_ids:ovn-installed` = '"true"'])
# advertise new location of the port through localnet port
-send_garp hv2 migrator 0000000000ff ffffffffffff $migrator_spa $migrator_tpa
+send_garp hv2 migrator 0000000000ff ffffffffffff $migrator_tpa $migrator_tpa
+
reset_env
+# Wait for MAC address of migrator to be on hv2 port in main switch
+OVS_WAIT_UNTIL([test x`as main ovs-appctl fdb/show n1 | grep 00:00:00:00:00:ff | awk '{print $1}'` = x$p2])
+
# check that...
# unicast from Third doesn't arrive to hv1:Migrator
# unicast from Third arrives to hv2:Migrator
request=$(send_arp hv3 third 000000000003 0000000000ff $third_spa $migrator_tpa)
echo $request >> hv2/migrator.expected
+echo $request >> hv3/n1.expected
# mcast from Third doesn't arrive to hv1:Migrator
# mcast from Third arrives to hv2:Migrator
@@ -14870,11 +14909,13 @@ request=$(send_arp hv3 third 000000000003 ffffffffffff $third_spa $migrator_tpa)
echo $request >> hv2/migrator.expected
echo $request >> hv1/first.expected
echo $request >> hv2/second.expected
+echo $request >> hv3/n1.expected
# unicast from First doesn't arrive to hv1:Migrator
# unicast from First arrives to hv2:Migrator
request=$(send_arp hv1 first 000000000001 0000000000ff $first_spa $migrator_tpa)
echo $request >> hv2/migrator.expected
+echo $request >> hv1/n1.expected
# mcast from First doesn't arrive to hv1:Migrator
# mcast from First arrives to hv2:Migrator binding
@@ -14882,6 +14923,7 @@ request=$(send_arp hv1 first 000000000001 ffffffffffff $first_spa $migrator_tpa)
echo $request >> hv2/migrator.expected
echo $request >> hv2/second.expected
echo $request >> hv3/third.expected
+echo $request >> hv1/n1.expected
# unicast from Second doesn't arrive to hv1:Migrator
# unicast from Second arrives to hv2:Migrator
@@ -14903,10 +14945,12 @@ request=$(send_arp hv1 migrator 0000000000ff 000000000003 $migrator_tpa $third_s
# unicast from hv2:Migrator arrives to First, Second, and Third
request=$(send_arp hv2 migrator 0000000000ff 000000000001 $migrator_tpa $first_spa)
echo $request >> hv1/first.expected
+echo $request >> hv2/n1.expected
request=$(send_arp hv2 migrator 0000000000ff 000000000002 $migrator_tpa $second_spa)
echo $request >> hv2/second.expected
request=$(send_arp hv2 migrator 0000000000ff 000000000003 $migrator_tpa $third_spa)
echo $request >> hv3/third.expected
+echo $request >> hv2/n1.expected
# mcast from hv1:Migrator doesn't arrive to First, Second, or Third
request=$(send_arp hv1 migrator 0000000000ff ffffffffffff $migrator_tpa $first_spa)
@@ -14916,14 +14960,400 @@ request=$(send_arp hv2 migrator 0000000000ff ffffffffffff $migrator_tpa $first_s
echo $request >> hv1/first.expected
echo $request >> hv2/second.expected
echo $request >> hv3/third.expected
+echo $request >> hv2/n1.expected
+
+check_packets
+
+OVN_CLEANUP([hv1],[hv2],[hv3])
+
+AT_CLEANUP
+])
+
+OVN_FOR_EACH_NORTHD([
+AT_SETUP([options:activation-strategy for logical port])
+AT_KEYWORDS([multi-chassis])
+ovn_start
+
+net_add n1
+
+sim_add hv1
+as hv1
+check ovs-vsctl add-br br-phys
+ovn_attach n1 br-phys 192.168.0.11
+
+sim_add hv2
+as hv2
+check ovs-vsctl add-br br-phys
+ovn_attach n1 br-phys 192.168.0.12
+
+sim_add hv3
+as hv3
+check ovs-vsctl add-br br-phys
+ovn_attach n1 br-phys 192.168.0.13
+
+# Disable local ARP responder to pass ARP requests through tunnels
+check ovn-nbctl ls-add ls0 -- add Logical_Switch ls0 other_config vlan-passthru=true
+
+check ovn-nbctl lsp-add ls0 migrator
+check ovn-nbctl lsp-set-options migrator requested-chassis=hv1,hv2 \
+ activation-strategy=rarp
+
+check ovn-nbctl lsp-add ls0 first
+check ovn-nbctl lsp-set-options first requested-chassis=hv1
+check ovn-nbctl lsp-add ls0 second
+check ovn-nbctl lsp-set-options second requested-chassis=hv2
+check ovn-nbctl lsp-add ls0 outside
+check ovn-nbctl lsp-set-options outside requested-chassis=hv3
+
+check ovn-nbctl lsp-set-addresses migrator "00:00:00:00:00:10 10.0.0.10"
+check ovn-nbctl lsp-set-addresses first "00:00:00:00:00:01 10.0.0.1"
+check ovn-nbctl lsp-set-addresses second "00:00:00:00:00:02 10.0.0.2"
+check ovn-nbctl lsp-set-addresses outside "00:00:00:00:00:03 10.0.0.3"
+
+for hv in hv1 hv2; do
+ as $hv check ovs-vsctl -- add-port br-int migrator -- \
+ set Interface migrator external-ids:iface-id=migrator \
+ options:tx_pcap=$hv/migrator-tx.pcap \
+ options:rxq_pcap=$hv/migrator-rx.pcap
+done
+
+as hv1 check ovs-vsctl -- add-port br-int first -- \
+ set Interface first external-ids:iface-id=first
+as hv2 check ovs-vsctl -- add-port br-int second -- \
+ set Interface second external-ids:iface-id=second
+as hv3 check ovs-vsctl -- add-port br-int outside -- \
+ set Interface outside external-ids:iface-id=outside
+
+for hv in hv1 hv2 hv3; do
+ wait_row_count Chassis 1 name=$hv
+done
+hv1_uuid=$(fetch_column Chassis _uuid name=hv1)
+hv2_uuid=$(fetch_column Chassis _uuid name=hv2)
+hv3_uuid=$(fetch_column Chassis _uuid name=hv3)
+
+wait_column "$hv1_uuid" Port_Binding chassis logical_port=migrator
+wait_column "$hv1_uuid" Port_Binding requested_chassis logical_port=migrator
+wait_column "$hv2_uuid" Port_Binding additional_chassis logical_port=migrator
+wait_column "$hv2_uuid" Port_Binding requested_additional_chassis logical_port=migrator
+
+wait_column "$hv1_uuid" Port_Binding chassis logical_port=first
+wait_column "$hv2_uuid" Port_Binding chassis logical_port=second
+wait_column "$hv3_uuid" Port_Binding chassis logical_port=outside
+
+OVN_POPULATE_ARP
+
+send_arp() {
+ local hv=$1 inport=$2 eth_src=$3 eth_dst=$4 spa=$5 tpa=$6
+ local request=${eth_dst}${eth_src}08060001080006040001${eth_src}${spa}${eth_dst}${tpa}
+ as ${hv} ovs-appctl netdev-dummy/receive $inport $request
+ echo "${request}"
+}
+
+send_rarp() {
+ local hv=$1 inport=$2 eth_src=$3 eth_dst=$4 spa=$5 tpa=$6
+ local request=${eth_dst}${eth_src}80350001080006040001${eth_src}${spa}${eth_dst}${tpa}
+ as ${hv} ovs-appctl netdev-dummy/receive $inport $request
+ echo "${request}"
+}
+
+reset_pcap_file() {
+ local hv=$1
+ local iface=$2
+ local pcap_file=$3
+ as $hv check ovs-vsctl -- set Interface $iface options:tx_pcap=dummy-tx.pcap \
+ options:rxq_pcap=dummy-rx.pcap
+ check rm -f ${pcap_file}*.pcap
+ as $hv check ovs-vsctl -- set Interface $iface options:tx_pcap=${pcap_file}-tx.pcap \
+ options:rxq_pcap=${pcap_file}-rx.pcap
+}
+
+reset_env() {
+ reset_pcap_file hv1 migrator hv1/migrator
+ reset_pcap_file hv2 migrator hv2/migrator
+ reset_pcap_file hv1 first hv1/first
+ reset_pcap_file hv2 second hv2/second
+ reset_pcap_file hv3 outside hv3/outside
+
+ for port in hv1/migrator hv2/migrator hv1/first hv2/second hv3/outside; do
+ : > $port.expected
+ done
+}
+
+check_packets() {
+ OVN_CHECK_PACKETS([hv1/migrator-tx.pcap], [hv1/migrator.expected])
+ OVN_CHECK_PACKETS([hv2/migrator-tx.pcap], [hv2/migrator.expected])
+ OVN_CHECK_PACKETS([hv3/outside-tx.pcap], [hv3/outside.expected])
+ OVN_CHECK_PACKETS([hv1/first-tx.pcap], [hv1/first.expected])
+ OVN_CHECK_PACKETS([hv2/second-tx.pcap], [hv2/second.expected])
+}
+
+migrator_spa=$(ip_to_hex 10 0 0 10)
+first_spa=$(ip_to_hex 10 0 0 1)
+second_spa=$(ip_to_hex 10 0 0 2)
+outside_spa=$(ip_to_hex 10 0 0 3)
+
+reset_env
+
+# Packet from hv3:Outside arrives to hv1:Migrator
+# hv3:Outside cannot reach hv2:Migrator because it is blocked by RARP strategy
+request=$(send_arp hv3 outside 000000000003 000000000010 $outside_spa $migrator_spa)
+echo $request >> hv1/migrator.expected
+
+# Packet from hv1:First arrives to hv1:Migrator
+# hv1:First cannot reach hv2:Migrator because it is blocked by RARP strategy
+request=$(send_arp hv1 first 000000000001 000000000010 $first_spa $migrator_spa)
+echo $request >> hv1/migrator.expected
+
+# Packet from hv2:Second arrives to hv1:Migrator
+# hv2:Second cannot reach hv2:Migrator because it is blocked by RARP strategy
+request=$(send_arp hv2 second 000000000002 000000000010 $second_spa $migrator_spa)
+echo $request >> hv1/migrator.expected
+
+check_packets
+reset_env
+
+# Packet from hv1:Migrator arrives to hv3:Outside
+request=$(send_arp hv1 migrator 000000000010 000000000003 $migrator_spa $outside_spa)
+echo $request >> hv3/outside.expected
+
+# Packet from hv1:Migrator arrives to hv1:First
+request=$(send_arp hv1 migrator 000000000010 000000000001 $migrator_spa $first_spa)
+echo $request >> hv1/first.expected
+
+# Packet from hv1:Migrator arrives to hv2:Second
+request=$(send_arp hv1 migrator 000000000010 000000000002 $migrator_spa $second_spa)
+echo $request >> hv2/second.expected
+
+check_packets
+reset_env
+
+# hv2:Migrator cannot reach hv3:Outside because it is blocked by RARP strategy
+request=$(send_arp hv2 migrator 000000000010 000000000003 $migrator_spa $outside_spa)
+
+check_packets
+reset_env
+
+AT_CHECK([ovn-sbctl find port_binding logical_port=migrator | grep -q additional-chassis-activated], [1])
+
+# Now activate hv2:Migrator location
+request=$(send_rarp hv2 migrator 000000000010 ffffffffffff $migrator_spa $migrator_spa)
+
+# RARP was reinjected into the pipeline
+echo $request >> hv3/outside.expected
+echo $request >> hv1/first.expected
+echo $request >> hv2/second.expected
+
+check_packets
+reset_env
+
+pb_uuid=$(ovn-sbctl --bare --columns _uuid find Port_Binding logical_port=migrator)
+OVS_WAIT_UNTIL([test xhv2 = x$(ovn-sbctl get Port_Binding $pb_uuid options:additional-chassis-activated | tr -d '""')])
+
+# Now packet arrives to both locations
+request=$(send_arp hv3 outside 000000000003 000000000010 $outside_spa $migrator_spa)
+echo $request >> hv1/migrator.expected
+echo $request >> hv2/migrator.expected
check_packets
+reset_env
+
+# Packet from hv1:Migrator still arrives to hv3:Outside
+request=$(send_arp hv1 migrator 000000000010 000000000003 $migrator_spa $outside_spa)
+echo $request >> hv3/outside.expected
+
+check_packets
+reset_env
+
+# hv2:Migrator can now reach hv3:Outside because RARP strategy activated it
+request=$(send_arp hv2 migrator 000000000010 000000000003 $migrator_spa $outside_spa)
+echo $request >> hv3/outside.expected
+
+check_packets
+
+# complete port migration and check that -activated flag is reset
+check ovn-nbctl lsp-set-options migrator requested-chassis=hv2
+OVS_WAIT_UNTIL([test x = x$(ovn-sbctl get Port_Binding $pb_uuid options:additional-chassis-activated)])
OVN_CLEANUP([hv1],[hv2],[hv3])
AT_CLEANUP
])
+OVN_FOR_EACH_NORTHD([
+AT_SETUP([options:activation-strategy=rarp is not waiting for southbound db])
+AT_KEYWORDS([multi-chassis])
+# unskip when ovn-controller is able to process incremental updates to flow
+# table without ovsdb transaction available
+AT_SKIP_IF([true])
+
+ovn_start
+
+net_add n1
+
+sim_add hv1
+as hv1
+check ovs-vsctl add-br br-phys
+ovn_attach n1 br-phys 192.168.0.11
+
+sim_add hv2
+as hv2
+check ovs-vsctl add-br br-phys
+ovn_attach n1 br-phys 192.168.0.12
+
+# Disable local ARP responder to pass ARP requests through tunnels
+check ovn-nbctl ls-add ls0 -- add Logical_Switch ls0 other_config vlan-passthru=true
+
+check ovn-nbctl lsp-add ls0 migrator
+check ovn-nbctl lsp-set-options migrator requested-chassis=hv1,hv2 \
+ activation-strategy=rarp
+
+check ovn-nbctl lsp-add ls0 first
+check ovn-nbctl lsp-set-options first requested-chassis=hv1
+
+check ovn-nbctl lsp-set-addresses migrator "00:00:00:00:00:10 10.0.0.10"
+check ovn-nbctl lsp-set-addresses first "00:00:00:00:00:01 10.0.0.1"
+
+for hv in hv1 hv2; do
+ as $hv check ovs-vsctl -- add-port br-int migrator -- \
+ set Interface migrator external-ids:iface-id=migrator \
+ options:tx_pcap=$hv/migrator-tx.pcap \
+ options:rxq_pcap=$hv/migrator-rx.pcap
+done
+
+as hv1 check ovs-vsctl -- add-port br-int first -- \
+ set Interface first external-ids:iface-id=first
+
+for hv in hv1 hv2; do
+ wait_row_count Chassis 1 name=$hv
+done
+hv1_uuid=$(fetch_column Chassis _uuid name=hv1)
+hv2_uuid=$(fetch_column Chassis _uuid name=hv2)
+
+wait_column "$hv1_uuid" Port_Binding chassis logical_port=migrator
+wait_column "$hv1_uuid" Port_Binding requested_chassis logical_port=migrator
+wait_column "$hv2_uuid" Port_Binding additional_chassis logical_port=migrator
+wait_column "$hv2_uuid" Port_Binding requested_additional_chassis logical_port=migrator
+
+wait_column "$hv1_uuid" Port_Binding chassis logical_port=first
+
+OVN_POPULATE_ARP
+
+send_arp() {
+ local hv=$1 inport=$2 eth_src=$3 eth_dst=$4 spa=$5 tpa=$6
+ local request=${eth_dst}${eth_src}08060001080006040001${eth_src}${spa}${eth_dst}${tpa}
+ as ${hv} ovs-appctl netdev-dummy/receive $inport $request
+ echo "${request}"
+}
+
+send_rarp() {
+ local hv=$1 inport=$2 eth_src=$3 eth_dst=$4 spa=$5 tpa=$6
+ local request=${eth_dst}${eth_src}80350001080006040001${eth_src}${spa}${eth_dst}${tpa}
+ as ${hv} ovs-appctl netdev-dummy/receive $inport $request
+ echo "${request}"
+}
+
+reset_pcap_file() {
+ local hv=$1
+ local iface=$2
+ local pcap_file=$3
+ as $hv check ovs-vsctl -- set Interface $iface options:tx_pcap=dummy-tx.pcap \
+ options:rxq_pcap=dummy-rx.pcap
+ check rm -f ${pcap_file}*.pcap
+ as $hv check ovs-vsctl -- set Interface $iface options:tx_pcap=${pcap_file}-tx.pcap \
+ options:rxq_pcap=${pcap_file}-rx.pcap
+}
+
+reset_env() {
+ reset_pcap_file hv1 migrator hv1/migrator
+ reset_pcap_file hv2 migrator hv2/migrator
+ reset_pcap_file hv1 first hv1/first
+
+ for port in hv1/migrator hv2/migrator hv1/first; do
+ : > $port.expected
+ done
+}
+
+check_packets() {
+ OVN_CHECK_PACKETS([hv1/migrator-tx.pcap], [hv1/migrator.expected])
+ OVN_CHECK_PACKETS([hv2/migrator-tx.pcap], [hv2/migrator.expected])
+ OVN_CHECK_PACKETS([hv1/first-tx.pcap], [hv1/first.expected])
+}
+
+migrator_spa=$(ip_to_hex 10 0 0 10)
+first_spa=$(ip_to_hex 10 0 0 1)
+
+reset_env
+
+# Packet from hv1:First arrives to hv1:Migrator
+# hv1:First cannot reach hv2:Migrator because it is blocked by RARP strategy
+request=$(send_arp hv1 first 000000000001 000000000010 $first_spa $migrator_spa)
+echo $request >> hv1/migrator.expected
+
+check_packets
+reset_env
+
+# Packet from hv1:Migrator arrives to hv1:First
+request=$(send_arp hv1 migrator 000000000010 000000000001 $migrator_spa $first_spa)
+echo $request >> hv1/first.expected
+
+check_packets
+reset_env
+
+# hv2:Migrator cannot reach hv1:First because it is blocked by RARP strategy
+request=$(send_arp hv2 migrator 000000000010 000000000001 $migrator_spa $first_spa)
+
+check_packets
+reset_env
+
+# Before proceeding, stop ovsdb-server to make sure we test in the environment
+# that can't remove flows triggered by updates to database
+as ovn-sb
+OVS_APP_EXIT_AND_WAIT([ovsdb-server])
+
+# Now activate hv2:Migrator location
+request=$(send_rarp hv2 migrator 000000000010 ffffffffffff $migrator_spa $migrator_spa)
+
+# RARP was reinjected into the pipeline
+echo $request >> hv1/first.expected
+
+# Now packet from hv1:First arrives to both locations
+request=$(send_arp hv1 first 000000000001 000000000010 $first_spa $migrator_spa)
+echo $request >> hv1/migrator.expected
+echo $request >> hv2/migrator.expected
+
+# Packet from hv1:Migrator still arrives to hv1:First
+request=$(send_arp hv1 migrator 000000000010 000000000001 $migrator_spa $first_spa)
+echo $request >> hv1/first.expected
+
+# hv2:Migrator can now reach hv1:First because RARP strategy activated it
+request=$(send_arp hv2 migrator 000000000010 000000000001 $migrator_spa $first_spa)
+echo $request >> hv1/first.expected
+
+check_packets
+
+# restart ovsdb-server before cleaning up to give ovn-controller a chance to
+# exit gracefully
+mv $ovs_base/ovn-sb/ovsdb-server.log $ovs_base/ovn-sb/ovsdb-server.log.prev
+as ovn-sb start_daemon ovsdb-server \
+ -vjsonrpc \
+ --remote=punix:$ovs_base/ovn-sb/$1.sock \
+ --remote=db:OVN_Southbound,SB_Global,connections \
+ --private-key=$PKIDIR/testpki-test-privkey.pem \
+ --certificate=$PKIDIR/testpki-test-cert.pem \
+ --ca-cert=$PKIDIR/testpki-cacert.pem \
+ $ovs_base/ovn-sb/ovn-sb.db
+
+PARSE_LISTENING_PORT([$ovs_base/ovn-sb/ovsdb-server.log], [TCP_PORT])
+for i in 1 2; do
+ as hv$i
+ ovs-vsctl \
+ -- set Open_vSwitch . external-ids:ovn-remote=ssl:127.0.0.1:$TCP_PORT
+done
+OVN_CLEANUP([hv1],[hv2])
+
+AT_CLEANUP
+])
+
OVN_FOR_EACH_NORTHD([
AT_SETUP([options:requested-chassis for logical port])
ovn_start
diff --git a/tests/system-ovn.at b/tests/system-ovn.at
index 4bf22593a..1cabf1f31 100644
--- a/tests/system-ovn.at
+++ b/tests/system-ovn.at
@@ -4441,7 +4441,7 @@ tcp,orig=(src=10.0.0.4,dst=10.0.0.10,sport=<cleared>,dport=<cleared>),reply=(src
tcp,orig=(src=10.0.0.4,dst=10.0.0.10,sport=<cleared>,dport=<cleared>),reply=(src=20.0.0.3,dst=10.0.0.4,sport=<cleared>,dport=<cleared>),zone=<cleared>,mark=2,protoinfo=(state=<cleared>)
])
-# Stop webserer in sw0-p1
+# Stop webserver in sw0-p1
kill `cat $sw0_p1_pid_file`
# Wait until service_monitor for sw0-p1 is set to offline
@@ -4465,6 +4465,15 @@ sed -e 's/zone=[[0-9]]*/zone=<cleared>/'], [0], [dnl
tcp,orig=(src=10.0.0.4,dst=10.0.0.10,sport=<cleared>,dport=<cleared>),reply=(src=20.0.0.3,dst=10.0.0.4,sport=<cleared>,dport=<cleared>),zone=<cleared>,mark=2,protoinfo=(state=<cleared>)
])
+# trigger port binding release and check if status changed to offline
+ovs-vsctl remove interface ovs-sw1-p1 external_ids iface-id
+wait_row_count Service_Monitor 2
+wait_row_count Service_Monitor 2 status=offline
+
+ovs-vsctl set interface ovs-sw1-p1 external_ids:iface-id=sw1-p1
+wait_row_count Service_Monitor 2
+wait_row_count Service_Monitor 1 status=online
+
# Create udp load balancer.
ovn-nbctl lb-add lb2 10.0.0.10:80 10.0.0.3:80,20.0.0.3:80 udp
lb_udp=`ovn-nbctl lb-list | grep udp | awk '{print $1}'`
@@ -6741,6 +6750,21 @@ NS_CHECK_EXEC([vm1], [ping -q -c 3 -i 0.3 -w 2 172.18.2.10 | FORMAT_PING], \
[0], [dnl
3 packets transmitted, 3 received, 0% packet loss, time 0ms
])
+
+dnat_and_snat_uuid=$(fetch_column nb:NAT _uuid external_ip=172.18.2.10)
+ovn-nbctl set NAT $dnat_and_snat_uuid options:stateless=true
+
+# A ping from vm1 should hairpin in lr1 and successfully DNAT to vm2
+NS_CHECK_EXEC([vm1], [ping -q -c 3 -i 0.3 -w 2 172.18.2.10 | FORMAT_PING], \
+[0], [dnl
+3 packets transmitted, 3 received, 0% packet loss, time 0ms
+])
+# A ping from vm2 should hairpin in lr1 and successfully DNAT to vm2
+NS_CHECK_EXEC([vm2], [ping -q -c 3 -i 0.3 -w 2 172.18.2.10 | FORMAT_PING], \
+[0], [dnl
+3 packets transmitted, 3 received, 0% packet loss, time 0ms
+])
+
kill $(pidof ovn-controller)
as ovn-sb
diff --git a/utilities/ovn-ctl b/utilities/ovn-ctl
index d733aa42d..93be9b84b 100755
--- a/utilities/ovn-ctl
+++ b/utilities/ovn-ctl
@@ -42,8 +42,21 @@ ovn_ic_db_conf_file="$ovn_etcdir/ovn-ic-db-params.conf"
pidfile_is_running () {
pidfile=$1
- test -e "$pidfile" && [ -s "$pidfile" ] && pid=`cat "$pidfile"` && pid_exists "$pid"
-} >/dev/null 2>&1
+ cmd=$2
+ if [ ! -s "$pidfile" ]; then
+ # file missing or empty
+ return 1
+ fi
+ pid=`cat "$pidfile"`
+ if ! pid_exists $pid; then
+ # pid is dead
+ return 1
+ fi
+ if [ -n "$cmd" ]; then
+ return $(pid_comm_check "$cmd" "$pid")
+ fi
+ return 0
+}
stop_nb_ovsdb() {
OVS_RUNDIR=${OVS_RUNDIR} stop_ovn_daemon ovnnb_db $DB_NB_PIDFILE $DB_NB_CTRL_SOCK
@@ -199,7 +212,7 @@ start_ovsdb__() {
ovn_install_dir "$ovn_etcdir"
# Check and eventually start ovsdb-server for DB
- if pidfile_is_running $db_pid_file; then
+ if pidfile_is_running $db_pid_file ovsdb-server; then
return
fi
@@ -298,6 +311,10 @@ $cluster_remote_port
set "$@" --sync-from=`cat $active_conf_file`
fi
+ if test X"$extra_args" != X; then
+ set "$@" $extra_args
+ fi
+
local run_ovsdb_in_bg="no"
local process_id=
if test X$detach = Xno && test $mode = cluster && test -z "$cluster_remote_addr" ; then
@@ -528,6 +545,10 @@ start_ic () {
set "$@" $OVN_IC_LOG $ovn_ic_params
+ if test X"$extra_args" != X; then
+ set "$@" $extra_args
+ fi
+
OVS_RUNDIR=${OVS_RUNDIR} start_ovn_daemon "$OVN_IC_PRIORITY" "$OVN_IC_WRAPPER" "$@"
fi
}
@@ -550,6 +571,10 @@ start_controller () {
[ "$OVN_USER" != "" ] && set "$@" --user "$OVN_USER"
+ if test X"$extra_args" != X; then
+ set "$@" $extra_args
+ fi
+
OVS_RUNDIR=${OVS_RUNDIR} start_ovn_daemon "$OVN_CONTROLLER_PRIORITY" "$OVN_CONTROLLER_WRAPPER" "$@"
}
@@ -577,6 +602,10 @@ start_controller_vtep () {
[ "$OVN_USER" != "" ] && set "$@" --user "$OVN_USER"
+ if test X"$extra_args" != X; then
+ set "$@" $extra_args
+ fi
+
OVS_RUNDIR=${OVS_RUNDIR} start_ovn_daemon "$OVN_CONTROLLER_PRIORITY" "$OVN_CONTROLLER_WRAPPER" "$@"
}
@@ -1093,8 +1122,10 @@ EOF
set_defaults
command=
+extra_args=
for arg
do
+ shift
case $arg in
-h | --help)
usage
@@ -1117,6 +1148,10 @@ do
type=bool
set_option
;;
+ --)
+ extra_args=$@
+ break
+ ;;
-*)
echo >&2 "$0: unknown option \"$arg\" (use --help for help)"
exit 1
diff --git a/utilities/ovn-ctl.8.xml b/utilities/ovn-ctl.8.xml
index a1d39b22b..42d16fabc 100644
--- a/utilities/ovn-ctl.8.xml
+++ b/utilities/ovn-ctl.8.xml
@@ -4,7 +4,10 @@
<p>ovn-ctl -- Open Virtual Network northbound daemon lifecycle utility</p>
<h1>Synopsis</h1>
- <p><code>ovn-ctl</code> [<var>options</var>] <var>command</var></p>
+ <p>
+ <code>ovn-ctl</code> [<var>options</var>] <var>command</var>
+ [-- <var>extra_args</var>]
+ </p>
<h1>Description</h1>
<p>This program is intended to be invoked internally by Open Virtual Network
@@ -156,6 +159,15 @@
<p><code>--db-nb-probe-interval-to-active=<var>Time in milliseconds</var></code></p>
<p><code>--db-sb-probe-interval-to-active=<var>Time in milliseconds</var></code></p>
+ <h1> Extra Options </h1>
+ <p>
+ Any options after '--' will be passed on to the binary run by
+ <var>command</var> with the exception of start_northd, which can have
+ options specified in ovn-northd-db-params.conf. Any <var>extra_args</var>
+ passed to start_northd will be passed to the ovsdb-servers if
+ <code>--ovn-manage-ovsdb=yes</code>
+ </p>
+
<h1>Configuration files</h1>
<p>Following are the optional configuration files. If present, it should be located in the etc dir</p>
diff --git a/utilities/ovn-dbctl.c b/utilities/ovn-dbctl.c
index a292e589d..c4cc8c9b2 100644
--- a/utilities/ovn-dbctl.c
+++ b/utilities/ovn-dbctl.c
@@ -202,6 +202,13 @@ ovn_dbctl_main(int argc, char *argv[],
error = ctl_parse_commands(argc - optind, argv_ + optind,
&local_options, &commands, &n_commands);
if (error) {
+ ovsdb_idl_destroy(idl);
+ idl = the_idl = NULL;
+
+ for (int i = 0; i < argc; i++) {
+ free(argv_[i]);
+ }
+ free(argv_);
ctl_fatal("%s", error);
}
diff --git a/utilities/ovn-nbctl.c b/utilities/ovn-nbctl.c
index 6a8ba6330..2918db1c6 100644
--- a/utilities/ovn-nbctl.c
+++ b/utilities/ovn-nbctl.c
@@ -5345,7 +5345,7 @@ nbctl_lrp_set_gateway_chassis(struct ctl_context *ctx)
}
const char *chassis_name = ctx->argv[2];
- if (ctx->argv[3]) {
+ if (ctx->argc == 4) {
error = parse_priority(ctx->argv[3], &priority);
if (error) {
ctx->error = error;