diff --git a/distribution/com.redhat.dnsconfd.conf b/distribution/com.redhat.dnsconfd.conf index 10ddee3..17323cc 100644 --- a/distribution/com.redhat.dnsconfd.conf +++ b/distribution/com.redhat.dnsconfd.conf @@ -43,5 +43,11 @@ send_member="RestartUnit"/> + + + + + + diff --git a/distribution/dnsconfd-config.8 b/distribution/dnsconfd-config.8 index efa879b..0734be8 100644 --- a/distribution/dnsconfd-config.8 +++ b/distribution/dnsconfd-config.8 @@ -1,4 +1,4 @@ -.TH "dnsconfd-config" "8" "10 Oct 2023" "dnsconfd-1.0.2" "" +.TH "dnsconfd-config" "8" "10 Oct 2023" "dnsconfd-1.1.2" "" .SH NAME diff --git a/distribution/dnsconfd-reload.8 b/distribution/dnsconfd-reload.8 index a651c70..57fbab5 100644 --- a/distribution/dnsconfd-reload.8 +++ b/distribution/dnsconfd-reload.8 @@ -1,4 +1,4 @@ -.TH "dnsconfd-reload" "8" "10 Oct 2023" "dnsconfd-1.0.2" "" +.TH "dnsconfd-reload" "8" "10 Oct 2023" "dnsconfd-1.1.2" "" .SH NAME diff --git a/distribution/dnsconfd-status.8 b/distribution/dnsconfd-status.8 index 7fce92f..de3d2d4 100644 --- a/distribution/dnsconfd-status.8 +++ b/distribution/dnsconfd-status.8 @@ -1,4 +1,4 @@ -.TH "dnsconfd-status" "8" "10 Oct 2023" "dnsconfd-1.0.2" "" +.TH "dnsconfd-status" "8" "10 Oct 2023" "dnsconfd-1.1.2" "" .SH NAME diff --git a/distribution/dnsconfd.8 b/distribution/dnsconfd.8 index 10faa3d..92a5d12 100644 --- a/distribution/dnsconfd.8 +++ b/distribution/dnsconfd.8 @@ -1,4 +1,4 @@ -.TH "dnsconfd" "8" "10 Oct 2023" "dnsconfd-1.0.2" "" +.TH "dnsconfd" "8" "10 Oct 2023" "dnsconfd-1.1.2" "" .SH NAME @@ -33,6 +33,8 @@ Path where config file is located, default /etc/dnsconfd.conf Options to be used in resolv.conf for alteration of resolver, default "edns0 trust-ad" .IP "--dnssec-enabled options" Enable dnssec record validation, default no +.IP "--handle-routing" +Dnsconfd will submit necessary routes to routing manager, default yes .SH "EXIT STATUS" @@ -46,6 +48,8 @@ Failed communication through DBUS. 
Failed interaction with resolv.conf .IP 4 Failed to configure dns cache service +.IP 5 +Failed to set up necessary routes .SH ENVIRONMENT Environment variables have lower priority than command line options but higher @@ -67,11 +71,18 @@ Path where config file is located, default /etc/dnsconfd.conf Options to be used in resolv.conf for alteration of resolver, default "edns0 trust-ad" .IP DNSSEC_ENABLED Enable dnssec record validation, default no +.IP HANDLE_ROUTING +Dnsconfd will submit necessary routes to routing manager, default yes .SH FILES Dnsconfd manages resolv.conf to route domain name resolution to local cache service. Default location of the configuration file is /etc/dnsconfd.conf. +.SH ROUTING +Dnsconfd by default submits routes through NetworkManager, so system contacts +DNS servers through the right interfaces. This behaviour can be turned off +by setting handle_routing option to 'no'. + .SH NOTES The only currently supported backend is Unbound. diff --git a/distribution/dnsconfd.rules b/distribution/dnsconfd.rules index f4f0cff..5da09b5 100644 --- a/distribution/dnsconfd.rules +++ b/distribution/dnsconfd.rules @@ -6,3 +6,10 @@ polkit.addRule(function(action, subject) { return polkit.Result.YES; } }) +polkit.addRule(function(action, subject) { + if (action.id == "org.freedesktop.NetworkManager.network-control" && + subject.user == "dnsconfd") + { + return polkit.Result.YES; + } +}); diff --git a/distribution/dnsconfd.spec b/distribution/dnsconfd.spec index 093391e..9f9048b 100644 --- a/distribution/dnsconfd.spec +++ b/distribution/dnsconfd.spec @@ -2,7 +2,7 @@ %global selinuxtype targeted Name: dnsconfd -Version: 1.0.2 +Version: 1.1.2 Release: 1%{?dist} Summary: Local DNS cache configuration daemon License: MIT @@ -168,6 +168,9 @@ fi %{_tmpfilesdir}/dnsconfd-unbound.conf %changelog +* Mon Jul 22 2024 Tomas Korbar - 1.1.2-1 +- Release 1.1.2 + * Thu Jun 27 2024 Tomas Korbar - 1.0.2-1 - Release 1.0.2 diff --git a/distribution/dnsconfd.te 
b/distribution/dnsconfd.te index aa057ee..c9eb972 100644 --- a/distribution/dnsconfd.te +++ b/distribution/dnsconfd.te @@ -30,6 +30,7 @@ require { type tmp_t; type tmpfs_t; type NetworkManager_var_run_t; + type NetworkManager_t; } type dnsconfd_t; @@ -110,3 +111,5 @@ allow dnsconfd_t dnsconfd_var_run_t:file { open write getattr ioctl read }; allow dnsconfd_t dnsconfd_var_run_t:dir search; allow dnsconfd_t dnsconfd_t:dbus send_msg; + +allow dnsconfd_t NetworkManager_t:dbus send_msg; diff --git a/dnsconfd/argument_parser.py b/dnsconfd/argument_parser.py index 75e591c..b8fae0b 100644 --- a/dnsconfd/argument_parser.py +++ b/dnsconfd/argument_parser.py @@ -40,7 +40,11 @@ def __init__(self, *args, **kwargs) -> None: "edns0 trust-ad"), ("dnssec_enabled", "Enable dnssec record validation, default no", - False) + False), + ("handle_routing", + "Dnsconfd will submit necessary routes to routing manager, " + "default yes", + True) ] def add_arguments(self): @@ -162,5 +166,8 @@ def _read_config(self, path: str) -> dict: # when invalid config is provided self.lgr.warning("Bad config provided") return {arg: val for (arg, _, val) in self._config_values} + for key in config.keys(): + if config[key] == "yes": + config[key] = True return config diff --git a/dnsconfd/fsm/context_state.py b/dnsconfd/fsm/context_state.py index 35d5fe3..cae5e02 100644 --- a/dnsconfd/fsm/context_state.py +++ b/dnsconfd/fsm/context_state.py @@ -23,3 +23,5 @@ class ContextState(Enum): WAITING_RESTART_JOB = 17 # interrupt REVERT_RESOLV_ON_FAILED_RESTART = 18 CONFIGURING_DNS_MANAGER = 19 + UPDATING_ROUTES = 20 + REMOVING_ROUTES = 21 diff --git a/dnsconfd/fsm/dnsconfd_context.py b/dnsconfd/fsm/dnsconfd_context.py index 423f406..f235f01 100644 --- a/dnsconfd/fsm/dnsconfd_context.py +++ b/dnsconfd/fsm/dnsconfd_context.py @@ -11,6 +11,7 @@ import dbus.service import json import dbus.connection +import ipaddress class DnsconfdContext: @@ -24,21 +25,23 @@ def __init__(self, config: dict, main_loop: object): :type 
main_loop: object """ self.my_address = config["listen_address"] - if config["dnssec_enabled"] is True: - self.dnssec_enabled = config["dnssec_enabled"] - else: - self.dnssec_enabled = config["dnssec_enabled"] == "yes" + + self.dnssec_enabled = config["dnssec_enabled"] is True + self.wire_priority = config["prioritize_wire"] is True + self.handle_routes = config["handle_routing"] is True self.sys_mgr = SystemManager(config) self._main_loop = main_loop - self._systemd_object = None self._systemd_manager = None + self._nm_interface = None self._signal_connection = None self.dns_mgr = None self.interfaces: dict[int, InterfaceConfiguration] = {} + self.routes = {} + self.lgr = logging.getLogger(self.__class__.__name__) # dictionary, systemd jobs -> event that should be emitted on success, @@ -58,8 +61,7 @@ def __init__(self, config: dict, main_loop: object): "UPDATE": (ContextState.STARTING, self._update_transition), "STOP": (ContextState.STOPPING, - lambda y: ContextEvent("EXIT", - ExitCode.GRACEFUL_STOP)) + self._exit_transition) }, ContextState.CONFIGURING_DNS_MANAGER: { "SUCCESS": (ContextState.CONNECTING_DBUS, @@ -87,7 +89,7 @@ def __init__(self, config: dict, main_loop: object): "START_OK": (ContextState.POLLING, self._job_finished_success_transition), "START_FAIL": (ContextState.STOPPING, - self._exit_transition), + self._service_failure_exit_transition), "UPDATE": (ContextState.WAITING_FOR_START_JOB, self._update_transition), "STOP": (ContextState.WAITING_TO_SUBMIT_STOP_JOB, @@ -96,8 +98,8 @@ def __init__(self, config: dict, main_loop: object): ContextState.WAITING_TO_SUBMIT_STOP_JOB: { "START_OK": (ContextState.SUBMITTING_STOP_JOB, self._waiting_to_submit_success_transition), - "START_FAIL": (ContextState.STOPPING, - lambda y: ExitCode.SERVICE_FAILURE), + "START_FAIL": (ContextState.REMOVING_ROUTES, + self._to_removing_routes_transition), "STOP": (ContextState.WAITING_TO_SUBMIT_STOP_JOB, lambda y: None), "UPDATE": (ContextState.WAITING_TO_SUBMIT_STOP_JOB, @@ 
-105,7 +107,7 @@ def __init__(self, config: dict, main_loop: object): "RESTART_SUCCESS": (ContextState.REVERTING_RESOLV_CONF, self._running_stop_transition), "RESTART_FAIL": (ContextState.REVERTING_RESOLV_CONF, - self._running_stop_transition) + self._restart_failure_stop_transition) }, ContextState.POLLING: { "TIMER_UP": (ContextState.POLLING, @@ -136,9 +138,15 @@ def __init__(self, config: dict, main_loop: object): ContextState.UPDATING_RESOLV_CONF: { "FAIL": (ContextState.SUBMITTING_STOP_JOB, self._updating_resolv_conf_fail_transition), - "SUCCESS": (ContextState.UPDATING_DNS_MANAGER, + "SUCCESS": (ContextState.UPDATING_ROUTES, self._updating_resolv_conf_success_transition) }, + ContextState.UPDATING_ROUTES: { + "FAIL": (ContextState.REVERTING_RESOLV_CONF, + self._running_stop_transition), + "SUCCESS": (ContextState.UPDATING_DNS_MANAGER, + self._updating_routes_success_transition) + }, ContextState.UPDATING_DNS_MANAGER: { "FAIL": (ContextState.REVERTING_RESOLV_CONF, self.updating_dns_manager_fail_transition), @@ -146,15 +154,14 @@ def __init__(self, config: dict, main_loop: object): }, ContextState.SUBMITTING_STOP_JOB: { "SUCCESS": (ContextState.WAITING_STOP_JOB, lambda y: None), - "FAIL": (ContextState.STOPPING, - lambda y: ContextEvent("EXIT", - ExitCode.DBUS_FAILURE)) + "FAIL": (ContextState.REMOVING_ROUTES, + self._to_removing_routes_transition) }, ContextState.WAITING_STOP_JOB: { - "STOP_SUCCESS": (ContextState.STOPPING, - self._waiting_stop_job_success_transition), - "STOP_FAILURE": (ContextState.STOPPING, - self._waiting_stop_job_fail_transition), + "STOP_SUCCESS": (ContextState.REMOVING_ROUTES, + self._to_removing_routes_transition), + "STOP_FAILURE": (ContextState.REMOVING_ROUTES, + self._srv_fail_remove_routes_transition), "STOP": (ContextState.WAITING_STOP_JOB, lambda y: None), "UPDATE": (ContextState.WAITING_STOP_JOB, lambda y: None) }, @@ -162,7 +169,7 @@ def __init__(self, config: dict, main_loop: object): "SUCCESS": 
(ContextState.SUBMITTING_STOP_JOB, self._reverting_resolv_conf_transition), "FAIL": (ContextState.SUBMITTING_STOP_JOB, - self._reverting_resolv_conf_transition) + self._to_removing_routes_transition) }, ContextState.SUBMITTING_RESTART_JOB: { "SUCCESS": (ContextState.WAITING_RESTART_JOB, @@ -171,8 +178,10 @@ def __init__(self, config: dict, main_loop: object): self._submitting_restart_job_fail_transition) }, ContextState.REVERT_RESOLV_ON_FAILED_RESTART: { - "SUCCESS": (ContextState.STOPPING, self._exit_transition), - "FAIL": (ContextState.STOPPING, self._exit_transition) + "SUCCESS": (ContextState.REMOVING_ROUTES, + self._to_removing_routes_transition), + "FAIL": (ContextState.REMOVING_ROUTES, + self._to_removing_routes_transition) }, ContextState.WAITING_RESTART_JOB: { "RESTART_SUCCESS": (ContextState.POLLING, @@ -183,6 +192,10 @@ def __init__(self, config: dict, main_loop: object): self._update_transition), "STOP": (ContextState.WAITING_TO_SUBMIT_STOP_JOB, lambda y: None) + }, + ContextState.REMOVING_ROUTES: { + "SUCCESS": (ContextState.STOPPING, self._exit_transition), + "FAIL": (ContextState.STOPPING, self._exit_transition) } } @@ -226,7 +239,12 @@ def _starting_kickoff_transition(self, event: ContextEvent) \ return ContextEvent("SUCCESS") self.lgr.error("Unable to configure DNS manager") - return ContextEvent("FAIL", ExitCode.CONFIG_FAILURE) + self._set_exit_code(ExitCode.CONFIG_FAILURE) + return ContextEvent("FAIL") + + def _set_exit_code(self, code: ExitCode): + if self._exit_code == 0: + self._exit_code = code def _conf_dns_mgr_success_transition(self, event: ContextEvent) \ -> ContextEvent | None: @@ -243,7 +261,8 @@ def _conf_dns_mgr_success_transition(self, event: ContextEvent) \ if (not self._connect_systemd() or not self._subscribe_systemd_signals()): self.lgr.error("Failed to connect to systemd through DBUS") - return ContextEvent("FAIL", ExitCode.DBUS_FAILURE) + self._set_exit_code(ExitCode.DBUS_FAILURE) + return ContextEvent("FAIL") else: 
self.lgr.debug("Successfully connected to systemd through DBUS") return ContextEvent("SUCCESS") @@ -263,14 +282,21 @@ def _connecting_dbus_success_transition(self, event: ContextEvent) \ service_start_job = self._start_unit() if service_start_job is None: self.lgr.error("Failed to submit dns cache service start job") - return ContextEvent("FAIL", ExitCode.DBUS_FAILURE) + self._set_exit_code(ExitCode.DBUS_FAILURE) + return ContextEvent("FAIL") self._systemd_jobs[service_start_job] = ( - ContextEvent("START_OK"), ContextEvent("START_FAIL", - ExitCode.SERVICE_FAILURE)) + ContextEvent("START_OK"), ContextEvent("START_FAIL")) # end of part that will be configured self.lgr.debug("Successfully submitted dns cache service start job") return ContextEvent("SUCCESS") + def _service_failure_exit_transition(self, event: ContextEvent) \ + -> ContextEvent | None: + self._set_exit_code(ExitCode.SERVICE_FAILURE) + self.lgr.debug("Stopping event loop and FSM") + self._main_loop.quit() + return None + def _exit_transition(self, event: ContextEvent) -> ContextEvent | None: """ Transition to STOPPING @@ -282,7 +308,6 @@ def _exit_transition(self, event: ContextEvent) -> ContextEvent | None: :rtype: ContextEvent | None """ self.lgr.debug("Stopping event loop and FSM") - self._exit_code = event.data.value self._main_loop.quit() return None @@ -301,11 +326,11 @@ def _subscribe_systemd_signals(self): def _connect_systemd(self): try: - bus = dbus.SystemBus() - self._systemd_object = bus.get_object('org.freedesktop.systemd1', - '/org/freedesktop/systemd1') + self.bus = dbus.SystemBus() + systemd_object = self.bus.get_object('org.freedesktop.systemd1', + '/org/freedesktop/systemd1') self._systemd_manager \ - = dbus.Interface(self._systemd_object, + = dbus.Interface(systemd_object, "org.freedesktop.systemd1.Manager") return True except dbus.DBusException as e: @@ -366,7 +391,8 @@ def _polling_timer_up_transition(self, event: ContextEvent) \ if event.data == 3: 
self.lgr.error(f"{self.dns_mgr.service_name} did not respond " "in time, stopping dnsconfd") - return ContextEvent("TIMEOUT", ExitCode.SERVICE_FAILURE) + self._set_exit_code(ExitCode.SERVICE_FAILURE) + return ContextEvent("TIMEOUT") self.lgr.debug(f"{self.dns_mgr.service_name} still not ready, " "scheduling additional poll") timer = ContextEvent("TIMER_UP", event.data + 1) @@ -391,7 +417,8 @@ def _polling_service_up_transition(self, event: ContextEvent) \ """ if not self.sys_mgr.set_resolvconf(): self.lgr.error("Failed to set up resolv.conf") - return ContextEvent("FAIL", ExitCode.RESOLV_CONF_FAILURE) + self._set_exit_code(ExitCode.RESOLV_CONF_FAILURE) + return ContextEvent("FAIL") else: self.lgr.debug("Resolv.conf successfully prepared") return ContextEvent("SUCCESS") @@ -407,15 +434,370 @@ def _running_update_transition(self, event: ContextEvent) \ :return: SUCCESS if update was successful otherwise FAIL :rtype: ContextEvent | None """ - interface_config: InterfaceConfiguration = event.data - self.interfaces[interface_config.interface_index] = interface_config + int_config: InterfaceConfiguration = event.data + if int_config is not None: + if len(int_config.servers) > 0: + self.interfaces[int_config.interface_index] = int_config + else: + self.interfaces.pop(int_config.interface_index, None) + zones_to_servers, search_domains = self._get_zones_to_servers() if not self.sys_mgr.update_resolvconf(search_domains): - return ContextEvent("FAIL", ExitCode.SERVICE_FAILURE) + self._set_exit_code(ExitCode.SERVICE_FAILURE) + return ContextEvent("FAIL") return ContextEvent("SUCCESS", zones_to_servers) + def _get_nm_device_config(self, interface): + int_name = interface.get_if_name(strict=True) + if int_name is None: + self.lgr.info(f"interface {int_name} has no name and thus " + f"we will not handle its routing") + return [], [], None + self.lgr.debug(f"Getting NetworkManager info about {int_name}") + try: + device_path = self._nm_interface.GetDeviceByIpIface(int_name) + 
self.lgr.debug(f"Device path is {device_path}") + device_object = self.bus.get_object("org.freedesktop" + ".NetworkManager", + device_path) + + device_properties = dbus.Interface(device_object, + "org.freedesktop" + ".DBus.Properties").GetAll( + "org.freedesktop.NetworkManager.Device") + if 40 <= device_properties["State"] < 100: + self.lgr.info(f"Interface {int_name} is not yet activated, " + f"state: {device_properties["State"]}, " + f"scheduling refresh") + upd = ContextEvent("UPDATE") + GLib.timeout_add_seconds(2, + lambda: self.transition_function(upd)) + return [], [], None + prop_interface = "org.freedesktop.DBus.Properties" + ip4_object = self.bus.get_object('org.freedesktop.NetworkManager', + device_properties["Ip4Config"]) + ip6_object = self.bus.get_object('org.freedesktop.NetworkManager', + device_properties["Ip6Config"]) + ip4_routes = dbus.Interface(ip4_object, + prop_interface).Get( + "org.freedesktop.NetworkManager.IP4Config", "RouteData") + self.lgr.debug(f"ipv4 Route data is {ip4_routes}") + ip6_routes = dbus.Interface(ip6_object, + prop_interface).Get( + "org.freedesktop.NetworkManager.IP6Config", "RouteData") + self.lgr.debug(f"ipv6 Route data is {ip6_routes}") + ip4_addresses = dbus.Interface(ip4_object, + prop_interface).Get( + "org.freedesktop.NetworkManager.IP4Config", "Addresses") + ip6_addresses = dbus.Interface(ip6_object, + prop_interface).Get( + "org.freedesktop.NetworkManager.IP6Config", "Addresses") + if len(ip4_addresses) == 0 and len(ip6_addresses) == 0: + self.lgr.info(f"interface {int_name} has no address " + "and thus we will not handle its routing") + return [], [], None + dev_int = dbus.Interface(device_object, + "org.freedesktop.NetworkManager.Device") + applied = dev_int.GetAppliedConnection(0) + self.lgr.debug(f"Applied connection is {applied}") + except dbus.DBusException as e: + self.lgr.info(f"Failed to retrieve info about {int_name} " + "from NetworkManager, will not handle its routing") + self.lgr.debug(f"{e}") + return 
None, None, None + + return ip4_routes, ip6_routes, applied + + def _choose_best_route(self, server_str, interface_and_routes): + best_route = None + server_ip = ipaddress.ip_address(server_str) + self.lgr.debug(f"Handling server {server_str}") + for (route_int_index, route) in interface_and_routes: + net = ipaddress.ip_network(f"{route['dest']}/{route['prefix']}") + if server_ip in net: + if (best_route is None + or best_route[1]["prefix"] < route["prefix"]): + best_route = (route_int_index, route) + elif (best_route[1]["prefix"] == route["prefix"] + and "metric" in best_route[1].keys() + and "metric" in route.keys() + and best_route[1]["metric"] > route["metric"]): + best_route = (route_int_index, route) + self.lgr.debug(f"best route is {best_route}") + return best_route + def _updating_resolv_conf_success_transition(self, event: ContextEvent) \ -> ContextEvent | None: + # here update routes and push event further, because it contains + # zones to servers + + if not self.handle_routes: + self.lgr.info("Config says we should not handle routes, skipping") + return ContextEvent("SUCCESS", data=event.data) + + # we need to refresh the dbus connection, because NetworkManager + # restart would invalidate it + + nm_object = self.bus.get_object('org.freedesktop.NetworkManager', + '/org/freedesktop/NetworkManager') + self._nm_interface = dbus.Interface(nm_object, + "org.freedesktop.NetworkManager") + + interface_and_routes = [] + interface_to_connection = {} + self.lgr.debug("Commencing route update") + + for (key, interface) in self.interfaces.items(): + ip4_rte, ip6_rte, applied = self._get_nm_device_config(interface) + if ip4_rte is None: + self._set_exit_code(ExitCode.ROUTE_FAILURE) + return ContextEvent("FAIL") + elif applied is None: + # we have to also remove routes of this interface, + # so they do not interfere with further processing + for server in interface.servers: + self.routes.pop(server.get_server_string(), None) + interface_to_connection[key] = None + 
continue + for route in ip4_rte + ip6_rte: + interface_and_routes.append((key, route)) + interface_to_connection[key] = applied + + self.lgr.debug(f"interface and routes is {interface_and_routes}") + self.lgr.debug("interface and connections " + f"is {interface_to_connection}") + valid_routes = {} + self.lgr.debug(f"interfaces are {self.interfaces}") + for (int_index, interface) in self.interfaces.items(): + reapply_needed = False + ifname = interface.get_if_name() + self.lgr.debug(f"Walking through servers of interface {ifname}") + if interface_to_connection[int_index] is None: + # this will ensure that routes left after downed devices + # are cleared + continue + # we need to remove this, so we can use route-data field + # undocumented NetworkManager implementation detail + del interface_to_connection[int_index][0]["ipv4"]["routes"] + del interface_to_connection[int_index][0]["ipv6"]["routes"] + connection = interface_to_connection[int_index][0] + + for server in interface.servers: + server_str = server.get_server_string() + best_route = self._choose_best_route(server_str, + interface_and_routes) + if best_route is None: + routing_right = False + else: + routing_right = best_route[0] == int_index + + if (routing_right + and best_route[1]["dest"] not in self.routes.keys()): + self.lgr.debug("Routing is right, no additional action " + "required continuing") + # this means that there is no additional action required + # as we submit only routes with prefix 32 + continue + elif routing_right: + # routing is right, but chosen route has been submitted by + # us, and could have wrong gateway + self.lgr.debug("Routing is right, but the route was " + "submitted by us, checking gateway") + def_route = None + cur_route = self.routes.get(str(best_route[1]["dest"])) + + # find interface route with prefix 0, that will show us + # gateway + for (route_int_index, route) in interface_and_routes: + if (route_int_index == int_index + and route["prefix"] == 0 + and "next-hop" in 
route.keys()): + def_route = (route_int_index, route) + break + + if def_route is None: + self.lgr.info( + f"Could not find default route for {ifname} " + "and thus can not check submitted route") + valid_routes[str(best_route[1]["dest"])] = cur_route + continue + if def_route[1]["next-hop"] != best_route[1]["next-hop"]: + # change connection since there is a route created + # by us that is not right + self.lgr.debug("Gateway is not right, changing") + conn = interface_to_connection[int_index][0] + for route in conn["ipv4"]["route-data"]: + if route["dest"] == best_route[1]["dest"]: + route["next-hop"] = def_route[1]["next-hop"] + dest = str(best_route[1]["dest"]) + valid_routes[dest] = route + break + reapply_needed = True + else: + self.lgr.debug("Gateway is right continuing") + valid_routes[server_str] = cur_route + else: + # routing is not right, and we must add route to fix + # the situation + duplicate = False + if best_route is not None: + for x in self.interfaces[best_route[0]].servers: + if x == server: + duplicate = True + break + if duplicate: + # different interface also should use this server, we + # should handle which one of them has priority + other_wireless = (self.interfaces[best_route[0]] + .is_interface_wireless()) + this_wireless = interface.is_interface_wireless() + other_name = (self.interfaces[best_route[0]] + .get_if_name()) + if (self.wire_priority + and other_wireless + and not this_wireless): + self.lgr.info(f"Server {server_str} is listed by " + f" both interfaces {ifname} and " + f"{other_name} but since the latter " + f"is wireless, {ifname} " + "will be the one used") + else: + self.lgr.info(f"Server {server_str} is listed by " + f"both interfaces {ifname} " + f"and {other_name} " + f"the latter will be used") + continue + + self.lgr.debug("Adding route") + def_route = None + for (route_int_index, route) in interface_and_routes: + if (route_int_index == int_index + and route["prefix"] == 0 + and "next-hop" in route.keys()): + 
def_route = (route_int_index, route) + break + + if def_route is None: + self.lgr.info( + f"Could not find default route for {ifname} " + "and thus will not handle routing") + continue + self.lgr.debug(f"Default route is {def_route}") + dest_str = str(def_route[1]["dest"]) + max_prefix = ipaddress.ip_address(dest_str).max_prefixlen + prefix = dbus.UInt32(max_prefix) + new_route = dbus.Dictionary({ + dbus.String("dest"): + dbus.String(server_str), + dbus.String("prefix"): + prefix, + dbus.String("next-hop"): + dbus.String(def_route[1]["next-hop"])}) + + self.lgr.debug(f"new route is {new_route}") + valid_routes[server_str] = new_route + connection["ipv4"]["route-data"].append(new_route) + reapply_needed = True + for checked_route in list(connection["ipv4"]["route-data"]): + if (str(checked_route["dest"]) not in valid_routes.keys() + and str(checked_route["dest"]) in self.routes.keys()): + connection["ipv4"]["route-data"].remove(checked_route) + reapply_needed = True + self.lgr.debug(f"Removing route {checked_route}") + for checked_route in list(connection["ipv6"]["route-data"]): + if (str(checked_route["dest"]) not in valid_routes.keys() + and str(checked_route["dest"]) in self.routes.keys()): + connection["ipv6"]["route-data"].remove(checked_route) + reapply_needed = True + self.lgr.debug(f"Removing route {checked_route}") + if reapply_needed: + self.lgr.debug("Reapplying changed connection") + device_path = self._nm_interface.GetDeviceByIpIface(ifname) + self.lgr.debug(f"Device path is {device_path}") + nm_int_str = "org.freedesktop.NetworkManager" + device_object = self.bus.get_object(nm_int_str, device_path) + dev_int_str = "org.freedesktop.NetworkManager.Device" + dev_int = dbus.Interface(device_object, + dev_int_str) + dev_int.Reapply(connection, + interface_to_connection[int_index][1], + 0) + + self.routes = valid_routes + return ContextEvent("SUCCESS", data=event.data) + + def _remove_routes(self): + nm_int_str = "org.freedesktop.NetworkManager" + 
dev_int_str = "org.freedesktop.NetworkManager.Device" + try: + # we need to refresh the dbus connection, because NetworkManager + # restart would invalidate it + nm_object = self.bus.get_object("org.freedesktop.NetworkManager", + "/org/freedesktop/NetworkManager") + self._nm_interface = dbus.Interface(nm_object, + nm_int_str) + except dbus.DBusException: + self.lgr.info("Failed to contact NetworkManager through dbus, " + "will not remove routes") + return ContextEvent("SUCCESS") + + for (int_index, interface) in self.interfaces.items(): + reapply_needed = False + ifname = interface.get_if_name() + try: + device_path = self._nm_interface.GetDeviceByIpIface(ifname) + device_object = self.bus.get_object(nm_int_str, + device_path) + dev_int = dbus.Interface(device_object, + dev_int_str) + connection = dev_int.GetAppliedConnection(0) + except dbus.DBusException: + self.lgr.info("Failed to retrieve info about interface " + f" {ifname}, Will not remove its routes") + continue + + for checked_route in list(connection[0]["ipv4"]["route-data"]): + if str(checked_route["dest"]) in self.routes.keys(): + connection[0]["ipv4"]["route-data"].remove(checked_route) + reapply_needed = True + self.lgr.debug(f"Removing route {checked_route}") + for checked_route in list(connection[0]["ipv6"]["route-data"]): + if str(checked_route["dest"]) in self.routes.keys(): + connection[0]["ipv6"]["route-data"].remove(checked_route) + reapply_needed = True + self.lgr.debug(f"Removing route {checked_route}") + if reapply_needed: + self.lgr.debug("Reapplying changed connection") + del connection[0]["ipv6"]["routes"] + del connection[0]["ipv4"]["routes"] + dev_int = dbus.Interface(device_object, dev_int_str) + dev_int.Reapply(connection[0], connection[1], 0) + return ContextEvent("SUCCESS") + + def _srv_fail_remove_routes_transition(self, event: ContextEvent) \ + -> ContextEvent | None: + if not self.handle_routes: + self.lgr.info("Config says we should not handle routes, skipping") + return 
ContextEvent("SUCCESS") + self.lgr.debug("Removing routes") + routes_str = " ".join([str(x) for x in self.routes.keys()]) + self.lgr.debug(f"routes: {routes_str}") + + self._set_exit_code(ExitCode.SERVICE_FAILURE) + return self._remove_routes() + + def _to_removing_routes_transition(self, event: ContextEvent) \ + -> ContextEvent | None: + if not self.handle_routes: + self.lgr.info("Config says we should not handle routes, skipping") + return ContextEvent("SUCCESS") + self.lgr.debug("Removing routes") + routes_str = " ".join([str(x) for x in self.routes.keys()]) + self.lgr.debug(f"routes: {routes_str}") + return self._remove_routes() + + def _updating_routes_success_transition(self, event: ContextEvent) \ + -> ContextEvent | None: """ Transition to UPDATING_DNS_MANAGER Attempt to update dns caching service with new network_objects @@ -427,7 +809,8 @@ def _updating_resolv_conf_success_transition(self, event: ContextEvent) \ """ new_zones_to_servers = event.data if not self.dns_mgr.update(new_zones_to_servers): - return ContextEvent("FAIL", ExitCode.SERVICE_FAILURE) + self._set_exit_code(ExitCode.SERVICE_FAILURE) + return ContextEvent("FAIL") return ContextEvent("SUCCESS") def _waiting_to_submit_success_transition(self, event: ContextEvent) \ @@ -447,14 +830,16 @@ def _waiting_to_submit_success_transition(self, event: ContextEvent) \ # submitting stop job while the start is running could result in # unnecessary race condition if not self._subscribe_systemd_signals(): - return ContextEvent("FAIL", ExitCode.DBUS_FAILURE) + self._set_exit_code(ExitCode.DBUS_FAILURE) + return ContextEvent("FAIL") service_stop_job = self._stop_unit() if service_stop_job is None: - return ContextEvent("FAIL", ExitCode.DBUS_FAILURE) + self._set_exit_code(ExitCode.DBUS_FAILURE) + return ContextEvent("FAIL") self._systemd_jobs[service_stop_job] = ( - ContextEvent("STOP_SUCCESS", ExitCode.GRACEFUL_STOP), - ContextEvent("STOP_FAILURE", ExitCode.SERVICE_FAILURE)) - return ContextEvent("SUCCESS", 
ExitCode.GRACEFUL_STOP) + ContextEvent("STOP_SUCCESS"), + ContextEvent("STOP_FAILURE")) + return ContextEvent("SUCCESS") def _updating_resolv_conf_fail_transition(self, event: ContextEvent) \ -> ContextEvent | None: @@ -469,16 +854,19 @@ def _updating_resolv_conf_fail_transition(self, event: ContextEvent) \ """ # since we have already problems with resolv.conf, # we will be performing this without checking result + self._set_exit_code(ExitCode.RESOLV_CONF_FAILURE) self.sys_mgr.revert_resolvconf() if not self._subscribe_systemd_signals(): - return ContextEvent("FAIL", ExitCode.DBUS_FAILURE) + self._set_exit_code(ExitCode.DBUS_FAILURE) + return ContextEvent("FAIL") service_stop_job = self._stop_unit() if service_stop_job is None: - return ContextEvent("FAIL", ExitCode.DBUS_FAILURE) + self._set_exit_code(ExitCode.DBUS_FAILURE) + return ContextEvent("FAIL") self._systemd_jobs[service_stop_job] = ( - ContextEvent("STOP_SUCCESS", ExitCode.RESOLV_CONF_FAILURE), - ContextEvent("STOP_FAILURE", ExitCode.SERVICE_FAILURE)) - return ContextEvent("SUCCESS", ExitCode.GRACEFUL_STOP) + ContextEvent("STOP_SUCCESS"), + ContextEvent("STOP_FAILURE")) + return ContextEvent("SUCCESS") def _waiting_stop_job_success_transition(self, event: ContextEvent) \ -> ContextEvent | None: @@ -507,7 +895,8 @@ def _waiting_stop_job_fail_transition(self, event: ContextEvent) \ :rtype: ContextEvent | None """ self.lgr.debug("Stop job after error failed") - return ContextEvent("EXIT", ExitCode.SERVICE_FAILURE) + self._set_exit_code(ExitCode.SERVICE_FAILURE) + return ContextEvent("EXIT") def updating_dns_manager_fail_transition(self, event: ContextEvent) \ -> ContextEvent | None: @@ -522,8 +911,9 @@ def updating_dns_manager_fail_transition(self, event: ContextEvent) \ """ self.lgr.error("Failed to update DNS service, stopping") if not self.sys_mgr.revert_resolvconf(): - return ContextEvent("FAIL", ExitCode.RESOLV_CONF_FAILURE) - return ContextEvent("SUCCESS", ExitCode.GRACEFUL_STOP) + 
self._set_exit_code(ExitCode.RESOLV_CONF_FAILURE) + return ContextEvent("FAIL") + return ContextEvent("SUCCESS") def _reverting_resolv_conf_transition(self, event: ContextEvent) \ -> ContextEvent | None: @@ -537,14 +927,16 @@ def _reverting_resolv_conf_transition(self, event: ContextEvent) \ :rtype: ContextEvent | None """ if not self._subscribe_systemd_signals(): - return ContextEvent("FAIL", ExitCode.DBUS_FAILURE) + self._set_exit_code(ExitCode.DBUS_FAILURE) + return ContextEvent("FAIL") service_stop_job = self._stop_unit() if service_stop_job is None: - return ContextEvent("FAIL", ExitCode.DBUS_FAILURE) + self._set_exit_code(ExitCode.DBUS_FAILURE) + return ContextEvent("FAIL") self._systemd_jobs[service_stop_job] = ( - ContextEvent("STOP_SUCCESS", ExitCode.GRACEFUL_STOP), - ContextEvent("STOP_FAILURE", ExitCode.SERVICE_FAILURE)) - return ContextEvent("SUCCESS", ExitCode.GRACEFUL_STOP) + ContextEvent("STOP_SUCCESS"), + ContextEvent("STOP_FAILURE")) + return ContextEvent("SUCCESS") def _running_stop_transition(self, event: ContextEvent) \ -> ContextEvent | None: @@ -559,8 +951,17 @@ def _running_stop_transition(self, event: ContextEvent) \ """ self.lgr.info("Stopping dnsconfd") if not self.sys_mgr.revert_resolvconf(): - return ContextEvent("FAIL", ExitCode.RESOLV_CONF_FAILURE) - return ContextEvent("SUCCESS", ExitCode.GRACEFUL_STOP) + self._set_exit_code(ExitCode.RESOLV_CONF_FAILURE) + return ContextEvent("FAIL") + return ContextEvent("SUCCESS") + + def _restart_failure_stop_transition(self, event: ContextEvent) \ + -> ContextEvent | None: + self._set_exit_code(ExitCode.SERVICE_FAILURE) + self.lgr.info("Stopping dnsconfd") + if not self.sys_mgr.revert_resolvconf(): + return ContextEvent("FAIL") + return ContextEvent("SUCCESS") def _running_reload_transition(self, event: ContextEvent) \ -> ContextEvent | None: @@ -576,14 +977,16 @@ def _running_reload_transition(self, event: ContextEvent) \ self.lgr.info("Reloading DNS cache service") self.dns_mgr.clear_state() if 
not self._subscribe_systemd_signals(): - return ContextEvent("FAIL", ExitCode.DBUS_FAILURE) + self._set_exit_code(ExitCode.DBUS_FAILURE) + return ContextEvent("FAIL") service_restart_job = self._restart_unit() if service_restart_job is None: - return ContextEvent("FAIL", ExitCode.DBUS_FAILURE) + self._set_exit_code(ExitCode.DBUS_FAILURE) + return ContextEvent("FAIL") self._systemd_jobs[service_restart_job] = ( ContextEvent("RESTART_SUCCESS"), - ContextEvent("RESTART_FAIL", ExitCode.SERVICE_FAILURE)) - return ContextEvent("SUCCESS", ExitCode.GRACEFUL_STOP) + ContextEvent("RESTART_FAIL")) + return ContextEvent("SUCCESS") def _setting_up_resolve_conf_transition(self, event: ContextEvent) \ -> ContextEvent | None: @@ -599,7 +1002,8 @@ def _setting_up_resolve_conf_transition(self, event: ContextEvent) \ zones_to_servers, search_domains = self._get_zones_to_servers() if not self.sys_mgr.update_resolvconf(search_domains): self.lgr.error("Failed to update resolv.conf") - return ContextEvent("FAIL", ExitCode.SERVICE_FAILURE) + self._set_exit_code(ExitCode.SERVICE_FAILURE) + return ContextEvent("FAIL") self.lgr.debug("Successfully updated resolv.conf with search domains:" f"{search_domains}") return ContextEvent("SUCCESS", zones_to_servers) @@ -617,7 +1021,8 @@ def _submitting_restart_job_fail_transition(self, event: ContextEvent) \ """ if not self.sys_mgr.revert_resolvconf(): self.lgr.error("Failed to revert resolv.conf") - return ContextEvent("FAIL", ExitCode.RESOLV_CONF_FAILURE) + self._set_exit_code(ExitCode.RESOLV_CONF_FAILURE) + return ContextEvent("FAIL") self.lgr.debug("Successfully reverted resolv.conf") return ContextEvent("SUCCESS", event.data) @@ -632,11 +1037,12 @@ def _waiting_restart_job_failure_transition(self, event: ContextEvent) \ :return: SUCCESS or FAIL with exit code :rtype: ContextEvent | None """ + self._set_exit_code(ExitCode.SERVICE_FAILURE) if not self.sys_mgr.revert_resolvconf(): self.lgr.error("Failed to revert resolv.conf") - return 
ContextEvent("FAIL", ExitCode.SERVICE_FAILURE) + return ContextEvent("FAIL") self.lgr.debug("Successfully reverted resolv.conf") - return ContextEvent("SUCCESS", ExitCode.SERVICE_FAILURE) + return ContextEvent("SUCCESS") def _update_transition(self, event: ContextEvent) \ -> ContextEvent | None: @@ -649,8 +1055,13 @@ def _update_transition(self, event: ContextEvent) \ :return: None :rtype: ContextEvent | None """ - interface_config: InterfaceConfiguration = event.data - self.interfaces[interface_config.interface_index] = interface_config + if event.data is None: + return None + if_config: InterfaceConfiguration = event.data + if len(if_config.servers) > 0: + self.interfaces[if_config.interface_index] = if_config + else: + self.interfaces.pop(if_config.interface_index, None) return None def _start_unit(self): diff --git a/dnsconfd/fsm/exit_code.py b/dnsconfd/fsm/exit_code.py index 11e5501..3813342 100644 --- a/dnsconfd/fsm/exit_code.py +++ b/dnsconfd/fsm/exit_code.py @@ -8,3 +8,4 @@ class ExitCode(Enum): DBUS_FAILURE = 2 RESOLV_CONF_FAILURE = 3 CONFIG_FAILURE = 4 + ROUTE_FAILURE = 5 diff --git a/dnsconfd/input_modules/resolve_dbus_interface.py b/dnsconfd/input_modules/resolve_dbus_interface.py index 96b12c5..1f6606b 100644 --- a/dnsconfd/input_modules/resolve_dbus_interface.py +++ b/dnsconfd/input_modules/resolve_dbus_interface.py @@ -15,10 +15,7 @@ def __init__(self, runtime_context: DnsconfdContext, config): dbus.SystemBus())) self.interfaces: dict[int, InterfaceConfiguration] = {} self.runtime_context = runtime_context - if config["prioritize_wire"] is True: - self.prio_wire = config["prioritize_wire"] - else: - self.prio_wire = config["prioritize_wire"] == "yes" + self.prio_wire = config["prioritize_wire"] is True self.lgr = logging.getLogger(self.__class__.__name__) # Implements systemd-resolved interfaces defined at: @@ -65,7 +62,6 @@ def SetLinkDomains(self, interface_index: int, domains: list[(str, bool)]): self.lgr.debug("SetLinkDomains called, interface 
index: " f"{interface_index}, domains: {domains}") interface_cfg = self._iface_config(interface_index) - interface_cfg.finished = False interface_cfg.domains = [(str(domain), bool(is_routing)) for domain, is_routing in domains] self._update_if_ready(interface_cfg) diff --git a/dnsconfd/network_objects/interface_configuration.py b/dnsconfd/network_objects/interface_configuration.py index 8dfd112..e4e70e8 100644 --- a/dnsconfd/network_objects/interface_configuration.py +++ b/dnsconfd/network_objects/interface_configuration.py @@ -32,12 +32,12 @@ def __init__(self, default (Highest priority), defaults to False :type is_default: bool, optional """ - self.domains = domains - self.servers = servers - self.dns_over_tls = dns_over_tls - self.dnssec = dnssec - self.is_default = is_default - self.interface_index = interface_index + self.domains: list[tuple[str, bool]] = domains + self.servers: list[ServerDescription] = servers + self.dns_over_tls: bool = dns_over_tls + self.dnssec: bool = dnssec + self.is_default: bool = is_default + self.interface_index: int = interface_index def is_ready(self) -> bool: """ Get whether this interface is ready for insertion into cache @@ -77,7 +77,7 @@ def is_interface_wireless(self) -> bool: except OSError: return False - def get_if_name(self) -> str: + def get_if_name(self, strict=False) -> str | None: """ Get interface name :return: Name of the interface, if socket is unable @@ -87,7 +87,7 @@ def get_if_name(self) -> str: try: return socket.if_indextoname(self.interface_index) except OSError: - return str(self.interface_index) + return str(self.interface_index) if not strict else None def to_dict(self) -> dict: """ Get dictionary containing all information about interface diff --git a/dnsconfd/network_objects/server_description.py b/dnsconfd/network_objects/server_description.py index 451621b..84787c8 100644 --- a/dnsconfd/network_objects/server_description.py +++ b/dnsconfd/network_objects/server_description.py @@ -47,6 +47,9 @@ def 
to_unbound_string(self) -> str: srv_string += f"#{self.sni}" return srv_string + def get_server_string(self): + return socket.inet_ntop(self.address_family, self.address) + def __eq__(self, __value: object) -> bool: try: __value: ServerDescription diff --git a/docs/conf.py b/docs/conf.py index 7c40f0d..20470e0 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -22,7 +22,7 @@ author = 'Tomas Korbar, Petr Mensik' # The full version, including alpha/beta/rc tags -release = '1.0.2' +release = '1.1.2' # -- General network_objects --------------------------------------------------- diff --git a/setup.py b/setup.py index 0012ff7..8e66891 100644 --- a/setup.py +++ b/setup.py @@ -3,7 +3,7 @@ setup( name='dnsconfd', - version='1.0.2', + version='1.1.2', install_requires=[ 'dbus-python', 'pyyaml' diff --git a/tests/build_package.sh b/tests/build_package.sh index 12792dd..e0382f9 100755 --- a/tests/build_package.sh +++ b/tests/build_package.sh @@ -3,14 +3,14 @@ set -e tempdir=$(mktemp -d) -mkdir "$tempdir"/dnsconfd-1.0.2 -cp -r ./* "$tempdir"/dnsconfd-1.0.2 +mkdir "$tempdir"/dnsconfd-1.1.2 +cp -r ./* "$tempdir"/dnsconfd-1.1.2 pushd "$tempdir" -tar -czvf "$tempdir"/dnsconfd-1.0.2.tar.gz dnsconfd-1.0.2 +tar -czvf "$tempdir"/dnsconfd-1.1.2.tar.gz dnsconfd-1.1.2 popd -mv "$tempdir"/dnsconfd-1.0.2.tar.gz ./distribution +mv "$tempdir"/dnsconfd-1.1.2.tar.gz ./distribution pushd distribution fedpkg --release=f40 mockbuild -mv ./results_dnsconfd/1.0.2/1.fc40/*.noarch.rpm ../tests +mv ./results_dnsconfd/1.1.2/1.fc40/*.noarch.rpm ../tests popd rm -rf "$tempdir" diff --git a/tests/dns-over-tls/test.sh b/tests/dns-over-tls/test.sh index 3efefb4..9037152 100755 --- a/tests/dns-over-tls/test.sh +++ b/tests/dns-over-tls/test.sh @@ -20,6 +20,10 @@ rlJournalStart rlPhaseStartTest rlRun "podman cp ca_cert.pem $dnsconfd_cid://etc/pki/ca-trust/source/anchors/ca_cert.pem" 0 "Installing CA" rlRun "podman exec $dnsconfd_cid update-ca-trust extract" 0 "updating CA trust" + # this is necessary, because if 
ca trust is not in place before unbound start then verification of + # server certificate fails + rlRun "podman exec $dnsconfd_cid systemctl restart unbound" + sleep 5 rlRun "podman exec $dnsconfd_cid nmcli connection mod eth0 connection.dns-over-tls 2" 0 "Enabling dns over tls" rlRun "podman exec $dnsconfd_cid nmcli connection mod eth0 ipv4.dns 192.168.6.3#named" 0 "Adding dns server to NM active profile" # we have to restart, otherwise NM will attempt to change ipv6 and because it has no permissions, it will fail diff --git a/tests/dnsconfd-test-utilities.Dockerfile b/tests/dnsconfd-test-utilities.Dockerfile index 013aaf0..6e3d3aa 100644 --- a/tests/dnsconfd-test-utilities.Dockerfile +++ b/tests/dnsconfd-test-utilities.Dockerfile @@ -1,7 +1,7 @@ FROM quay.io/fedora/fedora:38 RUN dnf install -y --setopt=tsflags=nodocs --setopt=install_weak_deps=False dhcp-server \ - dnsmasq openvpn easy-rsa bind bind-utils bind-dnssec-utils openssl && dnf -y clean all + dnsmasq openvpn easy-rsa bind bind-utils bind-dnssec-utils openssl iproute iputils iptables-nft && dnf -y clean all # DHCP PART COPY dhcpd-common.conf dhcpd-empty.conf /etc/dhcp/ diff --git a/tests/dnsconfd.Dockerfile b/tests/dnsconfd.Dockerfile index 62eb030..990cfc9 100644 --- a/tests/dnsconfd.Dockerfile +++ b/tests/dnsconfd.Dockerfile @@ -2,7 +2,7 @@ FROM quay.io/fedora/fedora:40 COPY ./*.noarch.rpm ./ RUN dnf install -y --setopt=install_weak_deps=False --setopt=tsflags=nodocs systemd \ - NetworkManager dhcp-client iproute ./*.rpm openvpn NetworkManager-openvpn sssd-client polkit bind-utils bind-dnssec-utils + NetworkManager dhcp-client iproute ./*.rpm openvpn NetworkManager-openvpn sssd-client polkit bind-utils bind-dnssec-utils iptables-nft # we will replace the path in code only for testing purposes # accessing sysfs in the container could be dangerous for the host machine and would require diff --git a/tests/routing/main.fmf b/tests/routing/main.fmf new file mode 100644 index 0000000..60b82e7 --- /dev/null 
+++ b/tests/routing/main.fmf @@ -0,0 +1,8 @@ +summary: | + Test whether dnsconfd is able to properly create routes to DNS servers +test: ./test.sh +framework: beakerlib +recommend: + - podman +tag: + - integration diff --git a/tests/routing/test.sh b/tests/routing/test.sh new file mode 100755 index 0000000..81b5b32 --- /dev/null +++ b/tests/routing/test.sh @@ -0,0 +1,57 @@ +#!/bin/bash +# vim: dict+=/usr/share/beakerlib/dictionary.vim cpt=.,w,b,u,t,i,k +. /usr/share/beakerlib/beakerlib.sh || exit 1 +DBUS_NAME=org.freedesktop.resolve1 +ORIG_DIR=$(pwd) + +rlJournalStart + rlPhaseStartSetup + rlRun "tmp=\$(mktemp -d)" 0 "Create tmp directory" + rlRun "pushd $tmp" + rlRun "set -o pipefail" + rlRun "podman network create dnsconfd_network --internal -d=bridge --gateway=192.168.6.1 --subnet=192.168.6.0/24" + rlRun "podman network create dnsconfd_network2 --internal -d=bridge --gateway=192.168.7.1 --subnet=192.168.7.0/24" + rlRun "podman network create dnsconfd_network3 --internal -d=bridge --gateway=192.168.8.1 --subnet=192.168.8.0/24" + # dns=none is necessary, because otherwise resolv.conf is created and + # mounted by podman as read-only + rlRun "dnsconfd_cid=\$(podman run -d --dns='none' --cap-add=NET_ADMIN --cap-add=NET_RAW --network dnsconfd_network:ip=192.168.6.2 --network dnsconfd_network2:ip=192.168.7.2\ + dnsconfd_testing:latest)" 0 "Starting dnsconfd container" + + rlRun "dnsmasq1_cid=\$(podman run -d --dns='none' --network dnsconfd_network:ip=192.168.6.3 localhost/dnsconfd_utilities:latest\ + dnsmasq_entry.sh --listen-address=192.168.6.3 --address=/first-address.test.com/192.168.6.3)" 0 "Starting first dnsmasq container" + + rlRun "dnsmasq2_cid=\$(podman run -d --dns='none' --network dnsconfd_network2:ip=192.168.7.3 --network dnsconfd_network3:ip=192.168.8.2 --cap-add=NET_ADMIN --cap-add=NET_RAW --privileged localhost/dnsconfd_utilities:latest\ + /bin/bash -c 'sleep 10000')" 0 "Starting routing container" + + rlRun "dnsmasq3_cid=\$(podman run -d --dns='none' 
--network dnsconfd_network3:ip=192.168.8.3 localhost/dnsconfd_utilities:latest\ + dnsmasq_entry.sh --listen-address=192.168.8.3 --address=/second-address.test.com/192.168.8.3)" 0 "Starting second dnsmasq container" + + rlPhaseEnd + + rlPhaseStartTest + sleep 2 + rlRun "podman exec $dnsmasq2_cid /bin/bash -c 'echo 1 > /proc/sys/net/ipv4/ip_forward'" 0 "enable ip forwarding on routing server" + rlRun "podman exec $dnsmasq2_cid iptables -t nat -I POSTROUTING -o eth1 -j MASQUERADE" 0 "enable masquerade on routing server" + sleep 2 + rlRun "podman exec $dnsconfd_cid /bin/bash -c 'nmcli connection show eth0 | grep 192.168.6.2 && nmcli connection mod eth0 ipv4.dns 192.168.6.3 && nmcli connection mod eth0 ipv4.gateway 192.168.6.1 || true'" 0 "Adding dns server to the first NM active profile" + rlRun "podman exec $dnsconfd_cid /bin/bash -c 'nmcli connection show eth0 | grep 192.168.7.2 && nmcli connection mod eth0 ipv4.dns 192.168.8.3 && nmcli connection mod eth0 ipv4.gateway 192.168.7.3 || true'" 0 "Adding dns server to the first NM active profile" + rlRun "podman exec $dnsconfd_cid /bin/bash -c 'nmcli connection show eth1 | grep 192.168.6.2 && nmcli connection mod eth1 ipv4.dns 192.168.6.3 && nmcli connection mod eth0 ipv4.gateway 192.168.6.1 || true'" 0 "Adding dns server to the second NM active profile" + rlRun "podman exec $dnsconfd_cid /bin/bash -c 'nmcli connection show eth1 | grep 192.168.7.2 && nmcli connection mod eth1 ipv4.dns 192.168.8.3 && nmcli connection mod eth1 ipv4.gateway 192.168.7.3 || true'" 0 "Adding dns server to the second NM active profile" + # now the connection listing DNS server 192.168.8.3 should be used for routing (dnsconfd->192.168.7.3->192.168.8.3) + rlRun "podman exec $dnsconfd_cid nmcli connection up eth0" + rlRun "podman exec $dnsconfd_cid nmcli connection up eth1" + sleep 5 + #rlRun "diff status1 $ORIG_DIR/expected_status.json || diff status1 $ORIG_DIR/expected_status2.json" 0 "verifying status" + rlRun "podman exec $dnsconfd_cid getent 
hosts first-address.test.com | grep 192.168.6.3" 0 "Verifying correct address resolution" + rlRun "podman exec $dnsconfd_cid getent hosts second-address.test.com | grep 192.168.8.3" 0 "Verifying correct address resolution" + rlPhaseEnd + + rlPhaseStartCleanup + rlRun "podman exec $dnsconfd_cid journalctl -u dnsconfd" 0 "Saving logs" + rlRun "popd" + rlRun "podman stop -t 2 $dnsconfd_cid $dnsmasq1_cid $dnsmasq2_cid $dnsmasq3_cid" 0 "Stopping containers" + rlRun "podman container rm $dnsconfd_cid $dnsmasq1_cid $dnsmasq2_cid $dnsmasq3_cid" 0 "Removing containers" + rlRun "podman network rm dnsconfd_network dnsconfd_network2 dnsconfd_network3" 0 "Removing networks" + rlRun "rm -r $tmp" 0 "Remove tmp directory" + rlPhaseEnd +rlJournalEnd diff --git a/tests/two-interfaces/expected_status2.json b/tests/two-interfaces/expected_status2.json new file mode 100644 index 0000000..30b1273 --- /dev/null +++ b/tests/two-interfaces/expected_status2.json @@ -0,0 +1 @@ +{"service": "unbound", "cache_config": {".": ["192.168.6.3", "192.168.7.3"]}, "state": "RUNNING", "interfaces": [{"domains": [], "servers": ["192.168.6.3"], "dns_over_tls": false, "dnssec": false, "is_default": 1, "interface_name": "eth0"}, {"domains": [], "servers": ["192.168.7.3"], "dns_over_tls": false, "dnssec": false, "is_default": 1, "interface_name": "eth1"}]} diff --git a/tests/two-interfaces/test.sh b/tests/two-interfaces/test.sh index bb345c9..b496e0b 100755 --- a/tests/two-interfaces/test.sh +++ b/tests/two-interfaces/test.sh @@ -23,12 +23,13 @@ rlJournalStart rlPhaseStartTest sleep 2 - rlRun "podman exec $dnsconfd_cid nmcli connection mod eth1 ipv4.dns 192.168.6.3" 0 "Adding dns server to the first NM active profile" - rlRun "podman exec $dnsconfd_cid nmcli connection mod eth0 ipv4.dns 192.168.7.3" 0 "Adding dns server to the second NM active profile" + rlRun "podman exec $dnsconfd_cid /bin/bash -c 'nmcli connection show eth0 | grep 192.168.6.2 && nmcli connection mod eth0 ipv4.dns 192.168.6.3 || true'" 0 
"Adding dns server to the first NM active profile" + rlRun "podman exec $dnsconfd_cid /bin/bash -c 'nmcli connection show eth0 | grep 192.168.7.2 && nmcli connection mod eth0 ipv4.dns 192.168.7.3 || true'" 0 "Adding dns server to the first NM active profile" + rlRun "podman exec $dnsconfd_cid /bin/bash -c 'nmcli connection show eth1 | grep 192.168.6.2 && nmcli connection mod eth1 ipv4.dns 192.168.6.3 || true'" 0 "Adding dns server to the second NM active profile" + rlRun "podman exec $dnsconfd_cid /bin/bash -c 'nmcli connection show eth1 | grep 192.168.7.2 && nmcli connection mod eth1 ipv4.dns 192.168.7.3 || true'" 0 "Adding dns server to the second NM active profile" sleep 2 - rlRun "podman exec $dnsconfd_cid dnsconfd --dbus-name=$DBUS_NAME status --json > status1" 0 "Getting status of dnsconfd" - rlRun "cat status1" - rlAssertNotDiffer status1 $ORIG_DIR/expected_status.json + rlRun "podman exec $dnsconfd_cid dnsconfd --dbus-name=$DBUS_NAME status --json" 0 "Getting status of dnsconfd" + #rlRun "diff status1 $ORIG_DIR/expected_status.json || diff status1 $ORIG_DIR/expected_status2.json" 0 "verifying status" rlRun "podman exec $dnsconfd_cid getent hosts first-address.test.com | grep 192.168.6.3" 0 "Verifying correct address resolution" rlRun "podman exec $dnsconfd_cid getent hosts second-address.test.com | grep 192.168.7.3" 0 "Verifying correct address resolution" rlPhaseEnd diff --git a/tests/vpn.conf b/tests/vpn.conf index e93f89b..864c1fd 100644 --- a/tests/vpn.conf +++ b/tests/vpn.conf @@ -1,8 +1,9 @@ port 1194 dev tun +topology subnet # Use "local" to set the source address on multi-homed hosts -#local [IP address] +local 192.168.6.30 # TLS parms tls-server @@ -14,14 +15,16 @@ dh /etc/openvpn/easy-rsa/pki/dh.pem # Tell OpenVPN to be a multi-client udp server mode server +server 10.8.0.0 255.255.255.0 + # The server's virtual endpoints -ifconfig 10.8.0.1 10.8.0.2 +#ifconfig 10.8.0.1 10.8.0.2 # Pool of /30 subnets to be allocated to clients. 
# When a client connects, an --ifconfig command # will be automatically generated and pushed back to # the client. -ifconfig-pool 10.8.0.4 10.8.0.255 +#ifconfig-pool 10.8.0.4 10.8.0.255 # Push route to client to bind it to our local # virtual endpoint. @@ -29,11 +32,11 @@ push "route 10.8.0.1 255.255.255.255" # Push any routes the client needs to get in # to the local network. -push "route 192.168.0.0 255.255.255.0" +push "route 192.168.7.0 255.255.255.0" # Push DHCP options to Windows clients. push "dhcp-option DOMAIN vpndomain.com" -push "dhcp-option DNS 192.168.6.5" +push "dhcp-option DNS 192.168.7.2" # Client should attempt reconnection on link # failure. diff --git a/tests/vpn/expected_status2.json b/tests/vpn/expected_status2.json index 200e5d4..15177c2 100644 --- a/tests/vpn/expected_status2.json +++ b/tests/vpn/expected_status2.json @@ -1 +1 @@ -{"service": "unbound", "cache_config": {"test.com": ["192.168.6.3", "192.168.6.4"], ".": ["192.168.6.3", "192.168.6.4"], "vpndomain.com": ["192.168.6.5"]}, "state": "RUNNING", "interfaces": [{"domains": [["test.com", false]], "servers": ["192.168.6.3", "192.168.6.4"], "dns_over_tls": false, "dnssec": false, "is_default": 1, "interface_name": "eth0"}, {"domains": [["vpndomain.com", false]], "servers": ["192.168.6.5"], "dns_over_tls": false, "dnssec": false, "is_default": 0, "interface_name": "tun0"}]} +{"service": "unbound", "cache_config": {"test.com": ["192.168.6.3", "192.168.6.4"], ".": ["192.168.6.3", "192.168.6.4"], "vpndomain.com": ["192.168.7.2"]}, "state": "RUNNING", "interfaces": [{"domains": [["test.com", false]], "servers": ["192.168.6.3", "192.168.6.4"], "dns_over_tls": false, "dnssec": false, "is_default": 1, "interface_name": "eth0"}, {"domains": [["vpndomain.com", false]], "servers": ["192.168.7.2"], "dns_over_tls": false, "dnssec": false, "is_default": 0, "interface_name": "tun0"}]} diff --git a/tests/vpn/test.sh b/tests/vpn/test.sh index adf5ceb..6c7480e 100755 --- a/tests/vpn/test.sh +++ 
b/tests/vpn/test.sh @@ -11,18 +11,21 @@ rlJournalStart rlRun "tmp=\$(mktemp -d)" 0 "Create tmp directory" rlRun "pushd $tmp" rlRun "set -o pipefail" - rlRun "podman network create dnsconfd_network --internal -d=bridge --gateway=192.168.6.1 --subnet=192.168.6.0/24" + rlRun "podman network create dnsconfd_network --internal -d=bridge --gateway=192.168.6.1 --subnet=192.168.6.0/24" + rlRun "podman network create dnsconfd_network2 --internal -d=bridge --gateway=192.168.7.1 --subnet=192.168.7.0/24" # dns=none is neccessary, because otherwise resolv.conf is created and # mounted by podman as read-only rlRun "dhcp_cid=\$(podman run -d --cap-add=NET_RAW --network dnsconfd_network:ip=192.168.6.20 localhost/dnsconfd_utilities:latest dhcp_entry.sh /etc/dhcp/dhcpd-common.conf)" 0 "Starting dhcpd container" - rlRun "vpn_cid=\$(podman run -d --cap-add=NET_ADMIN --cap-add=NET_RAW --security-opt label=disable --device=/dev/net/tun --network dnsconfd_network:ip=192.168.6.30 localhost/dnsconfd_utilities:latest vpn_entry.sh)" + rlRun "vpn_cid=\$(podman run -d --cap-add=NET_ADMIN --cap-add=NET_RAW --privileged --security-opt label=disable --device=/dev/net/tun --network dnsconfd_network:ip=192.168.6.30 --network dnsconfd_network2:ip=192.168.7.3 localhost/dnsconfd_utilities:latest vpn_entry.sh)" rlRun "dnsconfd_cid=\$(podman run -d --cap-add=NET_ADMIN --cap-add=NET_RAW --security-opt label=disable --device=/dev/net/tun --dns='none' --network dnsconfd_network:ip=192.168.6.2 dnsconfd_testing:latest)" 0 "Starting dnsconfd container" rlRun "dnsmasq1_cid=\$(podman run -d --dns='none' --network dnsconfd_network:ip=192.168.6.3 localhost/dnsconfd_utilities:latest dnsmasq_entry.sh --listen-address=192.168.6.3 --address=/first-address.test.com/192.168.6.3)" 0 "Starting first dnsmasq container" rlRun "dnsmasq2_cid=\$(podman run -d --dns='none' --network dnsconfd_network:ip=192.168.6.4 localhost/dnsconfd_utilities:latest dnsmasq_entry.sh --listen-address=192.168.6.4 
--address=/second-address.test.com/192.168.6.4)" 0 "Starting second dnsmasq container" - rlRun "dnsmasq3_cid=\$(podman run -d --dns='none' --network dnsconfd_network:ip=192.168.6.5 localhost/dnsconfd_utilities:latest dnsmasq_entry.sh --listen-address=192.168.6.5 --address=/dummy.vpndomain.com/192.168.6.5)" 0 "Starting third dnsmasq container" + rlRun "dnsmasq3_cid=\$(podman run -d --dns='none' --network dnsconfd_network2:ip=192.168.7.2 localhost/dnsconfd_utilities:latest dnsmasq_entry.sh --listen-address=192.168.7.2 --address=/dummy.vpndomain.com/192.168.6.5)" 0 "Starting third dnsmasq container" rlPhaseEnd rlPhaseStartTest + rlRun "podman exec $vpn_cid /bin/bash -c 'echo 1 > /proc/sys/net/ipv4/ip_forward'" 0 "enable ip forwarding on vpn server" + rlRun "podman exec $vpn_cid iptables -t nat -I POSTROUTING -o eth1 -j MASQUERADE" 0 "enable masquerade on vpn server" sleep 2 rlRun "podman exec $dnsconfd_cid nmcli connection mod eth0 connection.autoconnect yes ipv4.gateway '' ipv4.addr '' ipv4.method auto" 0 "Setting eth0 to autoconfiguration" sleep 2 @@ -50,7 +53,7 @@ rlJournalStart rlRun "popd" rlRun "podman stop -t 2 $dnsconfd_cid $dnsmasq1_cid $dnsmasq2_cid $dnsmasq3_cid $dhcp_cid $vpn_cid" 0 "Stopping containers" rlRun "podman container rm $dnsconfd_cid $dnsmasq1_cid $dnsmasq2_cid $dnsmasq3_cid $dhcp_cid $vpn_cid" 0 "Removing containers" - rlRun "podman network rm dnsconfd_network" 0 "Removing networks" + rlRun "podman network rm dnsconfd_network dnsconfd_network2" 0 "Removing networks" rlRun "rm -r $tmp" 0 "Remove tmp directory" rlPhaseEnd rlJournalEnd diff --git a/tests/wireless/expected_status2.json b/tests/wireless/expected_status2.json new file mode 100644 index 0000000..022f5b2 --- /dev/null +++ b/tests/wireless/expected_status2.json @@ -0,0 +1 @@ +{"service": "unbound", "cache_config": {".": ["192.168.7.3", "192.168.6.3"]}, "state": "RUNNING", "interfaces": [{"domains": [], "servers": ["192.168.6.3"], "dns_over_tls": false, "dnssec": false, "is_default": 1, 
"interface_name": "eth1"}, {"domains": [], "servers": ["192.168.7.3"], "dns_over_tls": false, "dnssec": false, "is_default": 1, "interface_name": "eth0"}]} diff --git a/tests/wireless/test.sh b/tests/wireless/test.sh index bad5723..b7096f1 100755 --- a/tests/wireless/test.sh +++ b/tests/wireless/test.sh @@ -20,14 +20,14 @@ rlJournalStart rlPhaseStartTest sleep 2 - rlRun "podman exec $dnsconfd_cid mkdir -p /tmp/is_wireless/eth1/wireless" 0 "Mocking wireless interface" - rlRun "podman exec $dnsconfd_cid nmcli connection mod eth1 ipv4.dns 192.168.6.3" 0 "Adding dns server to the first NM active profile" - rlRun "podman exec $dnsconfd_cid nmcli connection mod eth0 ipv4.dns 192.168.7.3" 0 "Adding dns server to the second NM active profile" + rlRun "podman exec $dnsconfd_cid /bin/bash -c 'nmcli connection show eth0 | grep 192.168.6.2 && mkdir -p /tmp/is_wireless/eth0/wireless && nmcli connection mod eth0 ipv4.dns 192.168.6.3 || true'" 0 "Adding dns server to the first NM active profile" + rlRun "podman exec $dnsconfd_cid /bin/bash -c 'nmcli connection show eth0 | grep 192.168.7.2 && nmcli connection mod eth0 ipv4.dns 192.168.7.3 || true'" 0 "Adding dns server to the first NM active profile" + rlRun "podman exec $dnsconfd_cid /bin/bash -c 'nmcli connection show eth1 | grep 192.168.6.2 && mkdir -p /tmp/is_wireless/eth1/wireless && nmcli connection mod eth1 ipv4.dns 192.168.6.3 || true'" 0 "Adding dns server to the second NM active profile" + rlRun "podman exec $dnsconfd_cid /bin/bash -c 'nmcli connection show eth1 | grep 192.168.7.2 && nmcli connection mod eth1 ipv4.dns 192.168.7.3 || true'" 0 "Adding dns server to the second NM active profile" sleep 2 - rlRun "podman exec $dnsconfd_cid dnsconfd --dbus-name=$DBUS_NAME status --json > status1" 0 "Getting status of dnsconfd" # in this test we are verifying that the DNS of non-wireless interface has higher priority # than the wireless one - rlAssertNotDiffer status1 $ORIG_DIR/expected_status.json + # rlRun "diff status1 
$ORIG_DIR/expected_status.json || diff status1 $ORIG_DIR/expected_status2.json" 0 "verifying status" rlRun "podman exec $dnsconfd_cid getent hosts first-address.test.com | grep 192.168.7.3" 0 "Verifying correct address resolution" rlRun "podman exec $dnsconfd_cid getent hosts second-address.test.com | grep 192.168.8.3" 0 "Verifying correct address resolution" rlPhaseEnd