diff --git a/backport-Do-not-generate-dsa-and-ed25519-key-types-when-crypt.patch b/backport-Do-not-generate-dsa-and-ed25519-key-types-when-crypt.patch new file mode 100644 index 0000000000000000000000000000000000000000..3f729e74e7a5b4bd72a951808ad8c83a0cd9495f --- /dev/null +++ b/backport-Do-not-generate-dsa-and-ed25519-key-types-when-crypt.patch @@ -0,0 +1,231 @@ +From c53f04aeb2acf9526a2ebf3d3320f149ac46caa6 Mon Sep 17 00:00:00 2001 +From: Ani Sinha +Date: Tue, 2 May 2023 20:35:45 +0530 +Subject: [PATCH] Do not generate dsa and ed25519 key types when crypto FIPS + mode is enabled (#2142) + +Reference:https://github.com/canonical/cloud-init/commit/c53f04aeb2acf9526a2ebf3d3320f149ac46caa6 +Conflict:(1)Add extra information to cc_ssh.py: ++import logging ++LOG = logging.getLogger(__name__) +(2)format diffs. +(3)add 'M_PATH = "cloudinit.util."' in test_util.py + +DSA and ED25519 key types are not supported when FIPS is enabled in crypto. +Check if FIPS has been enabled on the system and if so, do not generate those +key types. Presently the check is only available on Linux systems. + +LP: 2017761 +RHBZ: 2187164 + +Signed-off-by: Ani Sinha +--- + cloudinit/config/cc_ssh.py | 23 ++++++++++++++++- + cloudinit/util.py | 12 +++++++++ + cloudinit/config/tests/test_ssh.py | 40 ++++++++++++++++++++++-------- + tests/unittests/test_util.py | 26 +++++++++++++++++++ + 4 files changed, 90 insertions(+), 11 deletions(-) + +diff --git a/cloudinit/config/cc_ssh.py b/cloudinit/config/cc_ssh.py +index 1053ab6..2fda565 100755 +--- a/cloudinit/config/cc_ssh.py ++++ b/cloudinit/config/cc_ssh.py +@@ -167,8 +167,12 @@ from cloudinit import ssh_util + from cloudinit import subp + from cloudinit import util + ++import logging ++LOG = logging.getLogger(__name__) + + GENERATE_KEY_NAMES = ['rsa', 'dsa', 'ecdsa', 'ed25519'] ++FIPS_UNSUPPORTED_KEY_NAMES = ["dsa", "ed25519"] ++ + KEY_FILE_TPL = '/etc/ssh/ssh_host_%s_key' + PUBLISH_HOST_KEYS = True + # Don't publish the dsa hostkey by default since OpenSSH recommends not using +@@ -231,9 +235,26 @@ def handle(_name, cfg, cloud, log, _args): + genkeys = util.get_cfg_option_list(cfg, + 'ssh_genkeytypes', + GENERATE_KEY_NAMES) ++ # remove keys that are not supported in fips mode if its enabled ++ key_names = ( ++ genkeys ++ if not util.fips_enabled() ++ else [ ++ names ++ for names in genkeys ++ if names not in FIPS_UNSUPPORTED_KEY_NAMES ++ ] ++ ) ++ skipped_keys = set(genkeys).difference(key_names) ++ if skipped_keys: ++ LOG.debug( ++ "skipping keys that are not supported in fips mode: %s", ++ ",".join(skipped_keys), ++ ) ++ + lang_c = os.environ.copy() + lang_c['LANG'] = 'C' +- for keytype in genkeys: ++ for keytype in key_names: + keyfile = KEY_FILE_TPL % (keytype) + if os.path.exists(keyfile): + continue +diff --git a/cloudinit/util.py b/cloudinit/util.py +index 78164de..c18aecf 100644 +--- a/cloudinit/util.py ++++ b/cloudinit/util.py +@@ -1418,6 +1418,18 @@ def get_cmdline(): + return _get_cmdline() + + ++def fips_enabled() -> bool: ++ fips_proc = "/proc/sys/crypto/fips_enabled" ++ try: ++ contents = load_file(fips_proc).strip() ++ return contents == "1" ++ except (IOError, OSError): ++ # for BSD systems and Linux systems where the proc entry is not ++ # available, we assume FIPS is disabled to retain the old behavior ++ # for now. 
++ return False ++ ++ + def pipe_in_out(in_fh, out_fh, chunk_size=1024, chunk_cb=None): + bytes_piped = 0 + while True: +diff --git a/cloudinit/config/tests/test_ssh.py b/cloudinit/config/tests/test_ssh.py +index 714949c..b8fb610 100644 +--- a/cloudinit/config/tests/test_ssh.py ++++ b/cloudinit/config/tests/test_ssh.py +@@ -91,12 +91,16 @@ class TestHandleSsh: + expected_calls = [mock.call(set(keys), user)] + expected_calls + assert expected_calls == m_setup_keys.call_args_list + ++ @pytest.mark.parametrize("fips_enabled", (True, False)) + @mock.patch(MODPATH + "glob.glob") + @mock.patch(MODPATH + "ug_util.normalize_users_groups") + @mock.patch(MODPATH + "os.path.exists") +- def test_handle_no_cfg(self, m_path_exists, m_nug, +- m_glob, m_setup_keys): ++ @mock.patch(MODPATH + "util.fips_enabled") ++ def test_handle_no_cfg( ++ self, m_fips, m_path_exists, m_nug, m_glob, m_setup_keys, fips_enabled ++ ): + """Test handle with no config ignores generating existing keyfiles.""" ++ m_fips.return_value = fips_enabled + cfg = {} + keys = ["key1"] + m_glob.return_value = [] # Return no matching keys to prevent removal +@@ -109,12 +113,22 @@ class TestHandleSsh: + options = ssh_util.DISABLE_USER_OPTS.replace("$USER", "NONE") + options = options.replace("$DISABLE_USER", "root") + m_glob.assert_called_once_with('/etc/ssh/ssh_host_*key*') +- assert [ +- mock.call("/etc/ssh/ssh_host_rsa_key"), +- mock.call("/etc/ssh/ssh_host_dsa_key"), +- mock.call("/etc/ssh/ssh_host_ecdsa_key"), +- mock.call("/etc/ssh/ssh_host_ed25519_key"), +- ] in m_path_exists.call_args_list ++ m_fips.assert_called_once() ++ ++ if not m_fips(): ++ expected_calls = [ ++ mock.call("/etc/ssh/ssh_host_rsa_key"), ++ mock.call("/etc/ssh/ssh_host_dsa_key"), ++ mock.call("/etc/ssh/ssh_host_ecdsa_key"), ++ mock.call("/etc/ssh/ssh_host_ed25519_key"), ++ ] ++ else: ++ # Enabled fips doesn't generate dsa or ed25519 ++ expected_calls = [ ++ mock.call("/etc/ssh/ssh_host_rsa_key"), ++ mock.call("/etc/ssh/ssh_host_ecdsa_key"), ++ ] ++ assert expected_calls in m_path_exists.call_args_list + assert [ + mock.call(set(keys), "root", options=options) + ] == m_setup_keys.call_args_list +@@ -122,8 +136,10 @@ class TestHandleSsh: + @mock.patch(MODPATH + "glob.glob") + @mock.patch(MODPATH + "ug_util.normalize_users_groups") + @mock.patch(MODPATH + "os.path.exists") +- def test_dont_allow_public_ssh_keys(self, m_path_exists, m_nug, +- m_glob, m_setup_keys): ++ @mock.patch(MODPATH + "util.fips_enabled", return_value=False) ++ def test_dont_allow_public_ssh_keys( ++ self, m_fips, m_path_exists, m_nug, m_glob, m_setup_keys ++ ): + """Test allow_public_ssh_keys=False ignores ssh public keys from + platform. 
+ """ +@@ -166,8 +182,10 @@ class TestHandleSsh: + @mock.patch(MODPATH + "glob.glob") + @mock.patch(MODPATH + "ug_util.normalize_users_groups") + @mock.patch(MODPATH + "os.path.exists") ++ @mock.patch(MODPATH + "util.fips_enabled", return_value=False) + def test_handle_default_root( + self, ++ m_fips, + m_path_exists, + m_nug, + m_glob, +@@ -232,8 +250,10 @@ class TestHandleSsh: + @mock.patch(MODPATH + "glob.glob") + @mock.patch(MODPATH + "ug_util.normalize_users_groups") + @mock.patch(MODPATH + "os.path.exists") ++ @mock.patch(MODPATH + "util.fips_enabled", return_value=False) + def test_handle_publish_hostkeys( + self, ++ m_fips, + m_path_exists, + m_nug, + m_glob, +diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py +index 2ab3bad..7368b3b 100644 +--- a/tests/unittests/test_util.py ++++ b/tests/unittests/test_util.py +@@ -16,6 +16,7 @@ from cloudinit import subp + from cloudinit import importer, util + from cloudinit.tests import helpers + ++M_PATH = "cloudinit.util." + + class FakeSelinux(object): + +@@ -287,6 +287,31 @@ class TestGetCmdline(helpers.TestCase): + self.assertEqual("abcd 123", ret) + + ++class TestFipsEnabled: ++ @pytest.mark.parametrize( ++ "fips_enabled_content,expected", ++ ( ++ pytest.param(None, False, id="false_when_no_fips_enabled_file"), ++ pytest.param("0\n", False, id="false_when_fips_disabled"), ++ pytest.param("1\n", True, id="true_when_fips_enabled"), ++ pytest.param("1", True, id="true_when_fips_enabled_no_newline"), ++ ), ++ ) ++ @mock.patch(M_PATH + "load_file") ++ def test_fips_enabled_based_on_proc_crypto( ++ self, load_file, fips_enabled_content, expected, tmpdir ++ ): ++ def fake_load_file(path): ++ assert path == "/proc/sys/crypto/fips_enabled" ++ if fips_enabled_content is None: ++ raise IOError("No file exists Bob") ++ return fips_enabled_content ++ ++ load_file.side_effect = fake_load_file ++ ++ assert expected is util.fips_enabled() ++ ++ + class TestLoadYaml(helpers.CiTestCase): + mydefault = "7b03a8ebace993d806255121073fed52" + with_logs = True +-- +2.33.0 + + diff --git a/backport-Do-not-silently-ignore-integer-uid-1280.patch b/backport-Do-not-silently-ignore-integer-uid-1280.patch new file mode 100644 index 0000000000000000000000000000000000000000..fdb2fcd39688c482d895debc7bdfa499aee71642 --- /dev/null +++ b/backport-Do-not-silently-ignore-integer-uid-1280.patch @@ -0,0 +1,60 @@ +From 2837b835f101d81704f018a4f872b1d660eb6f3e Mon Sep 17 00:00:00 2001 +From: Brett Holman +Date: Wed, 23 Feb 2022 11:57:59 -0700 +Subject: [PATCH] Do not silently ignore integer uid (#1280) + +Reference:https://github.com/canonical/cloud-init/commit/2837b835f101d81704f018a4f872b1d660eb6f3e +Conflict:NA + +The docs do not make it obvious that uid is supposed to be of type string. +Current behavior is to silently ignore integer uid. 
+ +LP: #1875772 +--- + cloudinit/distros/__init__.py | 2 ++ + tests/integration_tests/modules/test_users_groups.py | 8 ++++++++ + 2 files changed, 10 insertions(+) + +diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py +index 9a695be..2ee8c9e 100755 +--- a/cloudinit/distros/__init__.py ++++ b/cloudinit/distros/__init__.py +@@ -495,6 +495,8 @@ class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta): + if not util.is_group(group): + self.create_group(group) + LOG.debug("created group '%s' for user '%s'", group, name) ++ if "uid" in kwargs.keys(): ++ kwargs["uid"] = str(kwargs["uid"]) + + # Check the values and create the command + for key, val in sorted(kwargs.items()): +diff --git a/tests/integration_tests/modules/test_users_groups.py b/tests/integration_tests/modules/test_users_groups.py +index bcb17b7..326df67 100644 +--- a/tests/integration_tests/modules/test_users_groups.py ++++ b/tests/integration_tests/modules/test_users_groups.py +@@ -39,6 +39,10 @@ AHWYPYb2FT.lbioDm2RrkJPb9BZMN1O/ + gecos: Magic Cloud App Daemon User + inactive: true + system: true ++ - name: eric ++ uid: 1742 ++ - name: archivist ++ uid: '1743' + """ + + +@@ -76,6 +80,10 @@ class TestUsersGroups: + ), + # Test the cloudy user + (["passwd", "cloudy"], r"cloudy:x:[0-9]{3,4}:"), ++ # Test str uid ++ (["passwd", "eric"], r"eric:x:1742:"), ++ # Test int uid ++ (["passwd", "archivist"], r"archivist:x:1743:"), + ], + ) + def test_users_groups(self, regex, getent_args, class_client): +-- +2.33.0 + + diff --git a/backport-Don-t-fail-if-IB-and-Ethernet-devices-collide-1411.patch b/backport-Don-t-fail-if-IB-and-Ethernet-devices-collide-1411.patch new file mode 100644 index 0000000000000000000000000000000000000000..96890eded9515e67ded83e1bfe528a515ba262d1 --- /dev/null +++ b/backport-Don-t-fail-if-IB-and-Ethernet-devices-collide-1411.patch @@ -0,0 +1,75 @@ +iFrom 4d6576560de01ab4f4a75924a5b7b81fd9e5bd2a Mon Sep 17 00:00:00 2001 +From: Brett Holman +Date: Thu, 28 Apr 2022 08:41:28 -0500 +Subject: [PATCH] Don't fail if IB and Ethernet devices 'collide' (#1411) + +Reference:https://github.com/canonical/cloud-init/commit/4d6576560de01ab4f4a75924a5b7b81fd9e5bd2a +Conflict:format diff. + +Current behavior for the collision of Ethernet mac +address and the "openstack 6 byte IB format" is to fail[1]. +This isn't a problem for the respective Ethernet and IB +networks, so don't force cloud-init to fail. + +[1] https://bugs.launchpad.net/cloud-init/+bug/1871326 +--- + cloudinit/net/__init__.py | 40 ++++++++++++++++++++++++++++++++------- + 1 file changed, 33 insertions(+), 7 deletions(-) + +diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py +index b007c9a..fba133e 100644 +--- a/cloudinit/net/__init__.py ++++ b/cloudinit/net/__init__.py +@@ -903,15 +903,41 @@ def get_interfaces_by_mac_on_linux(blacklist_drivers=None) -> dict: + "duplicate mac found! both '%s' and '%s' have mac '%s'" % + (name, ret[mac], mac)) + ret[mac] = name +- # Try to get an Infiniband hardware address (in 6 byte Ethernet format) +- # for the interface. ++ ++ # Pretend that an Infiniband GUID is an ethernet address for Openstack ++ # configuration purposes ++ # TODO: move this format to openstack + ib_mac = get_ib_interface_hwaddr(name, True) + if ib_mac: +- if ib_mac in ret: +- raise RuntimeError( +- "duplicate mac found! 
both '%s' and '%s' have mac '%s'" % +- (name, ret[ib_mac], ib_mac)) +- ret[ib_mac] = name ++ # If an Ethernet mac address happens to collide with a few bits in ++ # an IB GUID, prefer the ethernet address. ++ # ++ # Log a message in case a user is troubleshooting openstack, but ++ # don't fall over, since this really isn't _a_ problem, and ++ # openstack makes weird assumptions that cause it to fail it's ++ # really not _our_ problem. ++ # ++ # These few bits selected in get_ib_interface_hwaddr() are not ++ # guaranteed to be globally unique in InfiniBand, and really make ++ # no sense to compare them to Ethernet mac addresses. This appears ++ # to be a # workaround for openstack-specific behavior[1], and for ++ # now leave it to avoid breaking openstack ++ # but this should be removed from get_interfaces_by_mac_on_linux() ++ # because IB GUIDs are not mac addresses, and operate on a separate ++ # L2 protocol so address collision doesn't matter. ++ # ++ # [1] sources/helpers/openstack.py:convert_net_json() expects ++ # net.get_interfaces_by_mac() to return IB addresses in this format ++ if ib_mac not in ret: ++ ret[ib_mac] = name ++ else: ++ LOG.warning( ++ "Ethernet and InfiniBand interfaces have the same address" ++ " both '%s' and '%s' have address '%s'.", ++ name, ++ ret[ib_mac], ++ ib_mac, ++ ) + return ret + + +-- +2.33.0 + + diff --git a/backport-Drop-support-of-sk-keys-in-cc_ssh-1451.patch b/backport-Drop-support-of-sk-keys-in-cc_ssh-1451.patch new file mode 100644 index 0000000000000000000000000000000000000000..b4585ec86564c1bd371c01feadbde148b1df9bf1 --- /dev/null +++ b/backport-Drop-support-of-sk-keys-in-cc_ssh-1451.patch @@ -0,0 +1,572 @@ +From 2db1c58512760fcb5a850df852e44833c97ed856 Mon Sep 17 00:00:00 2001 +From: Alberto Contreras +Date: Fri, 13 May 2022 21:48:28 +0200 +Subject: [PATCH] Drop support of *-sk keys in cc_ssh (#1451) + +Reference:https://github.com/canonical/cloud-init/commit/2db1c58512760fcb5a850df852e44833c97ed856 +Conflict:(1)only change tests/unittests/config/test_cc_ssh.py +(2)don't add test_handle_invalid_ssh_keys_are_skipped +(3)don't change TestSshSchema +(4)format diffs. + +- Delete *-sk keys from cloud-init-schema.json under +cc_ssh.{ssh_keys,ssh_genkeytypes} +- Log a warning if some given key is unsupported or unknown. +- Port tests to Pytests, add some types and increase unittest +coverage. +--- + cloudinit/config/tests/test_ssh.py | 457 ++++++++++++----------------- + 1 file changed, 192 insertions(+), 265 deletions(-) + +diff --git a/cloudinit/config/tests/test_ssh.py b/cloudinit/config/tests/test_ssh.py +index 87ccdb6..714949c 100644 +--- a/cloudinit/config/tests/test_ssh.py ++++ b/cloudinit/config/tests/test_ssh.py +@@ -1,11 +1,14 @@ + # This file is part of cloud-init. See LICENSE file for license information. 
+ + import os.path ++from typing import Optional ++from unittest import mock + + from cloudinit.config import cc_ssh + from cloudinit import ssh_util +-from cloudinit.tests.helpers import CiTestCase, mock ++from tests.unittests.util import get_cloud + import logging ++import pytest + + LOG = logging.getLogger(__name__) + +@@ -14,68 +17,79 @@ KEY_NAMES_NO_DSA = [name for name in cc_ssh.GENERATE_KEY_NAMES + if name not in 'dsa'] + + ++@pytest.fixture(scope="function") ++def publish_hostkey_test_setup(tmpdir): ++ test_hostkeys = { ++ "dsa": ("ssh-dss", "AAAAB3NzaC1kc3MAAACB"), ++ "ecdsa": ("ecdsa-sha2-nistp256", "AAAAE2VjZ"), ++ "ed25519": ("ssh-ed25519", "AAAAC3NzaC1lZDI"), ++ "rsa": ("ssh-rsa", "AAAAB3NzaC1yc2EAAA"), ++ } ++ test_hostkey_files = [] ++ hostkey_tmpdir = tmpdir ++ for key_type in cc_ssh.GENERATE_KEY_NAMES: ++ filename = "ssh_host_%s_key.pub" % key_type ++ filepath = os.path.join(hostkey_tmpdir, filename) ++ test_hostkey_files.append(filepath) ++ with open(filepath, "w") as f: ++ f.write(" ".join(test_hostkeys[key_type])) ++ ++ cc_ssh.KEY_FILE_TPL = os.path.join(hostkey_tmpdir, "ssh_host_%s_key") ++ yield test_hostkeys, test_hostkey_files ++ ++ ++def _replace_options(user: Optional[str] = None) -> str: ++ options = ssh_util.DISABLE_USER_OPTS ++ if user: ++ new_user = user ++ else: ++ new_user = "NONE" ++ options = options.replace("$USER", new_user) ++ options = options.replace("$DISABLE_USER", "root") ++ return options ++ + @mock.patch(MODPATH + "ssh_util.setup_user_keys") +-class TestHandleSsh(CiTestCase): ++class TestHandleSsh: + """Test cc_ssh handling of ssh config.""" + +- def _publish_hostkey_test_setup(self): +- self.test_hostkeys = { +- 'dsa': ('ssh-dss', 'AAAAB3NzaC1kc3MAAACB'), +- 'ecdsa': ('ecdsa-sha2-nistp256', 'AAAAE2VjZ'), +- 'ed25519': ('ssh-ed25519', 'AAAAC3NzaC1lZDI'), +- 'rsa': ('ssh-rsa', 'AAAAB3NzaC1yc2EAAA'), +- } +- self.test_hostkey_files = [] +- hostkey_tmpdir = self.tmp_dir() +- for key_type in cc_ssh.GENERATE_KEY_NAMES: +- key_data = self.test_hostkeys[key_type] +- filename = 'ssh_host_%s_key.pub' % key_type +- filepath = os.path.join(hostkey_tmpdir, filename) +- self.test_hostkey_files.append(filepath) +- with open(filepath, 'w') as f: +- f.write(' '.join(key_data)) +- +- cc_ssh.KEY_FILE_TPL = os.path.join(hostkey_tmpdir, 'ssh_host_%s_key') +- +- def test_apply_credentials_with_user(self, m_setup_keys): +- """Apply keys for the given user and root.""" +- keys = ["key1"] +- user = "clouduser" +- cc_ssh.apply_credentials(keys, user, False, ssh_util.DISABLE_USER_OPTS) +- self.assertEqual([mock.call(set(keys), user), +- mock.call(set(keys), "root", options="")], +- m_setup_keys.call_args_list) +- +- def test_apply_credentials_with_no_user(self, m_setup_keys): +- """Apply keys for root only.""" +- keys = ["key1"] +- user = None +- cc_ssh.apply_credentials(keys, user, False, ssh_util.DISABLE_USER_OPTS) +- self.assertEqual([mock.call(set(keys), "root", options="")], +- m_setup_keys.call_args_list) +- +- def test_apply_credentials_with_user_disable_root(self, m_setup_keys): +- """Apply keys for the given user and disable root ssh.""" +- keys = ["key1"] +- user = "clouduser" +- options = ssh_util.DISABLE_USER_OPTS +- cc_ssh.apply_credentials(keys, user, True, options) +- options = options.replace("$USER", user) +- options = options.replace("$DISABLE_USER", "root") +- self.assertEqual([mock.call(set(keys), user), +- mock.call(set(keys), "root", options=options)], +- m_setup_keys.call_args_list) +- +- def test_apply_credentials_with_no_user_disable_root(self, 
m_setup_keys): +- """Apply keys no user and disable root ssh.""" +- keys = ["key1"] +- user = None ++ @pytest.mark.parametrize( ++ "keys,user,disable_root_opts", ++ [ ++ # For the given user and root. ++ pytest.param(["key1"], "clouduser", False, id="with_user"), ++ # For root only. ++ pytest.param(["key1"], None, False, id="with_no_user"), ++ # For the given user and disable root ssh. ++ pytest.param( ++ ["key1"], ++ "clouduser", ++ True, ++ id="with_user_disable_root", ++ ), ++ # No user and disable root ssh. ++ pytest.param( ++ ["key1"], ++ None, ++ True, ++ id="with_no_user_disable_root", ++ ), ++ ], ++ ) ++ def test_apply_credentials( ++ self, m_setup_keys, keys, user, disable_root_opts ++ ): + options = ssh_util.DISABLE_USER_OPTS +- cc_ssh.apply_credentials(keys, user, True, options) +- options = options.replace("$USER", "NONE") +- options = options.replace("$DISABLE_USER", "root") +- self.assertEqual([mock.call(set(keys), "root", options=options)], +- m_setup_keys.call_args_list) ++ cc_ssh.apply_credentials(keys, user, disable_root_opts, options) ++ if not disable_root_opts: ++ expected_options = "" ++ else: ++ expected_options = _replace_options(user) ++ expected_calls = [ ++ mock.call(set(keys), "root", options=expected_options) ++ ] ++ if user: ++ expected_calls = [mock.call(set(keys), user)] + expected_calls ++ assert expected_calls == m_setup_keys.call_args_list + + @mock.patch(MODPATH + "glob.glob") + @mock.patch(MODPATH + "ug_util.normalize_users_groups") +@@ -90,20 +104,20 @@ class TestHandleSsh(CiTestCase): + m_path_exists.return_value = True + m_nug.return_value = ([], {}) + cc_ssh.PUBLISH_HOST_KEYS = False +- cloud = self.tmp_cloud( +- distro='ubuntu', metadata={'public-keys': keys}) ++ cloud = get_cloud(distro="ubuntu", metadata={"public-keys": keys}) + cc_ssh.handle("name", cfg, cloud, LOG, None) + options = ssh_util.DISABLE_USER_OPTS.replace("$USER", "NONE") + options = options.replace("$DISABLE_USER", "root") + m_glob.assert_called_once_with('/etc/ssh/ssh_host_*key*') +- self.assertIn( +- [mock.call('/etc/ssh/ssh_host_rsa_key'), +- mock.call('/etc/ssh/ssh_host_dsa_key'), +- mock.call('/etc/ssh/ssh_host_ecdsa_key'), +- mock.call('/etc/ssh/ssh_host_ed25519_key')], +- m_path_exists.call_args_list) +- self.assertEqual([mock.call(set(keys), "root", options=options)], +- m_setup_keys.call_args_list) ++ assert [ ++ mock.call("/etc/ssh/ssh_host_rsa_key"), ++ mock.call("/etc/ssh/ssh_host_dsa_key"), ++ mock.call("/etc/ssh/ssh_host_ecdsa_key"), ++ mock.call("/etc/ssh/ssh_host_ed25519_key"), ++ ] in m_path_exists.call_args_list ++ assert [ ++ mock.call(set(keys), "root", options=options) ++ ] == m_setup_keys.call_args_list + + @mock.patch(MODPATH + "glob.glob") + @mock.patch(MODPATH + "ug_util.normalize_users_groups") +@@ -120,231 +134,144 @@ class TestHandleSsh(CiTestCase): + # Mock os.path.exits to True to short-circuit the key writing logic + m_path_exists.return_value = True + m_nug.return_value = ({user: {"default": user}}, {}) +- cloud = self.tmp_cloud( +- distro='ubuntu', metadata={'public-keys': keys}) +- cc_ssh.handle("name", cfg, cloud, LOG, None) +- +- options = ssh_util.DISABLE_USER_OPTS.replace("$USER", user) +- options = options.replace("$DISABLE_USER", "root") +- self.assertEqual([mock.call(set(), user), +- mock.call(set(), "root", options=options)], +- m_setup_keys.call_args_list) +- +- @mock.patch(MODPATH + "glob.glob") +- @mock.patch(MODPATH + "ug_util.normalize_users_groups") +- @mock.patch(MODPATH + "os.path.exists") +- def 
test_handle_no_cfg_and_default_root(self, m_path_exists, m_nug, +- m_glob, m_setup_keys): +- """Test handle with no config and a default distro user.""" +- cfg = {} +- keys = ["key1"] +- user = "clouduser" +- m_glob.return_value = [] # Return no matching keys to prevent removal +- # Mock os.path.exits to True to short-circuit the key writing logic +- m_path_exists.return_value = True +- m_nug.return_value = ({user: {"default": user}}, {}) +- cloud = self.tmp_cloud( +- distro='ubuntu', metadata={'public-keys': keys}) ++ cloud = get_cloud(distro="ubuntu", metadata={"public-keys": keys}) + cc_ssh.handle("name", cfg, cloud, LOG, None) + + options = ssh_util.DISABLE_USER_OPTS.replace("$USER", user) + options = options.replace("$DISABLE_USER", "root") +- self.assertEqual([mock.call(set(keys), user), +- mock.call(set(keys), "root", options=options)], +- m_setup_keys.call_args_list) +- +- @mock.patch(MODPATH + "glob.glob") +- @mock.patch(MODPATH + "ug_util.normalize_users_groups") +- @mock.patch(MODPATH + "os.path.exists") +- def test_handle_cfg_with_explicit_disable_root(self, m_path_exists, m_nug, +- m_glob, m_setup_keys): +- """Test handle with explicit disable_root and a default distro user.""" +- # This test is identical to test_handle_no_cfg_and_default_root, +- # except this uses an explicit cfg value +- cfg = {"disable_root": True} +- keys = ["key1"] +- user = "clouduser" +- m_glob.return_value = [] # Return no matching keys to prevent removal +- # Mock os.path.exits to True to short-circuit the key writing logic +- m_path_exists.return_value = True +- m_nug.return_value = ({user: {"default": user}}, {}) +- cloud = self.tmp_cloud( +- distro='ubuntu', metadata={'public-keys': keys}) +- cc_ssh.handle("name", cfg, cloud, LOG, None) +- +- options = ssh_util.DISABLE_USER_OPTS.replace("$USER", user) +- options = options.replace("$DISABLE_USER", "root") +- self.assertEqual([mock.call(set(keys), user), +- mock.call(set(keys), "root", options=options)], +- m_setup_keys.call_args_list) +- ++ assert [ ++ mock.call(set(), user), ++ mock.call(set(), "root", options=options), ++ ] == m_setup_keys.call_args_list ++ ++ @pytest.mark.parametrize( ++ "cfg,mock_get_public_ssh_keys,empty_opts", ++ [ ++ pytest.param({}, False, False, id="no_cfg"), ++ pytest.param( ++ {"disable_root": True}, ++ False, ++ False, ++ id="explicit_disable_root", ++ ), ++ # When disable_root == False, the ssh redirect for root is skipped ++ pytest.param( ++ {"disable_root": False}, ++ True, ++ True, ++ id="cfg_without_disable_root", ++ ), ++ ], ++ ) + @mock.patch(MODPATH + "glob.glob") + @mock.patch(MODPATH + "ug_util.normalize_users_groups") + @mock.patch(MODPATH + "os.path.exists") +- def test_handle_cfg_without_disable_root(self, m_path_exists, m_nug, +- m_glob, m_setup_keys): +- """Test handle with disable_root == False.""" +- # When disable_root == False, the ssh redirect for root is skipped +- cfg = {"disable_root": False} ++ def test_handle_default_root( ++ self, ++ m_path_exists, ++ m_nug, ++ m_glob, ++ m_setup_keys, ++ cfg, ++ mock_get_public_ssh_keys, ++ empty_opts, ++ ): ++ """Test handle with a default distro user.""" + keys = ["key1"] + user = "clouduser" + m_glob.return_value = [] # Return no matching keys to prevent removal + # Mock os.path.exits to True to short-circuit the key writing logic + m_path_exists.return_value = True + m_nug.return_value = ({user: {"default": user}}, {}) +- cloud = self.tmp_cloud( +- distro='ubuntu', metadata={'public-keys': keys}) ++ cloud = get_cloud(distro="ubuntu", 
metadata={"public-keys": keys}) ++ if mock_get_public_ssh_keys: ++ cloud.get_public_ssh_keys = mock.Mock(return_value=keys) + cloud.get_public_ssh_keys = mock.Mock(return_value=keys) + cc_ssh.handle("name", cfg, cloud, LOG, None) + +- self.assertEqual([mock.call(set(keys), user), +- mock.call(set(keys), "root", options="")], +- m_setup_keys.call_args_list) +- +- @mock.patch(MODPATH + "glob.glob") +- @mock.patch(MODPATH + "ug_util.normalize_users_groups") +- @mock.patch(MODPATH + "os.path.exists") +- def test_handle_publish_hostkeys_default( +- self, m_path_exists, m_nug, m_glob, m_setup_keys): +- """Test handle with various configs for ssh_publish_hostkeys.""" +- self._publish_hostkey_test_setup() +- cc_ssh.PUBLISH_HOST_KEYS = True +- keys = ["key1"] +- user = "clouduser" +- # Return no matching keys for first glob, test keys for second. +- m_glob.side_effect = iter([ +- [], +- self.test_hostkey_files, +- ]) +- # Mock os.path.exits to True to short-circuit the key writing logic +- m_path_exists.return_value = True +- m_nug.return_value = ({user: {"default": user}}, {}) +- cloud = self.tmp_cloud( +- distro='ubuntu', metadata={'public-keys': keys}) +- cloud.datasource.publish_host_keys = mock.Mock() +- +- cfg = {} +- expected_call = [self.test_hostkeys[key_type] for key_type +- in KEY_NAMES_NO_DSA] +- cc_ssh.handle("name", cfg, cloud, LOG, None) +- self.assertEqual([mock.call(expected_call)], +- cloud.datasource.publish_host_keys.call_args_list) +- +- @mock.patch(MODPATH + "glob.glob") +- @mock.patch(MODPATH + "ug_util.normalize_users_groups") +- @mock.patch(MODPATH + "os.path.exists") +- def test_handle_publish_hostkeys_config_enable( +- self, m_path_exists, m_nug, m_glob, m_setup_keys): +- """Test handle with various configs for ssh_publish_hostkeys.""" +- self._publish_hostkey_test_setup() +- cc_ssh.PUBLISH_HOST_KEYS = False +- keys = ["key1"] +- user = "clouduser" +- # Return no matching keys for first glob, test keys for second. +- m_glob.side_effect = iter([ +- [], +- self.test_hostkey_files, +- ]) +- # Mock os.path.exits to True to short-circuit the key writing logic +- m_path_exists.return_value = True +- m_nug.return_value = ({user: {"default": user}}, {}) +- cloud = self.tmp_cloud( +- distro='ubuntu', metadata={'public-keys': keys}) +- cloud.datasource.publish_host_keys = mock.Mock() +- +- cfg = {'ssh_publish_hostkeys': {'enabled': True}} +- expected_call = [self.test_hostkeys[key_type] for key_type +- in KEY_NAMES_NO_DSA] +- cc_ssh.handle("name", cfg, cloud, LOG, None) +- self.assertEqual([mock.call(expected_call)], +- cloud.datasource.publish_host_keys.call_args_list) +- +- @mock.patch(MODPATH + "glob.glob") +- @mock.patch(MODPATH + "ug_util.normalize_users_groups") +- @mock.patch(MODPATH + "os.path.exists") +- def test_handle_publish_hostkeys_config_disable( +- self, m_path_exists, m_nug, m_glob, m_setup_keys): +- """Test handle with various configs for ssh_publish_hostkeys.""" +- self._publish_hostkey_test_setup() +- cc_ssh.PUBLISH_HOST_KEYS = True +- keys = ["key1"] +- user = "clouduser" +- # Return no matching keys for first glob, test keys for second. 
+- m_glob.side_effect = iter([ +- [], +- self.test_hostkey_files, +- ]) +- # Mock os.path.exits to True to short-circuit the key writing logic +- m_path_exists.return_value = True +- m_nug.return_value = ({user: {"default": user}}, {}) +- cloud = self.tmp_cloud( +- distro='ubuntu', metadata={'public-keys': keys}) +- cloud.datasource.publish_host_keys = mock.Mock() +- +- cfg = {'ssh_publish_hostkeys': {'enabled': False}} +- cc_ssh.handle("name", cfg, cloud, LOG, None) +- self.assertFalse(cloud.datasource.publish_host_keys.call_args_list) +- cloud.datasource.publish_host_keys.assert_not_called() +- ++ if empty_opts: ++ options = "" ++ else: ++ options = _replace_options(user) ++ assert [ ++ mock.call(set(keys), user), ++ mock.call(set(keys), "root", options=options), ++ ] == m_setup_keys.call_args_list ++ ++ @pytest.mark.parametrize( ++ "cfg, expected_key_types", ++ [ ++ pytest.param({}, KEY_NAMES_NO_DSA, id="default"), ++ pytest.param( ++ {"ssh_publish_hostkeys": {"enabled": True}}, ++ KEY_NAMES_NO_DSA, ++ id="config_enable", ++ ), ++ pytest.param( ++ {"ssh_publish_hostkeys": {"enabled": False}}, ++ None, ++ id="config_disable", ++ ), ++ pytest.param( ++ { ++ "ssh_publish_hostkeys": { ++ "enabled": True, ++ "blacklist": ["dsa", "rsa"], ++ } ++ }, ++ ["ecdsa", "ed25519"], ++ id="config_blacklist", ++ ), ++ pytest.param( ++ {"ssh_publish_hostkeys": {"enabled": True, "blacklist": []}}, ++ cc_ssh.GENERATE_KEY_NAMES, ++ id="empty_blacklist", ++ ), ++ ], ++ ) + @mock.patch(MODPATH + "glob.glob") + @mock.patch(MODPATH + "ug_util.normalize_users_groups") + @mock.patch(MODPATH + "os.path.exists") +- def test_handle_publish_hostkeys_config_blacklist( +- self, m_path_exists, m_nug, m_glob, m_setup_keys): ++ def test_handle_publish_hostkeys( ++ self, ++ m_path_exists, ++ m_nug, ++ m_glob, ++ m_setup_keys, ++ publish_hostkey_test_setup, ++ cfg, ++ expected_key_types, ++ ): + """Test handle with various configs for ssh_publish_hostkeys.""" +- self._publish_hostkey_test_setup() ++ test_hostkeys, test_hostkey_files = publish_hostkey_test_setup + cc_ssh.PUBLISH_HOST_KEYS = True + keys = ["key1"] + user = "clouduser" + # Return no matching keys for first glob, test keys for second. + m_glob.side_effect = iter([ + [], +- self.test_hostkey_files, ++ test_hostkey_files, + ]) + # Mock os.path.exits to True to short-circuit the key writing logic + m_path_exists.return_value = True + m_nug.return_value = ({user: {"default": user}}, {}) +- cloud = self.tmp_cloud( +- distro='ubuntu', metadata={'public-keys': keys}) ++ cloud = get_cloud(distro='ubuntu', metadata={'public-keys': keys}) + cloud.datasource.publish_host_keys = mock.Mock() + +- cfg = {'ssh_publish_hostkeys': {'enabled': True, +- 'blacklist': ['dsa', 'rsa']}} +- expected_call = [self.test_hostkeys[key_type] for key_type +- in ['ecdsa', 'ed25519']] +- cc_ssh.handle("name", cfg, cloud, LOG, None) +- self.assertEqual([mock.call(expected_call)], +- cloud.datasource.publish_host_keys.call_args_list) +- +- @mock.patch(MODPATH + "glob.glob") +- @mock.patch(MODPATH + "ug_util.normalize_users_groups") +- @mock.patch(MODPATH + "os.path.exists") +- def test_handle_publish_hostkeys_empty_blacklist( +- self, m_path_exists, m_nug, m_glob, m_setup_keys): +- """Test handle with various configs for ssh_publish_hostkeys.""" +- self._publish_hostkey_test_setup() +- cc_ssh.PUBLISH_HOST_KEYS = True +- keys = ["key1"] +- user = "clouduser" +- # Return no matching keys for first glob, test keys for second. 
+- m_glob.side_effect = iter([ +- [], +- self.test_hostkey_files, +- ]) +- # Mock os.path.exits to True to short-circuit the key writing logic +- m_path_exists.return_value = True +- m_nug.return_value = ({user: {"default": user}}, {}) +- cloud = self.tmp_cloud( +- distro='ubuntu', metadata={'public-keys': keys}) +- cloud.datasource.publish_host_keys = mock.Mock() +- +- cfg = {'ssh_publish_hostkeys': {'enabled': True, +- 'blacklist': []}} +- expected_call = [self.test_hostkeys[key_type] for key_type +- in cc_ssh.GENERATE_KEY_NAMES] ++ expected_calls = [] ++ if expected_key_types is not None: ++ expected_calls = [ ++ mock.call( ++ [ ++ test_hostkeys[key_type] ++ for key_type in expected_key_types ++ ] ++ ) ++ ] + cc_ssh.handle("name", cfg, cloud, LOG, None) +- self.assertEqual([mock.call(expected_call)], +- cloud.datasource.publish_host_keys.call_args_list) ++ assert ( ++ expected_calls == cloud.datasource.publish_host_keys.call_args_list ++ ) + + @mock.patch(MODPATH + "ug_util.normalize_users_groups") + @mock.patch(MODPATH + "util.write_file") +@@ -360,7 +287,7 @@ class TestHandleSsh(CiTestCase): + public_name = "{}_public".format(key_type) + cert_name = "{}_certificate".format(key_type) + +- # Actual key contents don"t have to be realistic ++ # Actual key contents don't have to be realistic + private_value = "{}_PRIVATE_KEY".format(key_type) + public_value = "{}_PUBLIC_KEY".format(key_type) + cert_value = "{}_CERT_KEY".format(key_type) +@@ -397,9 +324,9 @@ class TestHandleSsh(CiTestCase): + m_nug.return_value = ([], {}) + with mock.patch(MODPATH + 'ssh_util.parse_ssh_config', + return_value=[]): +- cc_ssh.handle("name", cfg, self.tmp_cloud(distro='ubuntu'), +- LOG, None) ++ cc_ssh.handle("name", cfg, get_cloud(distro="ubuntu"), LOG, None) + + # Check that all expected output has been done. 
+ for call_ in expected_calls: +- self.assertIn(call_, m_write_file.call_args_list) ++ assert call_ in m_write_file.call_args_list ++ +-- +2.33.0 + + diff --git a/backport-Ensure-network-ready-before-cloud-init-service-runs.patch b/backport-Ensure-network-ready-before-cloud-init-service-runs.patch new file mode 100644 index 0000000000000000000000000000000000000000..1c79baeb09625c5db0ccfcead539e172a25dc721 --- /dev/null +++ b/backport-Ensure-network-ready-before-cloud-init-service-runs.patch @@ -0,0 +1,30 @@ +From 6e725f36647407d201af0603d7db11fc96a93d4d Mon Sep 17 00:00:00 2001 +From: James Falcon +Date: Tue, 13 Dec 2022 10:55:23 -0600 +Subject: [PATCH] Ensure network ready before cloud-init service runs on +RHEL + (#1893) + +Reference:https://github.com/canonical/cloud-init/commit/6e725f36647407d201af0603d7db11fc96a93d4d +Conflict:NA + +LP: #1998655 +--- + systemd/cloud-init.service.tmpl | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/systemd/cloud-init.service.tmpl b/systemd/cloud-init.service.tmpl +index 25830e0..d62b46f 100644 +--- a/systemd/cloud-init.service.tmpl ++++ b/systemd/cloud-init.service.tmpl +@@ -16,6 +16,7 @@ After=networking.service + "openeuler", "rhel", "rocky", "virtuozzo"] %} + After=network.service + After=NetworkManager.service ++After=NetworkManager-wait-online.service + {% endif %} + {% if variant in ["suse"] %} + After=wicked.service +-- +2.27.0 + diff --git a/backport-Fix-IPv6-netmask-format-for-sysconfig-1215.patch b/backport-Fix-IPv6-netmask-format-for-sysconfig-1215.patch new file mode 100644 index 0000000000000000000000000000000000000000..07254f92741998e529b3c3eb47d5366bb0fa4456 --- /dev/null +++ b/backport-Fix-IPv6-netmask-format-for-sysconfig-1215.patch @@ -0,0 +1,549 @@ +From b97a30f0a05c1dea918c46ca9c05c869d15fe2d5 Mon Sep 17 00:00:00 2001 +From: Harald +Date: Tue, 8 Feb 2022 15:49:00 +0100 +Subject: [PATCH] Fix IPv6 netmask format for sysconfig (#1215) + +Reference:https://github.com/canonical/cloud-init/commit/b97a30f0a05c1dea918c46ca9c05c869d15fe2d5 +Conflict:format diffs. + +This change converts the IPv6 netmask from the network_data.json[1] +format to the CIDR style, /. + +Using an IPv6 address like ffff:ffff:ffff:ffff:: does not work with +NetworkManager, nor networkscripts. + +NetworkManager will ignore the route, logging: + ifcfg-rh: ignoring invalid route at \ + "::/:: via fd00:fd00:fd00:2::fffe dev " \ + (/etc/sysconfig/network-scripts/route6-:3): \ + Argument for "::/::" is not ADDR/PREFIX format + +Similarly if using networkscripts, ip route fail with error: + Error: inet6 prefix is expected rather than \ + "fd00:fd00:fd00::/ffff:ffff:ffff:ffff::". + +Also a bit of refactoring ... + +cloudinit.net.sysconfig.Route.to_string: +* Move a couple of lines around to reduce repeated code. +* if "ADDRESS" not in key -> continute, so that the + code block following it can be de-indented. +cloudinit.net.network_state: +* Refactors the ipv4_mask_to_net_prefix, ipv6_mask_to_net_prefix + removes mask_to_net_prefix methods. Utilize ipaddress library to + do some of the heavy lifting. 
+ +LP: #1959148 +--- + cloudinit/net/__init__.py | 7 +- + cloudinit/net/network_state.py | 101 +++++++----------- + cloudinit/net/sysconfig.py | 91 +++++++++------- + cloudinit/sources/DataSourceOpenNebula.py | 2 +- + .../sources/helpers/vmware/imc/config_nic.py | 4 +- + cloudinit/net/tests/test_init.py | 4 +- + cloudinit/net/tests/test_network_state.py | 58 +++++++++- + tests/unittests/test_net.py | 78 +++++++++++++- + 8 files changed, 233 insertions(+), 112 deletions(-) + +diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py +index a503210..d3ac4c8 100644 +--- a/cloudinit/net/__init__.py ++++ b/cloudinit/net/__init__.py +@@ -15,7 +15,7 @@ from typing import Any, Dict + + from cloudinit import subp + from cloudinit import util +-from cloudinit.net.network_state import mask_to_net_prefix ++from cloudinit.net.network_state import ipv4_mask_to_net_prefix + from cloudinit.url_helper import UrlError, readurl + + LOG = logging.getLogger(__name__) +@@ -1140,10 +1140,11 @@ class EphemeralIPv4Network(object): + 'Cannot init network on {0} with {1}/{2} and bcast {3}'.format( + interface, ip, prefix_or_mask, broadcast)) + try: +- self.prefix = mask_to_net_prefix(prefix_or_mask) ++ self.prefix = ipv4_mask_to_net_prefix(prefix_or_mask) + except ValueError as e: + raise ValueError( +- 'Cannot setup network: {0}'.format(e) ++ "Cannot setup network, invalid prefix or " ++ "netmask: {0}".format(e) + ) from e + + self.connectivity_url_data = connectivity_url_data +diff --git a/cloudinit/net/network_state.py b/cloudinit/net/network_state.py +index ac44304..0a4e0c6 100644 +--- a/cloudinit/net/network_state.py ++++ b/cloudinit/net/network_state.py +@@ -6,6 +6,7 @@ + + import copy + import functools ++import ipaddress + import logging + import socket + import struct +@@ -878,10 +879,16 @@ def _normalize_net_keys(network, address_keys=()): + try: + prefix = int(maybe_prefix) + except ValueError: +- # this supports input of
/255.255.255.0 +- prefix = mask_to_net_prefix(maybe_prefix) +- elif netmask: +- prefix = mask_to_net_prefix(netmask) ++ if ipv6: ++ # this supports input of ffff:ffff:ffff:: ++ prefix = ipv6_mask_to_net_prefix(maybe_prefix) ++ else: ++ # this supports input of 255.255.255.0 ++ prefix = ipv4_mask_to_net_prefix(maybe_prefix) ++ elif netmask and not ipv6: ++ prefix = ipv4_mask_to_net_prefix(netmask) ++ elif netmask and ipv6: ++ prefix = ipv6_mask_to_net_prefix(netmask) + elif 'prefix' in net: + prefix = int(net['prefix']) + else: +@@ -978,73 +985,41 @@ def ipv4_mask_to_net_prefix(mask): + str(24) => 24 + "24" => 24 + """ +- if isinstance(mask, int): +- return mask +- if isinstance(mask, str): +- try: +- return int(mask) +- except ValueError: +- pass +- else: +- raise TypeError("mask '%s' is not a string or int") +- +- if '.' not in mask: +- raise ValueError("netmask '%s' does not contain a '.'" % mask) +- +- toks = mask.split(".") +- if len(toks) != 4: +- raise ValueError("netmask '%s' had only %d parts" % (mask, len(toks))) +- +- return sum([bin(int(x)).count('1') for x in toks]) +- ++ return ipaddress.ip_network(f"0.0.0.0/{mask}").prefixlen + + def ipv6_mask_to_net_prefix(mask): + """Convert an ipv6 netmask (very uncommon) or prefix (64) to prefix. + +- If 'mask' is an integer or string representation of one then +- int(mask) will be returned. ++ If the input is already an integer or a string representation of ++ an integer, then int(mask) will be returned. ++ "ffff:ffff:ffff::" => 48 ++ "48" => 48 + """ +- +- if isinstance(mask, int): +- return mask +- if isinstance(mask, str): +- try: +- return int(mask) +- except ValueError: +- pass +- else: +- raise TypeError("mask '%s' is not a string or int") +- +- if ':' not in mask: +- raise ValueError("mask '%s' does not have a ':'") +- +- bitCount = [0, 0x8000, 0xc000, 0xe000, 0xf000, 0xf800, 0xfc00, 0xfe00, +- 0xff00, 0xff80, 0xffc0, 0xffe0, 0xfff0, 0xfff8, 0xfffc, +- 0xfffe, 0xffff] +- prefix = 0 +- for word in mask.split(':'): +- if not word or int(word, 16) == 0: +- break +- prefix += bitCount.index(int(word, 16)) +- +- return prefix +- +- +-def mask_to_net_prefix(mask): +- """Return the network prefix for the netmask provided. +- +- Supports ipv4 or ipv6 netmasks.""" + try: +- # if 'mask' is a prefix that is an integer. +- # then just return it. +- return int(mask) ++ # In the case the mask is already a prefix ++ prefixlen = ipaddress.ip_network(f"::/{mask}").prefixlen ++ return prefixlen + except ValueError: ++ # ValueError means mask is an IPv6 address representation and need ++ # conversion. + pass +- if is_ipv6_addr(mask): +- return ipv6_mask_to_net_prefix(mask) +- else: +- return ipv4_mask_to_net_prefix(mask) +- ++ ++ netmask = ipaddress.ip_address(mask) ++ mask_int = int(netmask) ++ # If the mask is all zeroes, just return it ++ if mask_int == 0: ++ return mask_int ++ ++ trailing_zeroes = min( ++ ipaddress.IPV6LENGTH, (~mask_int & (mask_int - 1)).bit_length() ++ ) ++ leading_ones = mask_int >> trailing_zeroes ++ prefixlen = ipaddress.IPV6LENGTH - trailing_zeroes ++ all_ones = (1 << prefixlen) - 1 ++ if leading_ones != all_ones: ++ raise ValueError("Invalid network mask '%s'" % mask) ++ ++ return prefixlen + + def mask_and_ipv4_to_bcast_addr(mask, ip): + """Calculate the broadcast address from the subnet mask and ip addr. 
+diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py +index 32a2d5a..4c6caef 100644 +--- a/cloudinit/net/sysconfig.py ++++ b/cloudinit/net/sysconfig.py +@@ -12,6 +12,7 @@ from cloudinit import util + from cloudinit import subp + from cloudinit.distros.parsers import networkmanager_conf + from cloudinit.distros.parsers import resolv_conf ++from cloudinit.net import network_state + + from . import renderer + from .network_state import ( +@@ -172,44 +173,60 @@ class Route(ConfigMap): + # (because Route can contain a mix of IPv4 and IPv6) + reindex = -1 + for key in sorted(self._conf.keys()): +- if 'ADDRESS' in key: +- index = key.replace('ADDRESS', '') +- address_value = str(self._conf[key]) +- # only accept combinations: +- # if proto ipv6 only display ipv6 routes +- # if proto ipv4 only display ipv4 routes +- # do not add ipv6 routes if proto is ipv4 +- # do not add ipv4 routes if proto is ipv6 +- # (this array will contain a mix of ipv4 and ipv6) +- if proto == "ipv4" and not self.is_ipv6_route(address_value): +- netmask_value = str(self._conf['NETMASK' + index]) +- gateway_value = str(self._conf['GATEWAY' + index]) +- # increase IPv4 index +- reindex = reindex + 1 +- buf.write("%s=%s\n" % ('ADDRESS' + str(reindex), +- _quote_value(address_value))) +- buf.write("%s=%s\n" % ('GATEWAY' + str(reindex), +- _quote_value(gateway_value))) +- buf.write("%s=%s\n" % ('NETMASK' + str(reindex), +- _quote_value(netmask_value))) +- metric_key = 'METRIC' + index +- if metric_key in self._conf: +- metric_value = str(self._conf['METRIC' + index]) +- buf.write("%s=%s\n" % ('METRIC' + str(reindex), +- _quote_value(metric_value))) +- elif proto == "ipv6" and self.is_ipv6_route(address_value): +- netmask_value = str(self._conf['NETMASK' + index]) +- gateway_value = str(self._conf['GATEWAY' + index]) +- metric_value = ( +- 'metric ' + str(self._conf['METRIC' + index]) +- if 'METRIC' + index in self._conf else '') +- buf.write( +- "%s/%s via %s %s dev %s\n" % (address_value, +- netmask_value, +- gateway_value, +- metric_value, +- self._route_name)) ++ if "ADDRESS" not in key: ++ continue + ++ index = key.replace("ADDRESS", "") ++ address_value = str(self._conf[key]) ++ netmask_value = str(self._conf["NETMASK" + index]) ++ gateway_value = str(self._conf["GATEWAY" + index]) ++ ++ # only accept combinations: ++ # if proto ipv6 only display ipv6 routes ++ # if proto ipv4 only display ipv4 routes ++ # do not add ipv6 routes if proto is ipv4 ++ # do not add ipv4 routes if proto is ipv6 ++ # (this array will contain a mix of ipv4 and ipv6) ++ if proto == "ipv4" and not self.is_ipv6_route(address_value): ++ # increase IPv4 index ++ reindex = reindex + 1 ++ buf.write( ++ "%s=%s\n" ++ % ("ADDRESS" + str(reindex), _quote_value(address_value)) ++ ) ++ buf.write( ++ "%s=%s\n" ++ % ("GATEWAY" + str(reindex), _quote_value(gateway_value)) ++ ) ++ buf.write( ++ "%s=%s\n" ++ % ("NETMASK" + str(reindex), _quote_value(netmask_value)) ++ ) ++ metric_key = "METRIC" + index ++ if metric_key in self._conf: ++ metric_value = str(self._conf["METRIC" + index]) ++ buf.write("%s=%s\n" ++ % ("METRIC" + str(reindex), _quote_value(metric_value)) ++ ) ++ elif proto == "ipv6" and self.is_ipv6_route(address_value): ++ prefix_value = network_state.ipv6_mask_to_net_prefix( ++ netmask_value ++ ) ++ metric_value = ( ++ "metric " + str(self._conf["METRIC" + index]) ++ if "METRIC" + index in self._conf ++ else "" ++ ) ++ buf.write( ++ "%s/%s via %s %s dev %s\n" ++ % ( ++ address_value, ++ prefix_value, ++ gateway_value, ++ 
metric_value, ++ self._route_name, ++ ) ++ ) + return buf.getvalue() + + +diff --git a/cloudinit/sources/DataSourceOpenNebula.py b/cloudinit/sources/DataSourceOpenNebula.py +index 21603fb..8b37f39 100644 +--- a/cloudinit/sources/DataSourceOpenNebula.py ++++ b/cloudinit/sources/DataSourceOpenNebula.py +@@ -237,7 +237,7 @@ class OpenNebulaNetwork(object): + # Set IPv4 address + devconf['addresses'] = [] + mask = self.get_mask(c_dev) +- prefix = str(net.mask_to_net_prefix(mask)) ++ prefix = str(net.ipv4_mask_to_net_prefix(mask)) + devconf['addresses'].append( + self.get_ip(c_dev, mac) + '/' + prefix) + +diff --git a/cloudinit/sources/helpers/vmware/imc/config_nic.py b/cloudinit/sources/helpers/vmware/imc/config_nic.py +index 9cd2c0c..3a45c67 100644 +--- a/cloudinit/sources/helpers/vmware/imc/config_nic.py ++++ b/cloudinit/sources/helpers/vmware/imc/config_nic.py +@@ -9,7 +9,7 @@ import logging + import os + import re + +-from cloudinit.net.network_state import mask_to_net_prefix ++from cloudinit.net.network_state import ipv4_mask_to_net_prefix + from cloudinit import subp + from cloudinit import util + +@@ -180,7 +180,7 @@ class NicConfigurator(object): + """ + route_list = [] + +- cidr = mask_to_net_prefix(netmask) ++ cidr = ipv4_mask_to_net_prefix(netmask) + + for gateway in gateways: + destination = "%s/%d" % (gen_subnet(gateway, netmask), cidr) +diff --git a/cloudinit/net/tests/test_init.py b/cloudinit/net/tests/test_init.py +index 2ef5ab7..215cabf 100644 +--- a/cloudinit/net/tests/test_init.py ++++ b/cloudinit/net/tests/test_init.py +@@ -588,7 +588,9 @@ class TestEphemeralIPV4Network(CiTestCase): + with net.EphemeralIPv4Network(**params): + pass + error = context_manager.exception +- self.assertIn('Cannot setup network: netmask', str(error)) ++ self.assertIn( ++ "Cannot setup network, invalid prefix or netmask: ", str(error) ++ ) + self.assertEqual(0, m_subp.call_count) + + def test_ephemeral_ipv4_network_performs_teardown(self, m_subp): +diff --git a/cloudinit/net/tests/test_network_state.py b/cloudinit/net/tests/test_network_state.py +index 45e9917..f03db50 100644 +--- a/cloudinit/net/tests/test_network_state.py ++++ b/cloudinit/net/tests/test_network_state.py +@@ -1,5 +1,5 @@ + # This file is part of cloud-init. See LICENSE file for license information. 
+- ++import ipaddress + from unittest import mock + + import pytest +@@ -161,4 +161,60 @@ class TestNetworkStateParseNameservers: + 'spam.local', + ] == sorted(config.dns_searchdomains) + ++class TestNetworkStateHelperFunctions(CiTestCase): ++ def test_mask_to_net_prefix_ipv4(self): ++ netmask_value = "255.255.255.0" ++ expected = 24 ++ prefix_value = network_state.ipv4_mask_to_net_prefix(netmask_value) ++ assert prefix_value == expected ++ ++ def test_mask_to_net_prefix_all_bits_ipv4(self): ++ netmask_value = "255.255.255.255" ++ expected = 32 ++ prefix_value = network_state.ipv4_mask_to_net_prefix(netmask_value) ++ assert prefix_value == expected ++ ++ def test_mask_to_net_prefix_to_many_bits_ipv4(self): ++ netmask_value = "33" ++ self.assertRaises( ++ ValueError, network_state.ipv4_mask_to_net_prefix, netmask_value ++ ) ++ ++ def test_mask_to_net_prefix_all_bits_ipv6(self): ++ netmask_value = "ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff" ++ expected = 128 ++ prefix_value = network_state.ipv6_mask_to_net_prefix(netmask_value) ++ assert prefix_value == expected ++ ++ def test_mask_to_net_prefix_ipv6(self): ++ netmask_value = "ffff:ffff:ffff:ffff::" ++ expected = 64 ++ prefix_value = network_state.ipv6_mask_to_net_prefix(netmask_value) ++ assert prefix_value == expected ++ ++ def test_mask_to_net_prefix_raises_value_error(self): ++ netmask_value = "ff:ff:ff:ff::" ++ self.assertRaises( ++ ValueError, network_state.ipv6_mask_to_net_prefix, netmask_value ++ ) ++ ++ def test_mask_to_net_prefix_to_many_bits_ipv6(self): ++ netmask_value = "129" ++ self.assertRaises( ++ ValueError, network_state.ipv6_mask_to_net_prefix, netmask_value ++ ) ++ ++ def test_mask_to_net_prefix_ipv4_object(self): ++ netmask_value = ipaddress.IPv4Address("255.255.255.255") ++ expected = 32 ++ prefix_value = network_state.ipv4_mask_to_net_prefix(netmask_value) ++ assert prefix_value == expected ++ ++ def test_mask_to_net_prefix_ipv6_object(self): ++ netmask_value = ipaddress.IPv6Address("ffff:ffff:ffff::") ++ expected = 48 ++ prefix_value = network_state.ipv6_mask_to_net_prefix(netmask_value) ++ assert prefix_value == expected ++ ++ + # vi: ts=4 expandtab +diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py +index 764e1c7..5013277 100644 +--- a/tests/unittests/test_net.py ++++ b/tests/unittests/test_net.py +@@ -2237,10 +2237,10 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true + routes: + - gateway: 2001:67c:1562:1 + network: 2001:67c:1 +- netmask: ffff:ffff:0 ++ netmask: "ffff:ffff::" + - gateway: 3001:67c:1562:1 + network: 3001:67c:1 +- netmask: ffff:ffff:0 ++ netmask: "ffff:ffff::" + metric: 10000 + """), + 'expected_netplan': textwrap.dedent(""" +@@ -2507,8 +2507,8 @@ iface bond0 inet6 static + 'route6-bond0': textwrap.dedent("""\ + # Created by cloud-init on instance boot automatically, do not edit. 
+ # +- 2001:67c:1/ffff:ffff:0 via 2001:67c:1562:1 dev bond0 +- 3001:67c:1/ffff:ffff:0 via 3001:67c:1562:1 metric 10000 dev bond0 ++ 2001:67c:1/32 via 2001:67c:1562:1 dev bond0 ++ 3001:67c:1/32 via 3001:67c:1562:1 metric 10000 dev bond0 + """), + 'route-bond0': textwrap.dedent("""\ + ADDRESS0=10.1.3.0 +@@ -3297,6 +3297,76 @@ USERCTL=no + renderer.render_network_state(ns, target=render_dir) + self.assertEqual([], os.listdir(render_dir)) + ++ def test_invalid_network_mask_ipv6(self): ++ net_json = { ++ "services": [{"type": "dns", "address": "172.19.0.12"}], ++ "networks": [ ++ { ++ "network_id": "public-ipv6", ++ "type": "ipv6", ++ "netmask": "", ++ "link": "tap1a81968a-79", ++ "routes": [ ++ { ++ "gateway": "2001:DB8::1", ++ "netmask": "ff:ff:ff:ff::", ++ "network": "2001:DB8:1::1", ++ }, ++ ], ++ "ip_address": "2001:DB8::10", ++ "id": "network1", ++ } ++ ], ++ "links": [ ++ { ++ "ethernet_mac_address": "fa:16:3e:ed:9a:59", ++ "mtu": None, ++ "type": "bridge", ++ "id": "tap1a81968a-79", ++ "vif_id": "1a81968a-797a-400f-8a80-567f997eb93f", ++ }, ++ ], ++ } ++ macs = {"fa:16:3e:ed:9a:59": "eth0"} ++ network_cfg = openstack.convert_net_json(net_json, known_macs=macs) ++ with self.assertRaises(ValueError): ++ network_state.parse_net_config_data(network_cfg, skip_broken=False) ++ ++ def test_invalid_network_mask_ipv4(self): ++ net_json = { ++ "services": [{"type": "dns", "address": "172.19.0.12"}], ++ "networks": [ ++ { ++ "network_id": "public-ipv4", ++ "type": "ipv4", ++ "netmask": "", ++ "link": "tap1a81968a-79", ++ "routes": [ ++ { ++ "gateway": "172.20.0.1", ++ "netmask": "255.234.255.0", ++ "network": "172.19.0.0", ++ }, ++ ], ++ "ip_address": "172.20.0.10", ++ "id": "network1", ++ } ++ ], ++ "links": [ ++ { ++ "ethernet_mac_address": "fa:16:3e:ed:9a:59", ++ "mtu": None, ++ "type": "bridge", ++ "id": "tap1a81968a-79", ++ "vif_id": "1a81968a-797a-400f-8a80-567f997eb93f", ++ }, ++ ], ++ } ++ macs = {"fa:16:3e:ed:9a:59": "eth0"} ++ network_cfg = openstack.convert_net_json(net_json, known_macs=macs) ++ with self.assertRaises(ValueError): ++ network_state.parse_net_config_data(network_cfg, skip_broken=False) ++ + def test_openstack_rendering_samples(self): + for os_sample in OS_SAMPLES: + render_dir = self.tmp_dir() +-- +2.33.0 + + diff --git a/backport-Fix-KeyError-in-iproute-pformat-3287.patch b/backport-Fix-KeyError-in-iproute-pformat-3287.patch new file mode 100644 index 0000000000000000000000000000000000000000..203c3b6a508089a1328e213ec69ebd70efb28ee7 --- /dev/null +++ b/backport-Fix-KeyError-in-iproute-pformat-3287.patch @@ -0,0 +1,157 @@ +From 7c52b8a5fbcfe1e7442bbd011956b52e303745c5 Mon Sep 17 00:00:00 2001 +From: Dmitry Zykov <44090270+zykovd@users.noreply.github.com> +Date: Fri, 12 May 2023 21:48:10 +0300 +Subject: [PATCH] Fix KeyError in iproute pformat (#3287) + +Reference:https://github.com/canonical/cloud-init/commit/7c52b8a5fbcfe1e7442bbd011956b52e303745c5 +Conflict:format diff. + +This fixes KeyError on specific network configuration when running +cloud-init on "network" stage. The same problem was mentioned in +#746 and #1041. 
+--- + cloudinit/netinfo.py | 16 +++++++---- + .../route-formatted-output-missing-gateway | 19 +++++++++++++ + .../sample-iproute-output-v4-missing-gateway | 1 + + .../sample-iproute-output-v6-missing-gateway | 10 +++++++ + cloudinit/tests/test_netinfo.py | 27 +++++++++++++++++++ + 5 files changed, 68 insertions(+), 5 deletions(-) + create mode 100644 tests/data/netinfo/route-formatted-output-missing-gateway + create mode 100644 tests/data/netinfo/sample-iproute-output-v4-missing-gateway + create mode 100644 tests/data/netinfo/sample-iproute-output-v6-missing-gateway + +diff --git a/cloudinit/netinfo.py b/cloudinit/netinfo.py +index 628e290..f26399a 100644 +--- a/cloudinit/netinfo.py ++++ b/cloudinit/netinfo.py +@@ -460,9 +460,12 @@ def route_pformat(): + tbl_v4 = SimpleTable(fields_v4) + for (n, r) in enumerate(routes.get('ipv4')): + route_id = str(n) +- tbl_v4.add_row([route_id, r['destination'], +- r['gateway'], r['genmask'], +- r['iface'], r['flags']]) ++ try: ++ tbl_v4.add_row([route_id, r['destination'], ++ r['gateway'], r['genmask'], ++ r['iface'], r['flags']]) ++ except KeyError as e: ++ util.logexc(LOG, "Route info formatting error: %s" % e) + route_s = tbl_v4.get_string() + max_len = len(max(route_s.splitlines(), key=len)) + header = util.center("Route IPv4 info", "+", max_len) +@@ -475,8 +478,11 @@ def route_pformat(): + route_id = str(n) + if r['iface'] == 'lo': + continue +- tbl_v6.add_row([route_id, r['destination'], +- r['gateway'], r['iface'], r['flags']]) ++ try: ++ tbl_v6.add_row([route_id, r['destination'], ++ r['gateway'], r['iface'], r['flags']]) ++ except KeyError as e: ++ util.logexc(LOG, "Route info formatting error: %s" % e) + route_s = tbl_v6.get_string() + max_len = len(max(route_s.splitlines(), key=len)) + header = util.center("Route IPv6 info", "+", max_len) +diff --git a/tests/data/netinfo/route-formatted-output-missing-gateway b/tests/data/netinfo/route-formatted-output-missing-gateway +new file mode 100644 +index 0000000..8bbd819 +--- /dev/null ++++ b/tests/data/netinfo/route-formatted-output-missing-gateway +@@ -0,0 +1,19 @@ +++++++++++++++++++++++++++++Route IPv4 info+++++++++++++++++++++++++++ +++-------+-------------+---------+---------------+-----------+-------+ ++| Route | Destination | Gateway | Genmask | Interface | Flags | +++-------+-------------+---------+---------------+-----------+-------+ ++| 0 | 192.168.2.0 | 0.0.0.0 | 255.255.255.0 | enp0s25 | U | +++-------+-------------+---------+---------------+-----------+-------+ ++++++++++++++++++++++++++++Route IPv6 info++++++++++++++++++++++++++ +++-------+---------------------------+---------+-----------+-------+ ++| Route | Destination | Gateway | Interface | Flags | +++-------+---------------------------+---------+-----------+-------+ ++| 0 | 2a00:abcd:82ae:cd33::657 | :: | enp0s25 | Ue | ++| 1 | 2a00:abcd:82ae:cd33::/64 | :: | enp0s25 | U | ++| 2 | 2a00:abcd:82ae:cd33::/56 | :: | enp0s25 | U | ++| 3 | fd81:123f:654::657 | :: | enp0s25 | U | ++| 4 | fd81:123f:654::/64 | :: | enp0s25 | U | ++| 5 | fd81:123f:654::/48 | :: | enp0s25 | U | ++| 6 | fe80::abcd:ef12:bc34:da21 | :: | enp0s25 | U | ++| 7 | fe80::/64 | :: | enp0s25 | U | +++-------+---------------------------+---------+-----------+-------+ +diff --git a/tests/data/netinfo/sample-iproute-output-v4-missing-gateway b/tests/data/netinfo/sample-iproute-output-v4-missing-gateway +new file mode 100644 +index 0000000..c1e0b3c +--- /dev/null ++++ b/tests/data/netinfo/sample-iproute-output-v4-missing-gateway +@@ -0,0 +1 @@ ++192.168.2.0/24 dev enp0s25 
proto kernel scope link src 192.168.2.18 metric 100 +diff --git a/tests/data/netinfo/sample-iproute-output-v6-missing-gateway b/tests/data/netinfo/sample-iproute-output-v6-missing-gateway +new file mode 100644 +index 0000000..ffab1fa +--- /dev/null ++++ b/tests/data/netinfo/sample-iproute-output-v6-missing-gateway +@@ -0,0 +1,10 @@ ++2a00:abcd:82ae:cd33::657 dev enp0s25 proto kernel metric 256 expires 2334sec pref medium ++2a00:abcd:82ae:cd33::/64 dev enp0s25 proto ra metric 100 pref medium ++2a00:abcd:82ae:cd33::/56 dev enp0s25 proto ra metric 100 pref medium ++fd81:123f:654::657 dev enp0s25 proto kernel metric 256 pref medium ++fd81:123f:654::/64 dev enp0s25 proto ra metric 100 pref medium ++fd81:123f:654::/48 dev enp0s25 proto ra metric 100 pref medium ++fe80::abcd:ef12:bc34:da21 dev enp0s25 proto static metric 100 pref medium ++fe80::/64 dev enp0s25 proto kernel metric 256 pref medium ++local ::1 dev lo table local proto none metric 0 pref medium ++local 2600:1f16:b80:ad00:90a:c915:bca6:5ff2 dev lo table local proto none metric 0 pref medium +diff --git a/cloudinit/tests/test_netinfo.py b/cloudinit/tests/test_netinfo.py +index e44b16d..6619e0d 100644 +--- a/cloudinit/tests/test_netinfo.py ++++ b/cloudinit/tests/test_netinfo.py +@@ -15,10 +15,19 @@ SAMPLE_FREEBSD_IFCONFIG_OUT = readResource("netinfo/freebsd-ifconfig-output") + SAMPLE_IPADDRSHOW_OUT = readResource("netinfo/sample-ipaddrshow-output") + SAMPLE_ROUTE_OUT_V4 = readResource("netinfo/sample-route-output-v4") + SAMPLE_ROUTE_OUT_V6 = readResource("netinfo/sample-route-output-v6") ++SAMPLE_IPROUTE_OUT_V6_MISSING_GATEWAY = readResource( ++ "netinfo/sample-iproute-output-v6-missing-gateway" ++) ++SAMPLE_IPROUTE_OUT_V4_MISSING_GATEWAY = readResource( ++ "netinfo/sample-iproute-output-v4-missing-gateway" ++) + SAMPLE_IPROUTE_OUT_V4 = readResource("netinfo/sample-iproute-output-v4") + SAMPLE_IPROUTE_OUT_V6 = readResource("netinfo/sample-iproute-output-v6") + NETDEV_FORMATTED_OUT = readResource("netinfo/netdev-formatted-output") + ROUTE_FORMATTED_OUT = readResource("netinfo/route-formatted-output") ++ROUTE_FORMATTED_OUT_MISSING_GATEWAY = readResource( ++ "netinfo/route-formatted-output-missing-gateway" ++) + FREEBSD_NETDEV_OUT = readResource("netinfo/freebsd-netdev-formatted-output") + + +@@ -165,6 +174,24 @@ class TestNetInfo(CiTestCase): + content = route_pformat() + self.assertEqual(ROUTE_FORMATTED_OUT, content) + ++ @mock.patch("cloudinit.netinfo.subp.which") ++ @mock.patch("cloudinit.netinfo.subp.subp") ++ def test_route_iproute_pformat_missing_gateway(self, m_subp, m_which): ++ """route_pformat properly rendering info with missing gateway.""" ++ ++ def subp_iproute_selector(*args, **kwargs): ++ if ["ip", "-o", "route", "list"] == args[0]: ++ return (SAMPLE_IPROUTE_OUT_V4_MISSING_GATEWAY, "") ++ v6cmd = ["ip", "--oneline", "-6", "route", "list", "table", "all"] ++ if v6cmd == args[0]: ++ return (SAMPLE_IPROUTE_OUT_V6_MISSING_GATEWAY, "") ++ raise RuntimeError("Unexpected subp call %s" % args[0]) ++ ++ m_subp.side_effect = subp_iproute_selector ++ m_which.side_effect = lambda x: x if x == "ip" else None ++ content = route_pformat() ++ assert ROUTE_FORMATTED_OUT_MISSING_GATEWAY == content ++ + @mock.patch('cloudinit.netinfo.subp.which') + @mock.patch('cloudinit.netinfo.subp.subp') + def test_route_warn_on_missing_commands(self, m_subp, m_which): +-- +2.33.0 + + diff --git a/backport-Fix-KeyError-when-rendering-sysconfig-IPv6-routes.patch b/backport-Fix-KeyError-when-rendering-sysconfig-IPv6-routes.patch new file mode 100644 index 
0000000000000000000000000000000000000000..2839b635f189ffa2d85736a228a8eca98d46fa68 --- /dev/null +++ b/backport-Fix-KeyError-when-rendering-sysconfig-IPv6-routes.patch @@ -0,0 +1,291 @@ +From 9fabfff5aac6e1b398e18429e6dfad54f59e7f75 Mon Sep 17 00:00:00 2001 +From: James Falcon +Date: Thu, 14 Apr 2022 09:31:17 -0500 +Subject: [PATCH] Fix KeyError when rendering sysconfig IPv6 routes (#1380) + +Reference:https://github.com/canonical/cloud-init/commit/9fabfff5aac6e1b398e18429e6dfad54f59e7f75 +Conflict:(1)do not delete ipv6_mask_to_net_prefix. +(2)do not add "NM_CONTROLLED=no" in test because of cloud-init-20.4-nm-controlled.patch. +(3)format diffs. + +Route rendering code was expecting a netmask rather than using the +prefix. A prefix is provided to the renderer, but was being hidden +from the route rendering code. This commit exposes the prefix and +prefers it for IPv6, given how uncommon netmasks are for IPv6. + +LP: #1958506 +--- + cloudinit/net/network_state.py | 4 +- + cloudinit/net/renderer.py | 4 +- + cloudinit/net/sysconfig.py | 41 +++++++------- + tests/unittests/test_net.py | 100 +++++++++++++++++++++++++++++++++ + 4 files changed, 124 insertions(+), 25 deletions(-) + +diff --git a/cloudinit/net/network_state.py b/cloudinit/net/network_state.py +index 0a4e0c6..0d81291 100644 +--- a/cloudinit/net/network_state.py ++++ b/cloudinit/net/network_state.py +@@ -885,12 +885,12 @@ def _normalize_net_keys(network, address_keys=()): + else: + # this supports input of 255.255.255.0 + prefix = ipv4_mask_to_net_prefix(maybe_prefix) ++ elif "prefix" in net: ++ prefix = int(net["prefix"]) + elif netmask and not ipv6: + prefix = ipv4_mask_to_net_prefix(netmask) + elif netmask and ipv6: + prefix = ipv6_mask_to_net_prefix(netmask) +- elif 'prefix' in net: +- prefix = int(net['prefix']) + else: + prefix = 64 if ipv6 else 24 + +diff --git a/cloudinit/net/renderer.py b/cloudinit/net/renderer.py +index 54a83b5..e387173 100644 +--- a/cloudinit/net/renderer.py ++++ b/cloudinit/net/renderer.py +@@ -8,7 +8,7 @@ + import abc + import io + +-from cloudinit.net.network_state import parse_net_config_data ++from cloudinit.net.network_state import NetworkState, parse_net_config_data + from cloudinit.net.udev import generate_udev_rule + + +@@ -32,7 +32,7 @@ class Renderer(object): + pass + + @staticmethod +- def _render_persistent_net(network_state): ++ def _render_persistent_net(network_state: NetworkState): + """Given state, emit udev rules to map mac to ifname.""" + # TODO(harlowja): this seems shared between eni renderer and + # this, so move it to a shared location. +diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py +index 4c6caef..6d17d69 100644 +--- a/cloudinit/net/sysconfig.py ++++ b/cloudinit/net/sysconfig.py +@@ -4,6 +4,7 @@ import copy + import io + import os + import re ++from typing import Mapping + + from configobj import ConfigObj + +@@ -17,6 +18,7 @@ from cloudinit.net import network_state + from . 
import renderer + from .network_state import ( + is_ipv6_addr, net_prefix_to_ipv4_mask, subnet_is_ipv6, IPV6_DYNAMIC_TYPES) ++from cloudinit.net.network_state import NetworkState + + LOG = logging.getLogger(__name__) + KNOWN_DISTROS = ['almalinux', 'centos', 'cloudlinux', 'eurolinux', 'fedora', +@@ -178,7 +180,6 @@ class Route(ConfigMap): + + index = key.replace("ADDRESS", "") + address_value = str(self._conf[key]) +- netmask_value = str(self._conf["NETMASK" + index]) + gateway_value = str(self._conf["GATEWAY" + index]) + + # only accept combinations: +@@ -188,6 +189,7 @@ class Route(ConfigMap): + # do not add ipv4 routes if proto is ipv6 + # (this array will contain a mix of ipv4 and ipv6) + if proto == "ipv4" and not self.is_ipv6_route(address_value): ++ netmask_value = str(self._conf["NETMASK" + index]) + # increase IPv4 index + reindex = reindex + 1 + buf.write( +@@ -209,9 +211,7 @@ class Route(ConfigMap): + % ("METRIC" + str(reindex), _quote_value(metric_value)) + ) + elif proto == "ipv6" and self.is_ipv6_route(address_value): +- prefix_value = network_state.ipv6_mask_to_net_prefix( +- netmask_value +- ) ++ prefix_value = str(self._conf[f"PREFIX{index}"]) + metric_value = ( + "metric " + str(self._conf["METRIC" + index]) + if "METRIC" + index in self._conf +@@ -604,12 +604,9 @@ class Renderer(renderer.Renderer): + raise ValueError("Duplicate declaration of default " + "route found for interface '%s'" + % (iface_cfg.name)) +- # NOTE(harlowja): ipv6 and ipv4 default gateways +- gw_key = 'GATEWAY0' +- nm_key = 'NETMASK0' +- addr_key = 'ADDRESS0' +- # The owning interface provides the default route. +- # ++ # NOTE that instead of defining the route0 settings, ++ # the owning interface provides the default route. ++ + # TODO(harlowja): add validation that no other iface has + # also provided the default route? 
+ iface_cfg['DEFROUTE'] = True +@@ -626,19 +623,19 @@ class Renderer(renderer.Renderer): + iface_cfg['METRIC'] = route['metric'] + + else: +- gw_key = 'GATEWAY%s' % route_cfg.last_idx +- nm_key = 'NETMASK%s' % route_cfg.last_idx +- addr_key = 'ADDRESS%s' % route_cfg.last_idx +- metric_key = 'METRIC%s' % route_cfg.last_idx +- route_cfg.last_idx += 1 + # add default routes only to ifcfg files, not + # to route-* or route6-* +- for (old_key, new_key) in [('gateway', gw_key), +- ('metric', metric_key), +- ('netmask', nm_key), +- ('network', addr_key)]: ++ for old_key, new_name in [ ++ ("gateway", "GATEWAY"), ++ ("metric", "METRIC"), ++ ("prefix", "PREFIX"), ++ ("netmask", "NETMASK"), ++ ("network", "ADDRESS"), ++ ]: + if old_key in route: ++ new_key = f"{new_name}{route_cfg.last_idx}" + route_cfg[new_key] = route[old_key] ++ route_cfg.last_idx += 1 + + @classmethod + def _render_bonding_opts(cls, iface_cfg, iface, flavor): +@@ -890,7 +887,7 @@ class Renderer(renderer.Renderer): + '''Given state, return /etc/sysconfig files + contents''' + if not templates: + templates = cls.templates +- iface_contents = {} ++ iface_contents: Mapping[str, NetInterface] = {} + for iface in network_state.iter_interfaces(): + if iface['type'] == "loopback": + continue +@@ -922,7 +919,9 @@ class Renderer(renderer.Renderer): + contents[cpath] = iface_cfg.routes.to_string(proto) + return contents + +- def render_network_state(self, network_state, templates=None, target=None): ++ def render_network_state( ++ self, network_state: NetworkState, templates=None, target=None ++ ): + if not templates: + templates = self.templates + file_mode = 0o644 +diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py +index 8737a76..a698c65 100644 +--- a/tests/unittests/test_net.py ++++ b/tests/unittests/test_net.py +@@ -3871,6 +3871,106 @@ USERCTL=no + self._compare_files_to_expected( + expected, self._render_and_read(network_config=v2data)) + ++ def test_from_v2_routes(self): ++ """verify routes (including IPv6) get rendered using v2 config. ++ LP: #1958506 ++ """ ++ v2_data = { ++ "version": 2, ++ "ethernets": { ++ "eth0": { ++ "addresses": [ ++ "10.54.2.19/21", ++ "2a00:1730:fff9:100::52/128", ++ ], ++ "gateway4": "10.54.0.1", ++ "gateway6": "2a00:1730:fff9:100::1", ++ "match": {"macaddress": "52:54:00:3f:fc:f7"}, ++ "mtu": 1400, ++ "nameservers": { ++ "addresses": [ ++ "10.52.1.1", ++ "10.52.1.71", ++ "2001:4860:4860::8888", ++ "2001:4860:4860::8844", ++ ] ++ }, ++ "routes": [ ++ { ++ "scope": "link", ++ "to": "10.54.0.1/32", ++ "via": "0.0.0.0", ++ }, ++ { ++ "scope": "link", ++ "to": "0.0.0.0/0", ++ "via": "10.54.0.1", ++ }, ++ { ++ "scope": "link", ++ "to": "2a00:1730:fff9:100::1/128", ++ "via": "::0", ++ }, ++ { ++ "scope": "link", ++ "to": "::0/0", ++ "via": "2a00:1730:fff9:100::1", ++ }, ++ ], ++ "set-name": "eth0", ++ } ++ }, ++ } ++ ++ expected = { ++ "ifcfg-eth0": textwrap.dedent( ++ """\ ++ # Created by cloud-init on instance boot automatically, do not edit. ++ # ++ BOOTPROTO=none ++ DEFROUTE=yes ++ DEVICE=eth0 ++ DNS1=10.52.1.1 ++ DNS2=10.52.1.71 ++ DNS3=2001:4860:4860::8888 ++ GATEWAY=10.54.0.1 ++ HWADDR=52:54:00:3f:fc:f7 ++ IPADDR=10.54.2.19 ++ IPV6ADDR=2a00:1730:fff9:100::52/128 ++ IPV6INIT=yes ++ IPV6_AUTOCONF=no ++ IPV6_DEFAULTGW=2a00:1730:fff9:100::1 ++ IPV6_FORCE_ACCEPT_RA=no ++ MTU=1400 ++ NETMASK=255.255.248.0 ++ ONBOOT=yes ++ TYPE=Ethernet ++ USERCTL=no ++ """ # noqa: E501 ++ ), ++ "route-eth0": textwrap.dedent( ++ """\ ++ # Created by cloud-init on instance boot automatically, do not edit. 
++ # ++ ADDRESS0=10.54.0.1 ++ GATEWAY0=0.0.0.0 ++ NETMASK0=255.255.255.255 ++ """ # noqa: E501 ++ ), ++ "route6-eth0": textwrap.dedent( ++ """\ ++ # Created by cloud-init on instance boot automatically, do not edit. ++ # ++ 2a00:1730:fff9:100::1/128 via ::0 dev eth0 ++ ::0/64 via 2a00:1730:fff9:100::1 dev eth0 ++ """ # noqa: E501 ++ ), ++ } ++ ++ found = self._render_and_read(network_config=v2_data) ++ self._compare_files_to_expected(expected, found) ++ self._assert_headers(found) ++ + + @mock.patch( + "cloudinit.net.is_openvswitch_internal_interface", +-- +2.33.0 + + diff --git a/backport-Fix-default-route-rendering-on-v2-ipv6-1973.patch b/backport-Fix-default-route-rendering-on-v2-ipv6-1973.patch new file mode 100644 index 0000000000000000000000000000000000000000..0405542263ab6fa2be6e476f144780c2546e7dec --- /dev/null +++ b/backport-Fix-default-route-rendering-on-v2-ipv6-1973.patch @@ -0,0 +1,47 @@ +From 7d57fcff6d32fd706dd745315c0f8f72d94385eb Mon Sep 17 00:00:00 2001 +From: James Falcon +Date: Tue, 24 Jan 2023 08:54:05 -0600 +Subject: [PATCH] Fix default route rendering on v2 ipv6 (#1973) + +Reference:https://github.com/canonical/cloud-init/commit/7d57fcff6d32fd706dd745315c0f8f72d94385eb +Conflict:NA + +::/0 would get rendered as ::/64 rather than ::/0 across all renderers +using ipv6 in a v2 config. + +LP: #2003562 +--- + cloudinit/net/network_state.py | 2 +- + tests/unittests/test_net.py | 2 +- + 2 files changed, 2 insertions(+), 2 deletions(-) + +diff --git a/cloudinit/net/network_state.py b/cloudinit/net/network_state.py +index 0d81291..857942a 100644 +--- a/cloudinit/net/network_state.py ++++ b/cloudinit/net/network_state.py +@@ -857,7 +857,7 @@ def _normalize_net_keys(network, address_keys=()): + + @returns: A dict containing normalized prefix and matching addr_key. + """ +- net = dict((k, v) for k, v in network.items() if v) ++ net = {k: v for k, v in network.items() if v or v == 0} + addr_key = None + for key in address_keys: + if net.get(key): +diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py +index a698c65..2d2cff0 100644 +--- a/tests/unittests/test_net.py ++++ b/tests/unittests/test_net.py +@@ -3962,7 +3962,7 @@ USERCTL=no + # Created by cloud-init on instance boot automatically, do not edit. + # + 2a00:1730:fff9:100::1/128 via ::0 dev eth0 +- ::0/64 via 2a00:1730:fff9:100::1 dev eth0 ++ ::0/0 via 2a00:1730:fff9:100::1 dev eth0 + """ # noqa: E501 + ), + } +-- +2.33.0 + + diff --git a/backport-Fix-exception-when-no-activator-found-1129.patch b/backport-Fix-exception-when-no-activator-found-1129.patch new file mode 100644 index 0000000000000000000000000000000000000000..a07b915a006baa6fb20d84208bad0b0ca0bc91b0 --- /dev/null +++ b/backport-Fix-exception-when-no-activator-found-1129.patch @@ -0,0 +1,89 @@ +From ffa6fc88249aa080aa31811a45569a45e567418a Mon Sep 17 00:00:00 2001 +From: James Falcon +Date: Thu, 2 Dec 2021 22:36:37 -0600 +Subject: [PATCH] Fix exception when no activator found (#1129) + +Reference:https://github.com/canonical/cloud-init/commit/ffa6fc88249aa080aa31811a45569a45e567418a +Conflict:NA + +Given that there are additional network management tools that we haven't +yet supported with activators, we should log a warning and continue +without network activation here, especially since this was a no-op for +years. 
+ +LP: #1948681 +--- + cloudinit/distros/__init__.py | 7 ++++++- + cloudinit/net/activators.py | 6 +++++- + tests/unittests/test_net_activators.py | 5 +++-- + 3 files changed, 14 insertions(+), 4 deletions(-) + +diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py +index cf6aad14..fe44f20e 100755 +--- a/cloudinit/distros/__init__.py ++++ b/cloudinit/distros/__init__.py +@@ -228,7 +228,12 @@ class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta): + # Now try to bring them up + if bring_up: + LOG.debug('Bringing up newly configured network interfaces') +- network_activator = activators.select_activator() ++ try: ++ network_activator = activators.select_activator() ++ except activators.NoActivatorException: ++ LOG.warning("No network activator found, not bringing up " ++ "network interfaces") ++ return True + network_activator.bring_up_all_interfaces(network_state) + else: + LOG.debug("Not bringing up newly configured network interfaces") +diff --git a/cloudinit/net/activators.py b/cloudinit/net/activators.py +index 11149548..137338d8 100644 +--- a/cloudinit/net/activators.py ++++ b/cloudinit/net/activators.py +@@ -16,6 +16,10 @@ from cloudinit.net.sysconfig import NM_CFG_FILE + LOG = logging.getLogger(__name__) + + ++class NoActivatorException(Exception): ++ pass ++ ++ + def _alter_interface(cmd, device_name) -> bool: + LOG.debug("Attempting command %s for device %s", cmd, device_name) + try: +@@ -271,7 +275,7 @@ def select_activator(priority=None, target=None) -> Type[NetworkActivator]: + tmsg = "" + if target and target != "/": + tmsg = " in target=%s" % target +- raise RuntimeError( ++ raise NoActivatorException( + "No available network activators found%s. Searched " + "through list: %s" % (tmsg, priority)) + selected = found[0] +diff --git a/tests/unittests/test_net_activators.py b/tests/unittests/test_net_activators.py +index f63a8b74..9da21195 100644 +--- a/tests/unittests/test_net_activators.py ++++ b/tests/unittests/test_net_activators.py +@@ -12,7 +12,8 @@ from cloudinit.net.activators import ( + IfUpDownActivator, + NetplanActivator, + NetworkManagerActivator, +- NetworkdActivator ++ NetworkdActivator, ++ NoActivatorException, + ) + from cloudinit.net.network_state import parse_net_config_data + from cloudinit.safeyaml import load +@@ -99,7 +100,7 @@ class TestSearchAndSelect: + resp = search_activator() + assert resp == [] + +- with pytest.raises(RuntimeError): ++ with pytest.raises(NoActivatorException): + select_activator() + + +-- +2.33.0 + + diff --git a/backport-Fix-network-v2-metric-rendering-4220.patch b/backport-Fix-network-v2-metric-rendering-4220.patch new file mode 100644 index 0000000000000000000000000000000000000000..834300b78e33c54e12ac6be047ca1c9a3852f7d1 --- /dev/null +++ b/backport-Fix-network-v2-metric-rendering-4220.patch @@ -0,0 +1,388 @@ +From c68305a91791e28031df8b9ebd33bfe7ffd7e75d Mon Sep 17 00:00:00 2001 +From: James Falcon +Date: Tue, 4 Jul 2023 07:00:22 -0500 +Subject: [PATCH] Fix network v2 metric rendering (#4220) + +Reference:https://github.com/canonical/cloud-init/commit/c68305a91791e28031df8b9ebd33bfe7ffd7e75d +Conflict:(1)change 'small' to 'small_v1' +(2)do not change TestNetworkManagerRendering. +(3)do not add "NM_CONTROLLED=no" in test because of cloud-init-20.4-nm-controlled.patch. +(4)format diffs. + +Metric info was not being included in v2-based routes. 
+ +Fixes GH-4217 +--- + cloudinit/net/network_state.py | 4 +- + tests/unittests/test_net.py | 232 ++++++++++++++++++++++++++++++--- + 2 files changed, 216 insertions(+), 20 deletions(-) + +diff --git a/cloudinit/net/network_state.py b/cloudinit/net/network_state.py +index 4862bf9..ac44304 100644 +--- a/cloudinit/net/network_state.py ++++ b/cloudinit/net/network_state.py +@@ -187,7 +187,6 @@ class NetworkState(object): + + + class NetworkStateInterpreter(metaclass=CommandHandlerMeta): +- + initial_network_state = { + 'interfaces': {}, + 'routes': [], +@@ -582,7 +581,6 @@ class NetworkStateInterpreter(metaclass=CommandHandlerMeta): + self._handle_bond_bridge(command, cmd_type='bond') + + def handle_bridges(self, command): +- + ''' + v2_command = { + br0: { +@@ -815,7 +813,7 @@ class NetworkStateInterpreter(metaclass=CommandHandlerMeta): + routes = [] + for route in cfg.get('routes', []): + routes.append(_normalize_route( +- {'destination': route.get('to'), 'gateway': route.get('via')})) ++ {'destination': route.get('to'), 'gateway': route.get('via'), "metric": route.get("metric"),})) + + # v2 routes are bound to the interface, in v1 we add them under + # the first subnet since there isn't an equivalent interface level. +diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py +index 7cde102..764e1c7 100644 +--- a/tests/unittests/test_net.py ++++ b/tests/unittests/test_net.py +@@ -818,7 +818,7 @@ iface eth1 inet static + """.lstrip() + + NETWORK_CONFIGS = { +- 'small': { ++ 'small_v1': { + 'expected_networkd_eth99': textwrap.dedent("""\ + [Match] + Name=eth99 +@@ -959,6 +959,164 @@ NETWORK_CONFIGS = { + - wark.maas + """), + }, ++ # We test a separate set of configs here because v2 doesn't support ++ # generic nameservers, so that aspect needs to be modified ++ "small_v2": { ++ "expected_networkd_eth99": textwrap.dedent( ++ """\ ++ [Match] ++ Name=eth99 ++ MACAddress=c0:d6:9f:2c:e8:80 ++ [Address] ++ Address=192.168.21.3/24 ++ [Network] ++ DHCP=ipv4 ++ Domains=barley.maas sach.maas ++ DNS=8.8.8.8 8.8.4.4 ++ [Route] ++ Gateway=65.61.151.37 ++ Destination=0.0.0.0/0 ++ Metric=10000 ++ """ ++ ).rstrip(" "), ++ "expected_networkd_eth1": textwrap.dedent( ++ """\ ++ [Match] ++ Name=eth1 ++ MACAddress=cf:d6:af:48:e8:80 ++ [Network] ++ DHCP=no ++ """ ++ ).rstrip(" "), ++ "expected_eni": textwrap.dedent( ++ """\ ++ auto lo ++ iface lo inet loopback ++ dns-nameservers 8.8.8.8 8.8.4.4 ++ dns-search wark.maas ++ iface eth1 inet manual ++ auto eth99 ++ iface eth99 inet dhcp ++ # control-alias eth99 ++ iface eth99 inet static ++ address 192.168.21.3/24 ++ dns-nameservers 8.8.8.8 8.8.4.4 ++ dns-search barley.maas sach.maas ++ post-up route add default gw 65.61.151.37 metric 10000 || true ++ pre-down route del default gw 65.61.151.37 metric 10000 || true ++ """ ++ ).rstrip(" "), ++ "expected_sysconfig_opensuse": { ++ "ifcfg-eth1": textwrap.dedent( ++ """\ ++ BOOTPROTO=static ++ LLADDR=cf:d6:af:48:e8:80 ++ STARTMODE=auto""" ++ ), ++ "ifcfg-eth99": textwrap.dedent( ++ """\ ++ BOOTPROTO=dhcp4 ++ LLADDR=c0:d6:9f:2c:e8:80 ++ IPADDR=192.168.21.3 ++ NETMASK=255.255.255.0 ++ STARTMODE=auto""" ++ ), ++ }, ++ "expected_sysconfig_rhel": { ++ "ifcfg-eth1": textwrap.dedent( ++ """\ ++ BOOTPROTO=none ++ DEVICE=eth1 ++ HWADDR=cf:d6:af:48:e8:80 ++ ONBOOT=yes ++ TYPE=Ethernet ++ USERCTL=no""" ++ ), ++ "ifcfg-eth99": textwrap.dedent( ++ """\ ++ BOOTPROTO=dhcp ++ DEFROUTE=yes ++ DEVICE=eth99 ++ DHCLIENT_SET_DEFAULT_ROUTE=yes ++ DNS1=8.8.8.8 ++ DNS2=8.8.4.4 ++ DOMAIN="barley.maas sach.maas" ++ GATEWAY=65.61.151.37 
++ HWADDR=c0:d6:9f:2c:e8:80 ++ IPADDR=192.168.21.3 ++ NETMASK=255.255.255.0 ++ METRIC=10000 ++ ONBOOT=yes ++ TYPE=Ethernet ++ USERCTL=no""" ++ ), ++ }, ++ "expected_network_manager": { ++ "cloud-init-eth1.nmconnection": textwrap.dedent( ++ """\ ++ # Generated by cloud-init. Changes will be lost. ++ [connection] ++ id=cloud-init eth1 ++ uuid=3c50eb47-7260-5a6d-801d-bd4f587d6b58 ++ autoconnect-priority=120 ++ type=ethernet ++ [user] ++ org.freedesktop.NetworkManager.origin=cloud-init ++ [ethernet] ++ mac-address=CF:D6:AF:48:E8:80 ++ """ ++ ), ++ "cloud-init-eth99.nmconnection": textwrap.dedent( ++ """\ ++ # Generated by cloud-init. Changes will be lost. ++ [connection] ++ id=cloud-init eth99 ++ uuid=b1b88000-1f03-5360-8377-1a2205efffb4 ++ autoconnect-priority=120 ++ type=ethernet ++ [user] ++ org.freedesktop.NetworkManager.origin=cloud-init ++ [ethernet] ++ mac-address=C0:D6:9F:2C:E8:80 ++ [ipv4] ++ method=auto ++ may-fail=false ++ route1=0.0.0.0/0,65.61.151.37 ++ address1=192.168.21.3/24 ++ dns=8.8.8.8;8.8.4.4; ++ dns-search=barley.maas;sach.maas; ++ """ ++ ), ++ }, ++ "yaml": textwrap.dedent( ++ """ ++ version: 2 ++ ethernets: ++ eth1: ++ match: ++ macaddress: cf:d6:af:48:e8:80 ++ set-name: eth1 ++ eth99: ++ addresses: ++ - 192.168.21.3/24 ++ dhcp4: true ++ match: ++ macaddress: c0:d6:9f:2c:e8:80 ++ nameservers: ++ addresses: ++ - 8.8.8.8 ++ - 8.8.4.4 ++ search: ++ - barley.maas ++ - sach.maas ++ routes: ++ - metric: 10000 ++ to: 0.0.0.0/0 ++ via: 65.61.151.37 ++ set-name: eth99 ++ """ ++ ), ++ }, + 'v4_and_v6': { + 'expected_networkd': textwrap.dedent("""\ + [Match] +@@ -2965,7 +3123,6 @@ iface eth1 inet dhcp + mock.Mock(return_value=False) + ) + class TestRhelSysConfigRendering(CiTestCase): +- + with_logs = True + + nm_cfg_file = "/etc/NetworkManager/NetworkManager.conf" +@@ -3286,8 +3443,14 @@ USERCTL=no + 'WARNING: Network config: ignoring eth0.101 device-level mtu', + self.logs.getvalue()) + +- def test_small_config(self): +- entry = NETWORK_CONFIGS['small'] ++ def test_small_config_v1(self): ++ entry = NETWORK_CONFIGS["small_v1"] ++ found = self._render_and_read(network_config=yaml.load(entry["yaml"])) ++ self._compare_files_to_expected(entry[self.expected_name], found) ++ self._assert_headers(found) ++ ++ def test_small_config_v2(self): ++ entry = NETWORK_CONFIGS["small_v2"] + found = self._render_and_read(network_config=yaml.load(entry['yaml'])) + self._compare_files_to_expected(entry[self.expected_name], found) + self._assert_headers(found) +@@ -3429,7 +3592,7 @@ USERCTL=no + self.assertTrue(os.path.exists(nm_cfg)) + + # render and read +- entry = NETWORK_CONFIGS['small'] ++ entry = NETWORK_CONFIGS['small_v1'] + found = self._render_and_read(network_config=yaml.load(entry['yaml']), + dir=render_dir) + self._compare_files_to_expected(entry[self.expected_name], found) +@@ -3450,7 +3613,7 @@ USERCTL=no + util.write_file(nm_cfg, '# test_check_ifcfg_rh\n[main]\nplugins=foo\n') + + # render and read +- entry = NETWORK_CONFIGS['small'] ++ entry = NETWORK_CONFIGS['small_v1'] + found = self._render_and_read(network_config=yaml.load(entry['yaml']), + dir=render_dir) + self._compare_files_to_expected(entry[self.expected_name], found) +@@ -3476,7 +3639,7 @@ USERCTL=no + self.assertTrue(os.path.exists(nm_cfg)) + + # render and read +- entry = NETWORK_CONFIGS['small'] ++ entry = NETWORK_CONFIGS['small_v1'] + found = self._render_and_read(network_config=yaml.load(entry['yaml']), + dir=render_dir) + self._compare_files_to_expected(entry[self.expected_name], found) +@@ -3644,7 +3807,6 @@ 
USERCTL=no + mock.Mock(return_value=False) + ) + class TestOpenSuseSysConfigRendering(CiTestCase): +- + with_logs = True + + scripts_dir = '/etc/sysconfig/network' +@@ -3916,8 +4078,14 @@ STARTMODE=auto + 'WARNING: Network config: ignoring eth0.101 device-level mtu', + self.logs.getvalue()) + +- def test_small_config(self): +- entry = NETWORK_CONFIGS['small'] ++ def test_small_config_v1(self): ++ entry = NETWORK_CONFIGS["small_v1"] ++ found = self._render_and_read(network_config=yaml.load(entry["yaml"])) ++ self._compare_files_to_expected(entry[self.expected_name], found) ++ self._assert_headers(found) ++ ++ def test_small_config_v2(self): ++ entry = NETWORK_CONFIGS["small_v1"] + found = self._render_and_read(network_config=yaml.load(entry['yaml'])) + self._compare_files_to_expected(entry[self.expected_name], found) + self._assert_headers(found) +@@ -4539,7 +4707,6 @@ class TestReadInitramfsConfig(CiTestCase): + + + class TestNetplanRoundTrip(CiTestCase): +- + NETPLAN_INFO_OUT = textwrap.dedent(""" + netplan.io: + features: +@@ -4596,7 +4763,7 @@ class TestNetplanRoundTrip(CiTestCase): + files['/etc/netplan/50-cloud-init.yaml'].splitlines()) + + def testsimple_render_small_netplan(self): +- entry = NETWORK_CONFIGS['small'] ++ entry = NETWORK_CONFIGS['small_v1'] + files = self._render_and_read(network_config=yaml.load(entry['yaml'])) + self.assertEqual( + entry['expected_netplan'].splitlines(), +@@ -4781,8 +4948,17 @@ class TestEniRoundTrip(CiTestCase): + entry['expected_eni'].splitlines(), + files['/etc/network/interfaces'].splitlines()) + +- def testsimple_render_small(self): +- entry = NETWORK_CONFIGS['small'] ++ def testsimple_render_small_v1(self): ++ entry = NETWORK_CONFIGS["small_v1"] ++ files = self._render_and_read(network_config=yaml.load(entry["yaml"])) ++ self.assertEqual( ++ entry["expected_eni"].splitlines(), ++ files["/etc/network/interfaces"].splitlines(), ++ ) ++ ++ @pytest.mark.xfail(reason="GH-4219") ++ def testsimple_render_small_v2(self): ++ entry = NETWORK_CONFIGS["small_v2"] + files = self._render_and_read(network_config=yaml.load(entry['yaml'])) + self.assertEqual( + entry['expected_eni'].splitlines(), +@@ -5105,10 +5281,33 @@ class TestNetworkdRoundTrip(CiTestCase): + return dir2dict(dir) + + @mock.patch("cloudinit.net.util.chownbyname", return_value=True) +- def testsimple_render_small_networkd(self, m_chown): ++ def testsimple_render_small_networkd_v1(self, m_chown): ++ nwk_fn1 = "/etc/systemd/network/10-cloud-init-eth99.network" ++ nwk_fn2 = "/etc/systemd/network/10-cloud-init-eth1.network" ++ entry = NETWORK_CONFIGS["small_v1"] ++ files = self._render_and_read(network_config=yaml.load(entry["yaml"])) ++ ++ actual = files[nwk_fn1].splitlines() ++ actual = self.create_conf_dict(actual) ++ ++ expected = entry["expected_networkd_eth99"].splitlines() ++ expected = self.create_conf_dict(expected) ++ ++ self.compare_dicts(actual, expected) ++ ++ actual = files[nwk_fn2].splitlines() ++ actual = self.create_conf_dict(actual) ++ ++ expected = entry["expected_networkd_eth1"].splitlines() ++ expected = self.create_conf_dict(expected) ++ ++ self.compare_dicts(actual, expected) ++ ++ @mock.patch("cloudinit.net.util.chownbyname", return_value=True) ++ def testsimple_render_small_networkd_v2(self, m_chown): + nwk_fn1 = '/etc/systemd/network/10-cloud-init-eth99.network' + nwk_fn2 = '/etc/systemd/network/10-cloud-init-eth1.network' +- entry = NETWORK_CONFIGS['small'] ++ entry = NETWORK_CONFIGS['small_v2'] + files = self._render_and_read(network_config=yaml.load(entry['yaml'])) + 
+ actual = files[nwk_fn1].splitlines() +@@ -5735,7 +5934,6 @@ class TestInterfacesSorting(CiTestCase): + mock.Mock(return_value=False) + ) + class TestGetIBHwaddrsByInterface(CiTestCase): +- + _ib_addr = '80:00:00:28:fe:80:00:00:00:00:00:00:00:11:22:03:00:33:44:56' + _ib_addr_eth_format = '00:11:22:33:44:56' + _data = {'devices': ['enp0s1', 'enp0s2', 'bond1', 'bridge1', +-- +2.33.0 + + diff --git a/backport-Handle-non-existent-ca-cert-config-situation-2073.patch b/backport-Handle-non-existent-ca-cert-config-situation-2073.patch new file mode 100644 index 0000000000000000000000000000000000000000..ddcc09e615ae792221a9d94623be23ce68edb384 --- /dev/null +++ b/backport-Handle-non-existent-ca-cert-config-situation-2073.patch @@ -0,0 +1,37 @@ +From 3634678465e7b8f8608bcb9a1f5773ae7837cbe9 Mon Sep 17 00:00:00 2001 +From: Shreenidhi Shedi <53473811+sshedi@users.noreply.github.com> +Date: Fri, 17 Mar 2023 03:01:22 +0530 +Subject: [PATCH] Handle non existent ca-cert-config situation (#2073) + +Reference:https://github.com/canonical/cloud-init/commit/3634678465e7b8f8608bcb9a1f5773ae7837cbe9 +Conflict:only check cert file + +Currently if a cert file doesn't exist, cc_ca_certs module crashes +This fix makes it possible to handle it gracefully. + +Also, out_lines variable may not be available if os.stat returns 0. +This issue is also taken care of. + +Added tests for the same. +--- + cloudinit/config/cc_ca_certs.py | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/cloudinit/config/cc_ca_certs.py b/cloudinit/config/cc_ca_certs.py +index bd7bead..2b3210b 100644 +--- a/cloudinit/config/cc_ca_certs.py ++++ b/cloudinit/config/cc_ca_certs.py +@@ -110,7 +110,8 @@ def update_cert_config(distro_cfg): + + @param distro_cfg: A hash providing _distro_ca_certs_configs function. + """ +- if distro_cfg['ca_cert_config'] is None: ++ ca_cert_cfg_fn = distro_cfg["ca_cert_config"] ++ if not ca_cert_cfg_fn or not os.path.exists(ca_cert_cfg_fn): + return + if os.stat(distro_cfg['ca_cert_config']).st_size == 0: + # If the CA_CERT_CONFIG file is empty (i.e. all existing +-- +2.33.0 + + diff --git a/backport-Ignore-duplicate-macs-from-mscc_felix-and-fsl_enetc.patch b/backport-Ignore-duplicate-macs-from-mscc_felix-and-fsl_enetc.patch new file mode 100644 index 0000000000000000000000000000000000000000..b42ab5d29b9306cda448cc6410a50984f5185846 --- /dev/null +++ b/backport-Ignore-duplicate-macs-from-mscc_felix-and-fsl_enetc.patch @@ -0,0 +1,94 @@ +From 4610833d1e9a0839321f84bbc3c8d27ff19a17f2 Mon Sep 17 00:00:00 2001 +From: James Falcon +Date: Thu, 2 Feb 2023 10:13:08 -0600 +Subject: [PATCH] Ignore duplicate macs from mscc_felix and fsl_enetc + +Reference:https://github.com/canonical/cloud-init/commit/4610833d1e9a0839321f84bbc3c8d27ff19a17f2 +Conflict:import does_not_raise diffs. + +mscc_felix and fsl_enetc are drivers representing a switch that is +expected to have duplicate macs. If we encounter either of these +drivers, we should not raise the duplicate mac exception. 
+ +LP: #1997922 +--- + cloudinit/net/__init__.py | 16 ++++++++++++++++ + tests/unittests/test_net.py | 20 ++++++++++++++++++++ + 2 files changed, 36 insertions(+) + +diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py +index 96ce6f5..a308c98 100644 +--- a/cloudinit/net/__init__.py ++++ b/cloudinit/net/__init__.py +@@ -929,6 +929,22 @@ def get_interfaces_by_mac_on_linux(blacklist_drivers=None) -> dict: + % (ret[mac], driver_map[mac], name) + ) + ++ # This is intended to be a short-term fix of LP: #1997922 ++ # Long term, we should better handle configuration of virtual ++ # devices where duplicate MACs are expected early in boot if ++ # cloud-init happens to enumerate network interfaces before drivers ++ # have fully initialized the leader/subordinate relationships for ++ # those devices or switches. ++ if driver == "mscc_felix" or driver == "fsl_enetc": ++ LOG.debug( ++ "Ignoring duplicate macs from '%s' and '%s' due to " ++ "driver '%s'.", ++ name, ++ ret[mac], ++ driver, ++ ) ++ continue ++ + if raise_duplicate_mac_error: + raise RuntimeError(msg) + +diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py +index 0db4442..e5dd979 100644 +--- a/tests/unittests/test_net.py ++++ b/tests/unittests/test_net.py +@@ -11,6 +11,7 @@ from cloudinit import temp_utils + from cloudinit import subp + from cloudinit import util + from cloudinit import safeyaml as yaml ++from contextlib import ExitStack as does_not_raise + + from cloudinit.tests.helpers import ( + CiTestCase, FilesystemMockingTestCase, dir2dict, mock, populate_dir) +@@ -5515,6 +5516,7 @@ class TestInterfaceHasOwnMac(CiTestCase): + mock.Mock(return_value=False) + ) + class TestGetInterfacesByMac(CiTestCase): ++ with_logs = True + _data = {'bonds': ['bond1'], + 'bridges': ['bridge1'], + 'vlans': ['bond1.101'], +@@ -5680,6 +5682,24 @@ class TestGetInterfacesByMac(CiTestCase): + ib_addr_eth_format: 'ib0', ib_addr: 'ib0'} + self.assertEqual(expected, result) + ++ def test_duplicate_ignored_macs(self): ++ # LP: #199792 ++ self._data = copy.deepcopy(self._data) ++ self._data["macs"]["swp0"] = "9a:57:7d:78:47:c0" ++ self._data["macs"]["swp1"] = "9a:57:7d:78:47:c0" ++ self._data["own_macs"].append("swp0") ++ self._data["own_macs"].append("swp1") ++ self._data["drivers"]["swp0"] = "mscc_felix" ++ self._data["drivers"]["swp1"] = "mscc_felix" ++ self._mock_setup() ++ with does_not_raise(): ++ net.get_interfaces_by_mac() ++ pattern = ( ++ "Ignoring duplicate macs from 'swp[0-1]' and 'swp[0-1]' due to " ++ "driver 'mscc_felix'." ++ ) ++ assert re.search(pattern, self.logs.getvalue()) ++ + + class TestInterfacesSorting(CiTestCase): + +-- +2.33.0 + + diff --git a/backport-Set-ownership-for-new-folders-in-Write-Files-Module-.patch b/backport-Set-ownership-for-new-folders-in-Write-Files-Module-.patch new file mode 100644 index 0000000000000000000000000000000000000000..b5773b0e98ec170c36f194219e7d8400596970b6 --- /dev/null +++ b/backport-Set-ownership-for-new-folders-in-Write-Files-Module-.patch @@ -0,0 +1,165 @@ +From 15a6e0868097ec8a6ef97b9fde59a9486270fc37 Mon Sep 17 00:00:00 2001 +From: Jack +Date: Tue, 21 Feb 2023 23:39:43 +0800 +Subject: [PATCH] Set ownership for new folders in Write Files Module +(#1980) + +Reference:https://github.com/canonical/cloud-init/commit/15a6e0868097ec8a6ef97b9fde59a9486270fc37 +Conflict:(1)cloudinit/config/schemas/schema-cloud-config-v1.json not +change. +(2)tools/.github-cla-signers not change. +(3)tests/integration_tests/modules/test_write_files.py not change. 
+Integration tests are current Ubuntu-only. +https://github.com/canonical/cloud-init/issues/4290#issuecomment-1660200921 + +The parent directory would be created automatically if it does not +exist. But the ownership of newly-created parent +directory would always be root. + +With this change, it would be set the same as . + +LP: #1990513 +--- + cloudinit/config/cc_write_files.py | 4 +++- + cloudinit/util.py | 38 ++++++++++++++++++++++++++++-- + tests/unittests/test_util.py | 20 ++++++++++++++++ + 3 files changed, 59 insertions(+), 3 deletions(-) + +diff --git a/cloudinit/config/cc_write_files.py b/cloudinit/config/cc_write_files.py +index b1678b8..6edaa85 100644 +--- a/cloudinit/config/cc_write_files.py ++++ b/cloudinit/config/cc_write_files.py +@@ -241,7 +241,9 @@ def write_files(name, files): + (u, g) = util.extract_usergroup(f_info.get('owner', DEFAULT_OWNER)) + perms = decode_perms(f_info.get('permissions'), DEFAULT_PERMS) + omode = 'ab' if util.get_cfg_option_bool(f_info, 'append') else 'wb' +- util.write_file(path, contents, omode=omode, mode=perms) ++ util.write_file( ++ path, contents, omode=omode, mode=perms, user=u, group=g ++ ) + util.chownbyname(path, u, g) + + +diff --git a/cloudinit/util.py b/cloudinit/util.py +index 2b8adf3..d4a6eed 100644 +--- a/cloudinit/util.py ++++ b/cloudinit/util.py +@@ -35,6 +35,7 @@ from base64 import b64decode, b64encode + from errno import ENOENT + from collections import namedtuple + from functools import lru_cache, total_ordering ++from pathlib import Path + from urllib import parse + from typing import List + +@@ -1648,12 +1649,41 @@ def json_dumps(data): + raise + + +-def ensure_dir(path, mode=None): ++def get_non_exist_parent_dir(path): ++ """Get the last directory in a path that does not exist. ++ ++ Example: when path=/usr/a/b and /usr/a does not exis but /usr does, ++ return /usr/a ++ """ ++ p_path = os.path.dirname(path) ++ # Check if parent directory of path is root ++ if p_path == os.path.dirname(p_path): ++ return path ++ else: ++ if os.path.isdir(p_path): ++ return path ++ else: ++ return get_non_exist_parent_dir(p_path) ++ ++ ++def ensure_dir(path, mode=None, user=None, group=None): + if not os.path.isdir(path): ++ # Get non existed parent dir first before they are created. ++ non_existed_parent_dir = get_non_exist_parent_dir(path) + # Make the dir and adjust the mode + with SeLinuxGuard(os.path.dirname(path), recursive=True): + os.makedirs(path) + chmod(path, mode) ++ # Change the ownership ++ if user or group: ++ chownbyname(non_existed_parent_dir, user, group) ++ # if path=/usr/a/b/c and non_existed_parent_dir=/usr, ++ # then sub_relative_dir=PosixPath('a/b/c') ++ sub_relative_dir = Path(path.split(non_existed_parent_dir)[1][1:]) ++ sub_path = Path(non_existed_parent_dir) ++ for part in sub_relative_dir.parts: ++ sub_path = sub_path.joinpath(part) ++ chownbyname(sub_path, user, group) + else: + # Just adjust the mode + chmod(path, mode) +@@ -1972,6 +2002,8 @@ def write_file( + preserve_mode=False, + *, + ensure_dir_exists=True, ++ user=None, ++ group=None, + ): + """ + Writes a file with the given content and sets the file mode as specified. +@@ -1986,6 +2018,8 @@ def write_file( + @param ensure_dir_exists: If True (the default), ensure that the directory + containing `filename` exists before writing to + the file. ++ @param user: The user to set on the file. ++ @param group: The group to set on the file. 
+ """ + + if preserve_mode: +@@ -1995,7 +2029,7 @@ def write_file( + pass + + if ensure_dir_exists: +- ensure_dir(os.path.dirname(filename)) ++ ensure_dir(os.path.dirname(filename), user=user, group=group) + if 'b' in omode.lower(): + content = encode_text(content) + write_type = 'bytes' +diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py +index 3fa5059..2ab3bad 100644 +--- a/tests/unittests/test_util.py ++++ b/tests/unittests/test_util.py +@@ -10,6 +10,7 @@ import tempfile + import pytest + import yaml + from unittest import mock ++from pathlib import Path + + from cloudinit import subp + from cloudinit import importer, util +@@ -99,6 +100,25 @@ class TestWriteFile(helpers.TestCase): + self.assertTrue(os.path.isdir(dirname)) + self.assertTrue(os.path.isfile(path)) + ++ def test_dir_ownership(self): ++ """Verifiy that directories is created with appropriate ownership.""" ++ dirname = os.path.join(self.tmp, "subdir", "subdir2") ++ path = os.path.join(dirname, "NewFile.txt") ++ contents = "Hey there" ++ user = "foo" ++ group = "foo" ++ ++ with mock.patch.object( ++ util, "chownbyname", return_value=None ++ ) as mockobj: ++ util.write_file(path, contents, user=user, group=group) ++ ++ calls = [ ++ mock.call(os.path.join(self.tmp, "subdir"), user, group), ++ mock.call(Path(dirname), user, group), ++ ] ++ mockobj.assert_has_calls(calls, any_order=False) ++ + def test_dir_is_not_created_if_ensure_dir_false(self): + """Verify directories are not created if ensure_dir_exists is False.""" + dirname = os.path.join(self.tmp, "subdir") +-- +2.27.0 + diff --git a/backport-cc_ubuntu_advantage-Redact-token-from-logs-1726.patch b/backport-cc_ubuntu_advantage-Redact-token-from-logs-1726.patch new file mode 100644 index 0000000000000000000000000000000000000000..316a7cd4b401f62a34e470e4ca579a842b037fd9 --- /dev/null +++ b/backport-cc_ubuntu_advantage-Redact-token-from-logs-1726.patch @@ -0,0 +1,142 @@ +From 3a32188398675793d87f19363af0497035ea9263 Mon Sep 17 00:00:00 2001 +From: Alberto Contreras +Date: Thu, 15 Sep 2022 22:19:38 +0200 +Subject: [PATCH] cc_ubuntu_advantage: Redact token from logs (#1726) + +Reference:https://github.com/canonical/cloud-init/commit/3a32188398675793d87f19363af0497035ea9263 +Conflict:test diffs. +--- + cloudinit/config/cc_ubuntu_advantage.py | 11 ++++---- + .../config/tests/test_ubuntu_advantage.py | 26 +++++++++---------- + 2 files changed, 19 insertions(+), 18 deletions(-) + +diff --git a/cloudinit/config/cc_ubuntu_advantage.py b/cloudinit/config/cc_ubuntu_advantage.py +index e55772a..033d4d2 100644 +--- a/cloudinit/config/cc_ubuntu_advantage.py ++++ b/cloudinit/config/cc_ubuntu_advantage.py +@@ -85,7 +85,7 @@ schema = { + __doc__ = get_schema_doc(schema) # Supplement python help() + + LOG = logging.getLogger(__name__) +- ++REDACTED = "REDACTED" + + def configure_ua(token=None, enable=None): + """Call ua commandline client to attach or enable services.""" +@@ -108,12 +108,13 @@ def configure_ua(token=None, enable=None): + enable = [] + + attach_cmd = ['ua', 'attach', token] +- LOG.debug('Attaching to Ubuntu Advantage. %s', ' '.join(attach_cmd)) ++ redacted_cmd = attach_cmd[:-1] + [REDACTED] ++ LOG.debug("Attaching to Ubuntu Advantage. 
%s", " ".join(redacted_cmd)) + try: +- subp.subp(attach_cmd) ++ subp.subp(attach_cmd, logstring=redacted_cmd) + except subp.ProcessExecutionError as e: +- msg = 'Failure attaching Ubuntu Advantage:\n{error}'.format( +- error=str(e)) ++ error = str(e).replace(token, REDACTED) ++ msg = f"Failure attaching Ubuntu Advantage:\n{error}" + util.logexc(LOG, msg) + raise RuntimeError(msg) from e + enable_errors = [] +diff --git a/cloudinit/config/tests/test_ubuntu_advantage.py b/cloudinit/config/tests/test_ubuntu_advantage.py +index db7fb72..eadeddb 100644 +--- a/cloudinit/config/tests/test_ubuntu_advantage.py ++++ b/cloudinit/config/tests/test_ubuntu_advantage.py +@@ -36,23 +36,23 @@ class TestConfigureUA(CiTestCase): + self.assertEqual( + 'Failure attaching Ubuntu Advantage:\nUnexpected error while' + ' running command.\nCommand: -\nExit code: -\nReason: -\n' +- 'Stdout: Invalid token SomeToken\nStderr: -', ++ 'Stdout: Invalid token REDACTED\nStderr: -', + str(context_manager.exception)) + + @mock.patch('%s.subp.subp' % MPATH) + def test_configure_ua_attach_with_token(self, m_subp): + """When token is provided, attach the machine to ua using the token.""" + configure_ua(token='SomeToken') +- m_subp.assert_called_once_with(['ua', 'attach', 'SomeToken']) ++ m_subp.assert_called_once_with(['ua', 'attach', 'SomeToken'], logstring=["ua", "attach", "REDACTED"],) + self.assertEqual( +- 'DEBUG: Attaching to Ubuntu Advantage. ua attach SomeToken\n', ++ 'DEBUG: Attaching to Ubuntu Advantage. ua attach REDACTED\n', + self.logs.getvalue()) + + @mock.patch('%s.subp.subp' % MPATH) + def test_configure_ua_attach_on_service_error(self, m_subp): + """all services should be enabled and then any failures raised""" + +- def fake_subp(cmd, capture=None): ++ def fake_subp(cmd, capture=None, logstring=None): + fail_cmds = [['ua', 'enable', svc] for svc in ['esm', 'cc']] + if cmd in fail_cmds and capture: + svc = cmd[-1] +@@ -65,7 +65,7 @@ class TestConfigureUA(CiTestCase): + configure_ua(token='SomeToken', enable=['esm', 'cc', 'fips']) + self.assertEqual( + m_subp.call_args_list, +- [mock.call(['ua', 'attach', 'SomeToken']), ++ [mock.call(['ua', 'attach', 'SomeToken'], logstring=["ua", "attach", "REDACTED"],), + mock.call(['ua', 'enable', 'esm'], capture=True), + mock.call(['ua', 'enable', 'cc'], capture=True), + mock.call(['ua', 'enable', 'fips'], capture=True)]) +@@ -87,9 +87,9 @@ class TestConfigureUA(CiTestCase): + def test_configure_ua_attach_with_empty_services(self, m_subp): + """When services is an empty list, do not auto-enable attach.""" + configure_ua(token='SomeToken', enable=[]) +- m_subp.assert_called_once_with(['ua', 'attach', 'SomeToken']) ++ m_subp.assert_called_once_with(['ua', 'attach', 'SomeToken'], logstring=["ua", "attach", "REDACTED"],) + self.assertEqual( +- 'DEBUG: Attaching to Ubuntu Advantage. ua attach SomeToken\n', ++ 'DEBUG: Attaching to Ubuntu Advantage. ua attach REDACTED\n', + self.logs.getvalue()) + + @mock.patch('%s.subp.subp' % MPATH) +@@ -98,10 +98,10 @@ class TestConfigureUA(CiTestCase): + configure_ua(token='SomeToken', enable=['fips']) + self.assertEqual( + m_subp.call_args_list, +- [mock.call(['ua', 'attach', 'SomeToken']), ++ [mock.call(['ua', 'attach', 'SomeToken'], logstring=["ua", "attach", "REDACTED"],), + mock.call(['ua', 'enable', 'fips'], capture=True)]) + self.assertEqual( +- 'DEBUG: Attaching to Ubuntu Advantage. ua attach SomeToken\n', ++ 'DEBUG: Attaching to Ubuntu Advantage. 
ua attach REDACTED\n', + self.logs.getvalue()) + + @mock.patch('%s.maybe_install_ua_tools' % MPATH, mock.MagicMock()) +@@ -111,12 +111,12 @@ class TestConfigureUA(CiTestCase): + configure_ua(token='SomeToken', enable='fips') + self.assertEqual( + m_subp.call_args_list, +- [mock.call(['ua', 'attach', 'SomeToken']), ++ [mock.call(['ua', 'attach', 'SomeToken'], logstring=["ua", "attach", "REDACTED"],), + mock.call(['ua', 'enable', 'fips'], capture=True)]) + self.assertEqual( + 'WARNING: ubuntu_advantage: enable should be a list, not a' + ' string; treating as a single enable\n' +- 'DEBUG: Attaching to Ubuntu Advantage. ua attach SomeToken\n', ++ 'DEBUG: Attaching to Ubuntu Advantage. ua attach REDACTED\n', + self.logs.getvalue()) + + @mock.patch('%s.subp.subp' % MPATH) +@@ -125,11 +125,11 @@ class TestConfigureUA(CiTestCase): + configure_ua(token='SomeToken', enable={'deffo': 'wont work'}) + self.assertEqual( + m_subp.call_args_list, +- [mock.call(['ua', 'attach', 'SomeToken'])]) ++ [mock.call(['ua', 'attach', 'SomeToken'], logstring=["ua", "attach", "REDACTED"],)]) + self.assertEqual( + 'WARNING: ubuntu_advantage: enable should be a list, not a' + ' dict; skipping enabling services\n' +- 'DEBUG: Attaching to Ubuntu Advantage. ua attach SomeToken\n', ++ 'DEBUG: Attaching to Ubuntu Advantage. ua attach REDACTED\n', + self.logs.getvalue()) + + +-- +2.33.0 + + diff --git a/backport-macs-ignore-duplicate-MAC-for-devs-with-driver-drive.patch b/backport-macs-ignore-duplicate-MAC-for-devs-with-driver-drive.patch new file mode 100644 index 0000000000000000000000000000000000000000..8a13f032a6eed0873abb622b855d30bab2632ef6 --- /dev/null +++ b/backport-macs-ignore-duplicate-MAC-for-devs-with-driver-drive.patch @@ -0,0 +1,97 @@ +From ceb66d4552e6c4c0da60cb08fdf208fb90c34660 Mon Sep 17 00:00:00 2001 +From: Chad Smith +Date: Thu, 23 Mar 2023 06:08:12 -0600 +Subject: [PATCH] macs: ignore duplicate MAC for devs with driver driver + qmi_wwan (#2090) + +Reference:https://github.com/canonical/cloud-init/commit/ceb66d4552e6c4c0da60cb08fdf208fb90c34660 +Conflict:NA + +Another physical modem which has duplicate MAC addresses. +Cloud-init needs to ignore the subordinate devices which are +associated with the qmi_wwan driver. + +Fixes network rendering for the following modems: +Quectel EG25 +Quectel RM510Q-GLHA +Sierra Wireless MC7455 + +LP: #2008888 +--- + cloudinit/net/__init__.py | 2 +- + tests/unittests/test_net.py | 40 ++++++++++++++++++++++++++----------- + 2 files changed, 29 insertions(+), 13 deletions(-) + +diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py +index a308c98..a503210 100644 +--- a/cloudinit/net/__init__.py ++++ b/cloudinit/net/__init__.py +@@ -935,7 +935,7 @@ def get_interfaces_by_mac_on_linux(blacklist_drivers=None) -> dict: + # cloud-init happens to enumerate network interfaces before drivers + # have fully initialized the leader/subordinate relationships for + # those devices or switches. 
+- if driver == "mscc_felix" or driver == "fsl_enetc": ++ if driver in ("fsl_enetc", "mscc_felix", "qmi_wwan"): + LOG.debug( + "Ignoring duplicate macs from '%s' and '%s' due to " + "driver '%s'.", +diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py +index e5dd979..7cde102 100644 +--- a/tests/unittests/test_net.py ++++ b/tests/unittests/test_net.py +@@ -5682,23 +5682,39 @@ class TestGetInterfacesByMac(CiTestCase): + ib_addr_eth_format: 'ib0', ib_addr: 'ib0'} + self.assertEqual(expected, result) + +- def test_duplicate_ignored_macs(self): +- # LP: #199792 +- self._data = copy.deepcopy(self._data) +- self._data["macs"]["swp0"] = "9a:57:7d:78:47:c0" +- self._data["macs"]["swp1"] = "9a:57:7d:78:47:c0" +- self._data["own_macs"].append("swp0") +- self._data["own_macs"].append("swp1") +- self._data["drivers"]["swp0"] = "mscc_felix" +- self._data["drivers"]["swp1"] = "mscc_felix" +- self._mock_setup() ++ ++@pytest.mark.parametrize("driver", ("mscc_felix", "fsl_enetc", "qmi_wwan")) ++@mock.patch("cloudinit.net.get_sys_class_path") ++@mock.patch("cloudinit.util.system_info", return_value={"variant": "ubuntu"}) ++class TestDuplicateMac: ++ def test_duplicate_ignored_macs( ++ self, _get_system_info, get_sys_class_path, driver, tmpdir, caplog ++ ): ++ # Create sysfs representation of network devices and drivers in tmpdir ++ sys_net_path = tmpdir.join("class/net") ++ get_sys_class_path.return_value = sys_net_path.strpath + "/" ++ net_data = { ++ "swp0/address": "9a:57:7d:78:47:c0", ++ "swp0/addr_assign_type": "0", ++ "swp0/device/dev_id": "something", ++ "swp1/address": "9a:57:7d:78:47:c0", ++ "swp1/addr_assign_type": "0", ++ "swp1/device/dev_id": "something else", ++ } ++ populate_dir(sys_net_path.strpath, net_data) ++ # Symlink for device driver ++ driver_path = tmpdir.join(f"module/{driver}") ++ driver_path.ensure_dir() ++ sys_net_path.join("swp0/device/driver").mksymlinkto(driver_path) ++ sys_net_path.join("swp1/device/driver").mksymlinkto(driver_path) ++ + with does_not_raise(): + net.get_interfaces_by_mac() + pattern = ( + "Ignoring duplicate macs from 'swp[0-1]' and 'swp[0-1]' due to " +- "driver 'mscc_felix'." ++ f"driver '{driver}'." + ) +- assert re.search(pattern, self.logs.getvalue()) ++ assert re.search(pattern, caplog.text) + + + class TestInterfacesSorting(CiTestCase): +-- +2.33.0 + + diff --git a/backport-net-dhcp-catch-dhclient-failures-and-raise-NoDHCPLea.patch b/backport-net-dhcp-catch-dhclient-failures-and-raise-NoDHCPLea.patch new file mode 100644 index 0000000000000000000000000000000000000000..e64f0d1e5cc74360542517cc8f0d11e9f5019400 --- /dev/null +++ b/backport-net-dhcp-catch-dhclient-failures-and-raise-NoDHCPLea.patch @@ -0,0 +1,118 @@ +From c82ace920a743c6e6797536416018d9680b8fa7e Mon Sep 17 00:00:00 2001 +From: Chris Patterson +Date: Wed, 29 Mar 2023 17:30:13 -0400 +Subject: [PATCH] net/dhcp: catch dhclient failures and raise NoDHCPLeaseError + (#2083) + +Some variants of dhclient will exit with non-zero codes on lease +failure. For example, on RHEL 8.7: +``` +[cpatterson@test-rhel87 ~]$ sudo /usr/sbin/dhclient -1 -v -lf +/tmp/my.lease -pf /tmp/my.pid bridge2nowhere -sf /bin/ +true +Internet Systems Consortium DHCP Client 4.3.6 +Copyright 2004-2017 Internet Systems Consortium. +All rights reserved. +For info, please visit https://www.isc.org/software/dhcp/ + +Listening on LPF/bridge2nowhere/42:ef:d5:38:1d:19 +Sending on LPF/bridge2nowhere/42:ef:d5:38:1d:19 +Sending on Socket/fallback +Created duid "\000\004E<\225X\232\304J\337\243\026T\324\243O\270\177". 
+DHCPDISCOVER on bridge2nowhere to 255.255.255.255 port 67 interval 4 +(xid=0x777bc142) +DHCPDISCOVER on bridge2nowhere to 255.255.255.255 port 67 interval 7 +(xid=0x777bc142) +DHCPDISCOVER on bridge2nowhere to 255.255.255.255 port 67 interval 13 +(xid=0x777bc142) +DHCPDISCOVER on bridge2nowhere to 255.255.255.255 port 67 interval 6 +(xid=0x777bc142) +No DHCPOFFERS received. +Unable to obtain a lease on first try. Exiting. + +[cpatterson@test-rhel87 ~]$ echo $? +2 +``` + +This results in an unhandled subp.ProcessExecutionError exception. +Catch these failures and re-raise as NoDHCPLeaseError. + +Signed-off-by: Chris Patterson +--- + cloudinit/net/dhcp.py | 11 ++++++++++- + cloudinit/net/tests/test_dhcp.py | 23 +++++++++++++++++++++-- + 2 files changed, 31 insertions(+), 3 deletions(-) + +diff --git a/cloudinit/net/dhcp.py b/cloudinit/net/dhcp.py +index 3f4b041..e5f36e1 100644 +--- a/cloudinit/net/dhcp.py ++++ b/cloudinit/net/dhcp.py +@@ -239,7 +239,16 @@ def dhcp_discovery(dhclient_cmd_path, interface, cleandir, dhcp_log_func=None): + subp.subp(['ip', 'link', 'set', 'dev', interface, 'up'], capture=True) + cmd = [sandbox_dhclient_cmd, '-1', '-v', '-lf', lease_file, + '-pf', pid_file, interface, '-sf', '/bin/true'] +- out, err = subp.subp(cmd, capture=True) ++ try: ++ out, err = subp.subp(cmd, capture=True) ++ except subp.ProcessExecutionError as error: ++ LOG.debug( ++ "dhclient exited with code: %s stderr: %r stdout: %r", ++ error.exit_code, ++ error.stderr, ++ error.stdout, ++ ) ++ raise NoDHCPLeaseError from error + + # Wait for pid file and lease file to appear, and for the process + # named by the pid file to daemonize (have pid 1 as its parent). If we +diff --git a/cloudinit/net/tests/test_dhcp.py b/cloudinit/net/tests/test_dhcp.py +index 28b4ecf..de4b461 100644 +--- a/cloudinit/net/tests/test_dhcp.py ++++ b/cloudinit/net/tests/test_dhcp.py +@@ -3,14 +3,15 @@ + import httpretty + import os + import signal ++import pytest + from textwrap import dedent + + import cloudinit.net as net + from cloudinit.net.dhcp import ( +- InvalidDHCPLeaseFileError, maybe_perform_dhcp_discovery, ++ InvalidDHCPLeaseFileError, NoDHCPLeaseError, maybe_perform_dhcp_discovery, + parse_dhcp_lease_file, dhcp_discovery, networkd_load_leases, + parse_static_routes) +-from cloudinit.util import ensure_file, write_file ++from cloudinit.util import ensure_file, subp, write_file + from cloudinit.tests.helpers import ( + CiTestCase, HttprettyTestCase, mock, populate_dir, wrap_and_call) + +@@ -268,6 +269,24 @@ class TestDHCPDiscoveryClean(CiTestCase): + 'Skip dhcp_discovery: Unable to find fallback nic.', + self.logs.getvalue()) + ++ @mock.patch("cloudinit.net.dhcp.find_fallback_nic", return_value="eth9") ++ @mock.patch("cloudinit.net.dhcp.os.remove") ++ @mock.patch("cloudinit.net.dhcp.subp.subp") ++ def test_dhclient_exits_with_error(self, m_subp, m_remove, m_fallback): ++ """Log and do nothing when nic is absent and no fallback is found.""" ++ m_subp.side_effect = [ ++ ("", ""), ++ subp.ProcessExecutionError(exit_code=-5), ++ ] ++ ++ with pytest.raises(NoDHCPLeaseError): ++ maybe_perform_dhcp_discovery() ++ ++ self.assertIn( ++ "dhclient exited with code: -5", ++ self.logs.getvalue(), ++ ) ++ + def test_provided_nic_does_not_exist(self): + """When the provided nic doesn't exist, log a message and no-op.""" + self.assertEqual([], maybe_perform_dhcp_discovery('idontexist')) +-- +2.33.0 + + diff --git a/backport-net-skip-duplicate-mac-check-for-netvsc-nic-and-its-.patch 
b/backport-net-skip-duplicate-mac-check-for-netvsc-nic-and-its-.patch new file mode 100644 index 0000000000000000000000000000000000000000..7d6ebbd68c128de9fbaa14d4494933407f7ac13e --- /dev/null +++ b/backport-net-skip-duplicate-mac-check-for-netvsc-nic-and-its-.patch @@ -0,0 +1,161 @@ +From 24bf6147712655fc36a5d714a081853ea37e0312 Mon Sep 17 00:00:00 2001 +From: Anh Vo +Date: Fri, 18 Nov 2022 14:31:27 -0500 +Subject: [PATCH] net: skip duplicate mac check for netvsc nic and its VF + (#1853) + +Reference:https://github.com/canonical/cloud-init/commit/24bf6147712655fc36a5d714a081853ea37e0312 +Conflict:format diff. + +When accelerated network is enabled on Azure, the host presents +two network interfaces with the same mac address to the VM: +a synthetic nic (netvsc) and a VF nic, which is enslaved to the synthetic +nic. + +The net module is already excluding slave nics when enumerating +interfaces. However, if cloud-init starts enumerating after the kernel +makes the VF visible to userspace, but before the enslaving has finished, +cloud-init will see two nics with duplicate mac. + +This change will skip the duplicate mac error if one of the two nics +with duplicate mac is a netvsc nic + +LP: #1844191 +--- + cloudinit/net/__init__.py | 39 +++++++++++++++++++++++++++++++++---- + tests/unittests/test_net.py | 32 ++++++++++++++++++++++++++---- + 2 files changed, 63 insertions(+), 8 deletions(-) + +diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py +index fba133e..96ce6f5 100644 +--- a/cloudinit/net/__init__.py ++++ b/cloudinit/net/__init__.py +@@ -896,13 +896,44 @@ def get_interfaces_by_mac_on_linux(blacklist_drivers=None) -> dict: + + Bridges and any devices that have a 'stolen' mac are excluded.""" + ret = {} +- for name, mac, _driver, _devid in get_interfaces( ++ driver_map: dict = {} ++ for name, mac, driver, _devid in get_interfaces( + blacklist_drivers=blacklist_drivers): + if mac in ret: +- raise RuntimeError( +- "duplicate mac found! both '%s' and '%s' have mac '%s'" % +- (name, ret[mac], mac)) ++ raise_duplicate_mac_error = True ++ msg = "duplicate mac found! both '%s' and '%s' have mac '%s'." % ( ++ name, ++ ret[mac], ++ mac, ++ ) ++ # Hyper-V netvsc driver will register a VF with the same mac ++ # ++ # The VF will be enslaved to the master nic shortly after ++ # registration. If cloud-init starts enumerating the interfaces ++ # before the completion of the enslaving process, it will see ++ # two different nics with duplicate mac. Cloud-init should ignore ++ # the slave nic (which does not have hv_netvsc driver). ++ if driver != driver_map[mac]: ++ if driver_map[mac] == "hv_netvsc": ++ LOG.warning( ++ msg + " Ignoring '%s' due to driver '%s' and " ++ "'%s' having driver hv_netvsc." ++ % (name, driver, ret[mac]) ++ ) ++ continue ++ if driver == "hv_netvsc": ++ raise_duplicate_mac_error = False ++ LOG.warning( ++ msg + " Ignoring '%s' due to driver '%s' and " ++ "'%s' having driver hv_netvsc." 
++                        % (ret[mac], driver_map[mac], name)
++                    )
++
++            if raise_duplicate_mac_error:
++                raise RuntimeError(msg)
++
+         ret[mac] = name
++        driver_map[mac] = driver
+ 
+     # Pretend that an Infiniband GUID is an ethernet address for Openstack
+     # configuration purposes
+diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py
+index ce19498..0db4442 100644
+--- a/tests/unittests/test_net.py
++++ b/tests/unittests/test_net.py
+@@ -5519,7 +5519,8 @@ class TestGetInterfacesByMac(CiTestCase):
+              'bridges': ['bridge1'],
+              'vlans': ['bond1.101'],
+              'own_macs': ['enp0s1', 'enp0s2', 'bridge1-nic', 'bridge1',
+-                          'bond1.101', 'lo'],
++                          'bond1.101', 'lo',
++                          "netvsc0-vf", "netvsc0", "netvsc1","netvsc1-vf"],
+              'macs': {'enp0s1': 'aa:aa:aa:aa:aa:01',
+                       'enp0s2': 'aa:aa:aa:aa:aa:02',
+                       'bond1': 'aa:aa:aa:aa:aa:01',
+@@ -5528,12 +5529,27 @@ class TestGetInterfacesByMac(CiTestCase):
+                       'bridge1-nic': 'aa:aa:aa:aa:aa:03',
+                       'lo': '00:00:00:00:00:00',
+                       'greptap0': '00:00:00:00:00:00',
+-                      'tun0': None}}
++                      "greptap0": "00:00:00:00:00:00",
++                      "netvsc0-vf": "aa:aa:aa:aa:aa:04",
++                      "netvsc0": "aa:aa:aa:aa:aa:04",
++                      "netvsc1-vf": "aa:aa:aa:aa:aa:05",
++                      "netvsc1": "aa:aa:aa:aa:aa:05",
++                      'tun0': None},
++             "drivers": {
++                 "netvsc0": "hv_netvsc",
++                 "netvsc0-vf": "foo",
++                 "netvsc1": "hv_netvsc",
++                 "netvsc1-vf": "bar",
++             },
++    }
+     data = {}
+ 
+     def _se_get_devicelist(self):
+         return list(self.data['devices'])
+ 
++    def _se_device_driver(self, name):
++        return self.data["drivers"].get(name, None)
++
+     def _se_get_interface_mac(self, name):
+         return self.data['macs'][name]
+ 
+@@ -5553,7 +5569,7 @@ class TestGetInterfacesByMac(CiTestCase):
+     def _mock_setup(self):
+         self.data = copy.deepcopy(self._data)
+         self.data['devices'] = set(list(self.data['macs'].keys()))
+-        mocks = ('get_devicelist', 'get_interface_mac', 'is_bridge',
++        mocks = ('get_devicelist', "device_driver", 'get_interface_mac', 'is_bridge',
+                  'interface_has_own_mac', 'is_vlan', 'get_ib_interface_hwaddr')
+         self.mocks = {}
+         for n in mocks:
+@@ -5567,6 +5583,11 @@ class TestGetInterfacesByMac(CiTestCase):
+         self.data['macs']['bridge1-nic'] = self.data['macs']['enp0s1']
+         self.assertRaises(RuntimeError, net.get_interfaces_by_mac)
+ 
++    def test_raise_exception_on_duplicate_netvsc_macs(self):
++        self._mock_setup()
++        self.data["macs"]["netvsc0"] = self.data["macs"]["netvsc1"]
++        self.assertRaises(RuntimeError, net.get_interfaces_by_mac)
++
+     def test_excludes_any_without_mac_address(self):
+         self._mock_setup()
+         ret = net.get_interfaces_by_mac()
+@@ -5580,7 +5601,10 @@ class TestGetInterfacesByMac(CiTestCase):
+             [mock.call('enp0s1'), mock.call('bond1')], any_order=True)
+         self.assertEqual(
+             {'aa:aa:aa:aa:aa:01': 'enp0s1', 'aa:aa:aa:aa:aa:02': 'enp0s2',
+-             'aa:aa:aa:aa:aa:03': 'bridge1-nic', '00:00:00:00:00:00': 'lo'},
++             'aa:aa:aa:aa:aa:03': 'bridge1-nic', '00:00:00:00:00:00': 'lo',
++             "aa:aa:aa:aa:aa:04": "netvsc0",
++             "aa:aa:aa:aa:aa:05": "netvsc1",
++             },
+             ret)
+ 
+     def test_excludes_bridges(self):
+-- 
+2.33.0
+
+
diff --git a/cloud-init.spec b/cloud-init.spec
index bb33ee28a958b4f02787d33ecfa67cd09996a869..74ac3aa6d233fd8108a34cb266dad20d6c686c30 100644
--- a/cloud-init.spec
+++ b/cloud-init.spec
@@ -1,6 +1,6 @@
 Name: cloud-init
 Version: 21.4
-Release: 19
+Release: 20
 Summary: the defacto multi-distribution package that handles early initialization of a cloud instance.
 License: ASL 2.0 or GPLv3
 URL: http://launchpad.net/cloud-init
@@ -47,6 +47,25 @@
 Patch6016: backport-util-add-Version-class.patch
 
 Patch9000: Fix-the-error-level-logs-displayed-for-the-cloud-init-local-service.patch
+Patch6017: backport-cc_ubuntu_advantage-Redact-token-from-logs-1726.patch
+Patch6018: backport-Do-not-silently-ignore-integer-uid-1280.patch
+Patch6019: backport-Ensure-network-ready-before-cloud-init-service-runs.patch
+Patch6020: backport-Fix-exception-when-no-activator-found-1129.patch
+Patch6021: backport-Fix-KeyError-in-iproute-pformat-3287.patch
+Patch6022: backport-Handle-non-existent-ca-cert-config-situation-2073.patch
+Patch6023: backport-net-dhcp-catch-dhclient-failures-and-raise-NoDHCPLea.patch
+Patch6024: backport-Don-t-fail-if-IB-and-Ethernet-devices-collide-1411.patch
+Patch6025: backport-net-skip-duplicate-mac-check-for-netvsc-nic-and-its-.patch
+Patch6026: backport-Ignore-duplicate-macs-from-mscc_felix-and-fsl_enetc.patch
+Patch6027: backport-macs-ignore-duplicate-MAC-for-devs-with-driver-drive.patch
+Patch6028: backport-Set-ownership-for-new-folders-in-Write-Files-Module-.patch
+Patch6029: backport-Fix-network-v2-metric-rendering-4220.patch
+Patch6030: backport-Fix-IPv6-netmask-format-for-sysconfig-1215.patch
+Patch6031: backport-Fix-KeyError-when-rendering-sysconfig-IPv6-routes.patch
+Patch6032: backport-Fix-default-route-rendering-on-v2-ipv6-1973.patch
+Patch6033: backport-Drop-support-of-sk-keys-in-cc_ssh-1451.patch
+Patch6034: backport-Do-not-generate-dsa-and-ed25519-key-types-when-crypt.patch
+
 BuildRequires: pkgconfig(systemd) python3-devel python3-setuptools systemd
 BuildRequires: iproute python3-configobj python3-httpretty >= 0.8.14-2
 BuildRequires: python3-jinja2 python3-jsonpatch python3-jsonschema
@@ -155,6 +174,28 @@ fi
 %exclude /usr/share/doc/*
 
 %changelog
+* Mon Aug 21 2023 shixuantong - 21.4-20
+- Type:bugfix
+- CVE:NA
+- SUG:NA
+- DESC:Do not generate dsa and ed25519 key types when crypto FIPS mode is enabled
+       Don't fail if IB and Ethernet devices 'collide'
+       Fix default route rendering on v2 ipv6
+       Fix IPv6 netmask format for sysconfig
+       Fix KeyError when rendering sysconfig IPv6 routes
+       Fix network v2 metric rendering
+       Ignore duplicate macs from mscc_felix and fsl_enetc
+       macs: ignore duplicate MAC for devs with driver driver qmi_wwan
+       net/dhcp: catch dhclient failures and raise NoDHCPLeaseError
+       net: skip duplicate mac check for netvsc nic and its VF
+       Set ownership for new folders in Write Files Module
+       cc_ubuntu_advantage: Redact token from logs
+       Do not silently ignore integer uid
+       Ensure network ready before cloud-init service runs
+       Fix exception when no activator found
+       Fix KeyError in iproute pformat
+       Handle non existent ca-cert-config situation
+
 * Fri Aug 18 2023 shixuantong - 21.4-19
 - backport upstream patches