From 79f0f59f967be0bcb05d60461af5417141cbe5ab Mon Sep 17 00:00:00 2001
From: Yuhang Wei
Date: Tue, 30 Jan 2024 16:05:56 +0800
Subject: [PATCH] fix: clippy warnings and fmt code

Fix clippy warnings: take slices (&[T]) instead of &Vec<T> parameters,
drop redundant format! calls in with_context closures, use struct field
init shorthand and matches!, replace len() > 0 with !is_empty(), and
remove needless returns and `self: &Self` receivers. Reformat the code
with rustfmt (import grouping and ordering, trailing commas in match
arms). Also reduce the visibility of the *_impl helpers in AgentImpl
and add more context to error messages.

Signed-off-by: Yuhang Wei
---
Note: an illustrative sketch of the &Vec<T> -> &[T] parameter change is
appended after the patch trailer for reference.

 KubeOS-Rust/agent/src/rpc/agent_impl.rs       |  8 +-
 KubeOS-Rust/cli/src/client.rs                 |  2 +-
 KubeOS-Rust/manager/src/sys_mgmt/config.rs    | 21 ++---
 .../manager/src/sys_mgmt/containerd_image.rs  |  9 +-
 .../manager/src/sys_mgmt/docker_image.rs      |  4 +-
 .../proxy/src/controller/apiserver_mock.rs    | 67 +++++++++------
 .../proxy/src/controller/controller.rs        | 75 ++++++++--------
 KubeOS-Rust/proxy/src/controller/mod.rs       |  2 +-
 KubeOS-Rust/proxy/src/controller/utils.rs     | 12 +--
 KubeOS-Rust/proxy/src/drain.rs                | 86 +++++++++----------
 KubeOS-Rust/proxy/src/main.rs                 |  2 +-
 11 files changed, 148 insertions(+), 140 deletions(-)

diff --git a/KubeOS-Rust/agent/src/rpc/agent_impl.rs b/KubeOS-Rust/agent/src/rpc/agent_impl.rs
index 8aef4140..5f3a3259 100644
--- a/KubeOS-Rust/agent/src/rpc/agent_impl.rs
+++ b/KubeOS-Rust/agent/src/rpc/agent_impl.rs
@@ -56,7 +56,7 @@ impl Default for AgentImpl {
 }
 
 impl AgentImpl {
-    pub fn prepare_upgrade_impl(&self, req: UpgradeRequest) -> Result<Response> {
+    fn prepare_upgrade_impl(&self, req: UpgradeRequest) -> Result<Response> {
         let _lock = self.mutex.lock().unwrap();
         debug!("Received an 'prepare upgrade' request: {:?}", req);
         info!("Start preparing for upgrading to version: {}", req.version);
@@ -75,7 +75,7 @@ impl AgentImpl {
         Ok(Response { status: AgentStatus::UpgradeReady })
     }
 
-    pub fn upgrade_impl(&self) -> Result<Response> {
+    fn upgrade_impl(&self) -> Result<Response> {
         let _lock = self.mutex.lock().unwrap();
         info!("Start to upgrade");
         let command_executor = RealCommandExecutor {};
@@ -90,7 +90,7 @@ impl AgentImpl {
         Ok(Response { status: AgentStatus::Upgraded })
     }
 
-    pub fn configure_impl(&self, mut req: ConfigureRequest) -> Result<Response> {
+    fn configure_impl(&self, mut req: ConfigureRequest) -> Result<Response> {
         let _lock = self.mutex.lock().unwrap();
         debug!("Received a 'configure' request: {:?}", req);
         info!("Start to configure");
@@ -107,7 +107,7 @@ impl AgentImpl {
         Ok(Response { status: AgentStatus::Configured })
     }
 
-    pub fn rollback_impl(&self) -> Result<Response> {
+    fn rollback_impl(&self) -> Result<Response> {
         let _lock = self.mutex.lock().unwrap();
         info!("Start to rollback");
         let command_executor = RealCommandExecutor {};
diff --git a/KubeOS-Rust/cli/src/client.rs b/KubeOS-Rust/cli/src/client.rs
index 9765a425..37518bdc 100644
--- a/KubeOS-Rust/cli/src/client.rs
+++ b/KubeOS-Rust/cli/src/client.rs
@@ -30,7 +30,7 @@ impl Client {
         Client { json_rpc_client: JsonRPCClient::with_transport(UdsTransport::new(socket_path)) }
     }
 
-    pub fn build_request<'a>(&self, command: &'a str, params: &'a Vec<Box<RawValue>>) -> Request<'a> {
+    pub fn build_request<'a>(&self, command: &'a str, params: &'a [Box<RawValue>]) -> Request<'a> {
         let json_rpc_request = self.json_rpc_client.build_request(command, params);
         let request = Request(json_rpc_request);
         request
diff --git a/KubeOS-Rust/manager/src/sys_mgmt/config.rs b/KubeOS-Rust/manager/src/sys_mgmt/config.rs
index 33efdca8..138df9da 100644
--- a/KubeOS-Rust/manager/src/sys_mgmt/config.rs
+++ b/KubeOS-Rust/manager/src/sys_mgmt/config.rs
@@ -105,10 +105,10 @@ impl Configuration for KernelSysctlPersist {
             config_path = &config.config_path;
         }
         debug!("kernel.sysctl.persist config_path: \"{}\"", config_path);
-        create_config_file(config_path).with_context(|| format!("Failed to find config path"))?;
+        create_config_file(config_path).with_context(|| format!("Failed to find config path \"{}\"", config_path))?;
         let configs = get_and_set_configs(&mut config.contents, config_path)
-            .with_context(|| format!("Failed to set persist kernel configs"))?;
+            .with_context(|| format!("Failed to set persist kernel configs \"{}\"", config_path))?;
-        write_configs_to_file(config_path, &configs).with_context(|| format!("Failed to write configs to file"))?;
+        write_configs_to_file(config_path, &configs).with_context(|| "Failed to write configs to file".to_string())?;
         Ok(())
     }
 }
@@ -125,7 +125,7 @@ fn create_config_file(config_path: &str) -> Result<()> {
 }
 
 fn get_and_set_configs(expect_configs: &mut HashMap<String, KeyInfo>, config_path: &str) -> Result<Vec<String>> {
-    let f = File::open(config_path)?;
+    let f = File::open(config_path).with_context(|| format!("Failed to open config path \"{}\"", config_path))?;
     let mut configs_write = Vec::new();
     for line in io::BufReader::new(f).lines() {
         let line = line?;
@@ -169,7 +169,7 @@ fn write_configs_to_file(config_path: &str, configs: &Vec<String>) -> Result<()>
     Ok(())
 }
 
-fn handle_delete_key(config_kv: &Vec<&str>, new_config_info: &KeyInfo) -> String {
+fn handle_delete_key(config_kv: &[&str], new_config_info: &KeyInfo) -> String {
     let key = config_kv[0];
     if config_kv.len() == 1 && new_config_info.value.is_empty() {
         info!("Delete configuration key: \"{}\"", key);
@@ -190,7 +190,7 @@ fn handle_delete_key(config_kv: &Vec<&str>, new_config_info: &KeyInfo) -> String
     String::new()
 }
 
-fn handle_update_key(config_kv: &Vec<&str>, new_config_info: &KeyInfo) -> String {
+fn handle_update_key(config_kv: &[&str], new_config_info: &KeyInfo) -> String {
     let key = config_kv[0];
     if !new_config_info.operation.is_empty() {
         warn!(
@@ -259,12 +259,13 @@ impl Configuration for GrubCmdline {
             self.is_cur_partition
         } else {
             self.get_config_partition(RealCommandExecutor {})
-                .with_context(|| format!("Failed to get config partition"))?
+                .with_context(|| "Failed to get config partition".to_string())?
         };
         debug!("Config_partition: {} (false means partition A, true means partition B)", config_partition);
         let configs = get_and_set_grubcfg(&mut config.contents, &self.grub_path, config_partition)
-            .with_context(|| format!("Failed to set grub configs"))?;
+            .with_context(|| "Failed to set grub configs".to_string())?;
-        write_configs_to_file(&self.grub_path, &configs)?;
+        write_configs_to_file(&self.grub_path, &configs)
+            .with_context(|| "Failed to write configs to file".to_string())?;
         Ok(())
     }
 }
@@ -286,7 +287,7 @@ fn get_and_set_grubcfg(
     grub_path: &str,
     config_partition: bool,
 ) -> Result<Vec<String>> {
-    let f = File::open(grub_path)?;
+    let f = File::open(grub_path).with_context(|| format!("Failed to open grub.cfg \"{}\"", grub_path))?;
     let re_find_cur_linux = r"^\s*linux.*root=.*";
     let re = Regex::new(re_find_cur_linux)?;
     let mut configs_write = Vec::new();
diff --git a/KubeOS-Rust/manager/src/sys_mgmt/containerd_image.rs b/KubeOS-Rust/manager/src/sys_mgmt/containerd_image.rs
index 727949b6..80caf291 100644
--- a/KubeOS-Rust/manager/src/sys_mgmt/containerd_image.rs
+++ b/KubeOS-Rust/manager/src/sys_mgmt/containerd_image.rs
@@ -73,12 +73,12 @@ impl CtrImageHandler {
             .to_str()
             .ok_or_else(|| anyhow!("Failed to get mount path: {}", self.paths.mount_path.display()))?;
         info!("Start getting rootfs {}", image_name);
-        self.check_and_unmount(mount_path).with_context(|| format!("Failed to clean containerd environment"))?;
+        self.check_and_unmount(mount_path).with_context(|| "Failed to clean containerd environment".to_string())?;
         self.executor
             .run_command("ctr", &["-n", DEFAULT_NAMESPACE, "images", "mount", "--rw", image_name, mount_path])?;
         // copy os.tar from mount_path to its partent dir
         self.copy_file(self.paths.mount_path.join(&self.paths.rootfs_file), &self.paths.tar_path, permission)?;
-        self.check_and_unmount(mount_path).with_context(|| format!("Failed to clean containerd environment"))?;
+        self.check_and_unmount(mount_path).with_context(|| "Failed to clean containerd environment".to_string())?;
         Ok(())
     }
@@ -103,10 +103,7 @@ impl CtrImageHandler {
 
 #[cfg(test)]
 mod tests {
-    use std::{
-        io::Write,
-        path::{Path, PathBuf},
-    };
+    use std::{io::Write, path::PathBuf};
 
     use mockall::mock;
     use tempfile::NamedTempFile;
diff --git a/KubeOS-Rust/manager/src/sys_mgmt/docker_image.rs b/KubeOS-Rust/manager/src/sys_mgmt/docker_image.rs
index f6971427..4d97552c 100644
--- a/KubeOS-Rust/manager/src/sys_mgmt/docker_image.rs
+++ b/KubeOS-Rust/manager/src/sys_mgmt/docker_image.rs
@@ -52,7 +52,7 @@ impl DockerImageHandler {
     fn get_rootfs_archive(&self, req: &UpgradeRequest) -> Result<()> {
         let image_name = &req.container_image;
         info!("Start getting rootfs {}", image_name);
-        self.check_and_rm_container().with_context(|| format!("Failed to remove kubeos-temp container"))?;
+        self.check_and_rm_container().with_context(|| "Failed to remove kubeos-temp container".to_string())?;
         debug!("Create container {}", self.container_name);
         let container_id =
             self.executor.run_command_with_output("docker", &["create", "--name", &self.container_name, image_name])?;
@@ -65,7 +65,7 @@ impl DockerImageHandler {
                 self.paths.update_path.to_str().unwrap(),
             ],
         )?;
-        self.check_and_rm_container().with_context(|| format!("Failed to remove kubeos-temp container"))?;
+        self.check_and_rm_container().with_context(|| "Failed to remove kubeos-temp container".to_string())?;
         Ok(())
     }
diff --git a/KubeOS-Rust/proxy/src/controller/apiserver_mock.rs b/KubeOS-Rust/proxy/src/controller/apiserver_mock.rs
index ef5977c1..2b182ca8 100644
--- a/KubeOS-Rust/proxy/src/controller/apiserver_mock.rs
+++ b/KubeOS-Rust/proxy/src/controller/apiserver_mock.rs
@@ -1,3 +1,35 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2023. All rights reserved.
+ * KubeOS is licensed under the Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *     http://license.coscl.org.cn/MulanPSL2
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR
+ * PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+
+use std::collections::BTreeMap;
+
+use anyhow::Result;
+use cli::{
+    client::Client,
+    method::{
+        callable_method::RpcMethod, configure::ConfigureMethod, prepare_upgrade::PrepareUpgradeMethod,
+        rollback::RollbackMethod, upgrade::UpgradeMethod,
+    },
+};
+use http::{Request, Response};
+use hyper::{body::to_bytes, Body};
+use k8s_openapi::api::core::v1::{Node, NodeSpec, NodeStatus, NodeSystemInfo, Pod};
+use kube::{
+    api::ObjectMeta,
+    core::{ListMeta, ObjectList},
+    Client as KubeClient, Resource, ResourceExt,
+};
+use mockall::mock;
+
 use self::mock_error::Error;
 use super::{
     agentclient::*,
@@ -10,23 +42,6 @@ use crate::controller::{
     values::{LABEL_OSINSTANCE, LABEL_UPGRADING, NODE_STATUS_IDLE},
     ProxyController,
 };
-use anyhow::Result;
-use cli::client::Client;
-use cli::method::{
-    callable_method::RpcMethod, configure::ConfigureMethod, prepare_upgrade::PrepareUpgradeMethod,
-    rollback::RollbackMethod, upgrade::UpgradeMethod,
-};
-use http::{Request, Response};
-use hyper::{body::to_bytes, Body};
-use k8s_openapi::api::core::v1::Pod;
-use k8s_openapi::api::core::v1::{Node, NodeSpec, NodeStatus, NodeSystemInfo};
-use kube::{
-    api::ObjectMeta,
-    core::{ListMeta, ObjectList},
-};
-use kube::{Client as KubeClient, Resource, ResourceExt};
-use mockall::mock;
-use std::collections::BTreeMap;
 
 type ApiServerHandle = tower_test::mock::Handle<Request<Body>, Response<Body>>;
 pub struct ApiServerVerifier(ApiServerHandle);
@@ -66,7 +81,7 @@ impl ApiServerVerifier {
                         .unwrap()
                         .handler_node_get(osi)
                         .await
-                }
+                },
                 Testcases::UpgradeNormal(osi) => {
                     self.handler_osinstance_get_exist(osi.clone())
                         .await
@@ -85,7 +100,7 @@ impl ApiServerVerifier {
                         .unwrap()
                         .handler_node_pod_list_get(osi)
                         .await
-                }
+                },
                 Testcases::UpgradeUpgradeconfigsVersionMismatch(osi) => {
                     self.handler_osinstance_get_exist(osi.clone())
                         .await
@@ -104,7 +119,7 @@ impl ApiServerVerifier {
                        .unwrap()
                        .handler_osinstance_patch_nodestatus_idle(osi)
                        .await
-                }
+                },
                 Testcases::UpgradeOSInstaceNodestatusConfig(osi) => {
                     self.handler_osinstance_get_exist(osi.clone())
                         .await
@@ -114,7 +129,7 @@ impl ApiServerVerifier {
                         .unwrap()
                         .handler_node_get_with_label(osi.clone())
                         .await
-                }
+                },
                 Testcases::UpgradeOSInstaceNodestatusIdle(osi) => {
                     self.handler_osinstance_get_exist(osi.clone())
                         .await
@@ -130,7 +145,7 @@ impl ApiServerVerifier {
                         .unwrap()
                         .handler_node_uncordon(osi)
                         .await
-                }
+                },
                 Testcases::ConfigNormal(osi) => {
                     self.handler_osinstance_get_exist(osi.clone())
                         .await
@@ -146,7 +161,7 @@ impl ApiServerVerifier {
                         .unwrap()
                         .handler_osinstance_patch_nodestatus_idle(osi)
                         .await
-                }
+                },
                 Testcases::ConfigVersionMismatchReassign(osi) => {
                     self.handler_osinstance_get_exist(osi.clone())
                         .await
@@ -159,7 +174,7 @@ impl ApiServerVerifier {
                         .unwrap()
                         .handler_osinstance_patch_nodestatus_idle(osi)
                         .await
-                }
+                },
                 Testcases::ConfigVersionMismatchUpdate(osi) => {
                    self.handler_osinstance_get_exist(osi.clone())
                        .await
@@ -172,7 +187,7 @@ impl ApiServerVerifier {
                        .unwrap()
                        .handler_osinstance_patch_spec_sysconfig_v2(osi)
                        .await
-                }
+                },
                 Testcases::Rollback(osi) => {
                     self.handler_osinstance_get_exist(osi.clone())
                         .await
@@ -191,7 +206,7 @@ impl ApiServerVerifier {
                        .unwrap()
                        .handler_node_pod_list_get(osi)
                        .await
-                }
+                },
             }
             .expect("Case completed without errors");
         })
diff --git a/KubeOS-Rust/proxy/src/controller/controller.rs b/KubeOS-Rust/proxy/src/controller/controller.rs
index ad443802..80a85d1c 100644
--- a/KubeOS-Rust/proxy/src/controller/controller.rs
+++ b/KubeOS-Rust/proxy/src/controller/controller.rs
@@ -72,7 +72,7 @@ pub async fn reconcile(
                 )
                 .await?;
             return Ok(REQUEUE_NORMAL);
-        }
+        },
         ConfigOperation::UpdateConfig => {
             debug!("start update config");
             osinstance.spec.sysconfigs = os_cr.spec.sysconfigs.clone();
@@ -81,8 +81,8 @@ pub async fn reconcile(
                 .update_osinstance_spec(&osinstance.name(), &namespace, &osinstance.spec)
                 .await?;
             return Ok(REQUEUE_ERROR);
-        }
-        _ => {}
+        },
+        _ => {},
     }
     proxy_controller.set_config(&mut osinstance, ConfigType::SysConfig).await?;
     proxy_controller
@@ -92,17 +92,17 @@ pub async fn reconcile(
     if os_cr.spec.opstype == NODE_STATUS_CONFIG {
         return Err(Error::UpgradeBeforeConfig);
     }
-        if let ConfigOperation::Reassign = ConfigType::UpgradeConfig.check_config_version(&os, &osinstance) {
-            debug!("start reassign");
-            proxy_controller
-                .refresh_node(
-                    node,
-                    osinstance,
-                    &get_config_version(os_cr.spec.upgradeconfigs.as_ref()),
-                    ConfigType::UpgradeConfig,
-                )
-                .await?;
-            return Ok(REQUEUE_NORMAL);
+    if let ConfigOperation::Reassign = ConfigType::UpgradeConfig.check_config_version(&os, &osinstance) {
+        debug!("start reassign");
+        proxy_controller
+            .refresh_node(
+                node,
+                osinstance,
+                &get_config_version(os_cr.spec.upgradeconfigs.as_ref()),
+                ConfigType::UpgradeConfig,
+            )
+            .await?;
+        return Ok(REQUEUE_NORMAL);
     }
     if node.labels().contains_key(LABEL_UPGRADING) {
         if osinstance.spec.nodestatus == NODE_STATUS_IDLE {
@@ -157,12 +157,12 @@ impl ProxyController {
             Ok(osi) => {
                 debug!("osinstance is exist {:?}", osi.name());
                 Ok(())
-            }
+            },
             Err(kube::Error::Api(ErrorResponse {
                 reason, ..
             })) if &reason == "NotFound" => {
                 info!("Create OSInstance {}", node_name);
                 self.controller_client.create_osinstance(node_name, namespace).await?;
                 Ok(())
-            }
+            },
             Err(err) => Err(Error::KubeClient { source: err }),
         }
     }
@@ -251,15 +251,15 @@ impl ProxyController {
         match config_info.configs.and_then(convert_to_agent_config) {
             Some(agent_configs) => {
                 match self.agent_client.configure_method(ConfigInfo { configs: agent_configs }) {
-                    Ok(_resp) => {}
+                    Ok(_resp) => {},
                     Err(e) => {
                         return Err(Error::Agent { source: e });
-                    }
+                    },
                 }
-            }
+            },
             None => {
                 info!("config is none, No content can be configured.");
-            }
+            },
         };
         self.update_osi_status(osinstance, config_type).await?;
     }
@@ -284,32 +284,32 @@ impl ProxyController {
                 };
 
                 match self.agent_client.prepare_upgrade_method(upgrade_info) {
-                    Ok(_resp) => {}
+                    Ok(_resp) => {},
                     Err(e) => {
                         return Err(Error::Agent { source: e });
-                    }
+                    },
                 }
                 self.evict_node(&node.name(), os_cr.spec.evictpodforce).await?;
                 match self.agent_client.upgrade_method() {
-                    Ok(_resp) => {}
+                    Ok(_resp) => {},
                     Err(e) => {
                         return Err(Error::Agent { source: e });
-                    }
+                    },
                 }
-            }
+            },
             OPERATION_TYPE_ROLLBACK => {
                 self.evict_node(&node.name(), os_cr.spec.evictpodforce).await?;
                 match self.agent_client.rollback_method() {
-                    Ok(_resp) => {}
+                    Ok(_resp) => {},
                     Err(e) => {
                         return Err(Error::Agent { source: e });
-                    }
+                    },
                }
-            }
+            },
             _ => {
                 return Err(Error::Operation { value: os_cr.spec.opstype.clone() });
-            }
+            },
         }
         Ok(())
     }
@@ -320,12 +320,12 @@ impl ProxyController {
         node_api.cordon(node_name).await?;
         info!("Cordon node Successfully{}, start drain nodes", node_name);
         match self.drain_node(node_name, evict_pod_force).await {
-            Ok(()) => {}
+            Ok(()) => {},
             Err(e) => {
                 node_api.uncordon(node_name).await?;
                 info!("Drain node {} error, uncordon node successfully", node_name);
                 return Err(e);
-            }
+            },
         }
         Ok(())
     }
@@ -351,7 +351,7 @@ fn convert_to_agent_config(configs: Configs) -> Option<Vec<Sysconfig>> {
                     contents: contents_tmp,
                 };
                 agent_configs.push(config_tmp)
-            }
+            },
             None => {
                 info!(
                     "model {} which has configpath {} do not has any contents no need to configure",
@@ -359,7 +359,7 @@ fn convert_to_agent_config(configs: Configs) -> Option<Vec<Sysconfig>> {
                    config.configpath.unwrap_or_default()
                );
                continue;
-            }
+            },
         };
     }
     if agent_configs.is_empty() {
@@ -427,11 +427,14 @@ pub mod reconciler_error {
 
 #[cfg(test)]
 mod test {
-    use super::{error_policy, reconcile, Context, OSInstance, ProxyController, OS};
-    use crate::controller::apiserver_mock::{timeout_after_5s, MockAgentCallClient, Testcases};
-    use crate::controller::ControllerClient;
     use std::env;
 
+    use super::{error_policy, reconcile, Context, OSInstance, ProxyController, OS};
+    use crate::controller::{
+        apiserver_mock::{timeout_after_5s, MockAgentCallClient, Testcases},
+        ControllerClient,
+    };
+
     #[tokio::test]
     async fn test_create_osinstance_with_no_upgrade_or_configuration() {
         let (test_proxy_controller, fakeserver) = ProxyController::<MockAgentCallClient>::test();
diff --git a/KubeOS-Rust/proxy/src/controller/mod.rs b/KubeOS-Rust/proxy/src/controller/mod.rs
index e30c8df7..b8a4e6e5 100644
--- a/KubeOS-Rust/proxy/src/controller/mod.rs
+++ b/KubeOS-Rust/proxy/src/controller/mod.rs
@@ -21,6 +21,6 @@ mod values;
 
 pub use agentclient::{AgentCallClient, AgentClient};
 pub use apiclient::ControllerClient;
-pub use controller::{error_policy, reconcile, reconciler_error::Error, ProxyController};
+pub use controller::{error_policy, reconcile, ProxyController};
 pub use crd::OS;
 pub use values::SOCK_PATH;
diff --git a/KubeOS-Rust/proxy/src/controller/utils.rs b/KubeOS-Rust/proxy/src/controller/utils.rs
index 82d960b4..148ca24d 100644
--- a/KubeOS-Rust/proxy/src/controller/utils.rs
+++ b/KubeOS-Rust/proxy/src/controller/utils.rs
@@ -56,7 +56,7 @@ impl ConfigType {
                 );
                 return ConfigOperation::Reassign;
             }
-        }
+        },
         ConfigType::SysConfig => {
             let os_config_version = get_config_version(os.spec.sysconfigs.as_ref());
             let osi_config_version = get_config_version(osinstance.spec.sysconfigs.as_ref());
@@ -78,7 +78,7 @@ impl ConfigType {
                     return ConfigOperation::UpdateConfig;
                 }
             }
-        }
+        },
     };
     ConfigOperation::DoNothing
 }
@@ -96,7 +96,7 @@ impl ConfigType {
                 status_config_version = get_config_version(None);
             }
             configs = osinstance.spec.upgradeconfigs.clone();
-        }
+        },
         ConfigType::SysConfig => {
             spec_config_version = get_config_version(osinstance.spec.sysconfigs.as_ref());
             if let Some(osinstance_status) = osinstance.status.as_ref() {
@@ -105,7 +105,7 @@ impl ConfigType {
                 status_config_version = get_config_version(None);
             }
             configs = osinstance.spec.sysconfigs.clone();
-        }
+        },
     }
     debug!(
         "osinstance soec config version is {},status config version is {}",
@@ -127,7 +127,7 @@ impl ConfigType {
                     sysconfigs: None,
                 })
             }
-        }
+        },
         ConfigType::SysConfig => {
             if let Some(osi_status) = &mut osinstance.status {
                 osi_status.sysconfigs = osinstance.spec.sysconfigs.clone();
@@ -135,7 +135,7 @@ impl ConfigType {
                 osinstance.status =
                     Some(OSInstanceStatus { upgradeconfigs: None, sysconfigs: osinstance.spec.sysconfigs.clone() })
             }
-        }
+        },
     }
 }
diff --git a/KubeOS-Rust/proxy/src/drain.rs b/KubeOS-Rust/proxy/src/drain.rs
index 72836f98..64417df3 100644
--- a/KubeOS-Rust/proxy/src/drain.rs
+++ b/KubeOS-Rust/proxy/src/drain.rs
@@ -66,7 +66,7 @@ async fn get_pods_deleted(
         Ok(pods @ ObjectList { .. }) => pods,
         Err(err) => {
             return Err(GetPodListsError { source: err, node_name: node_name.to_string() });
-        }
+        },
     };
     let mut filterd_pods_list: Vec<Pod> = Vec::new();
     let mut filterd_err: Vec = Vec::new();
@@ -81,7 +81,7 @@ async fn get_pods_deleted(
             filterd_pods_list.push(pod);
         }
     }
-    if filterd_err.len() > 0 {
+    if !filterd_err.is_empty() {
         return Err(DeletePodsError { errors: filterd_err });
     }
     Ok(filterd_pods_list.into_iter())
@@ -189,14 +189,14 @@ async fn wait_for_deletion(k8s_client: &kube::Client, pod: &Pod) -> Result<(), e
                 let name = (&p).name_any();
                 info!("Pod {} deleted.", name);
                 break;
-            }
+            },
             Ok(_) => {
                 info!("Pod '{}' is not yet deleted. Waiting {}s.", pod.name_any(), EVERY_DELETION_CHECK.as_secs_f64());
Waiting {}s.", pod.name_any(), EVERY_DELETION_CHECK.as_secs_f64()); - } + }, Err(kube::Error::Api(e)) if e.code == response_error_not_found => { info!("Pod {} is deleted.", pod.name_any()); break; - } + }, Err(e) => { error!( "Get pod {} reported error: '{}', whether pod is deleted cannot be determined, waiting {}s.", @@ -204,7 +204,7 @@ async fn wait_for_deletion(k8s_client: &kube::Client, pod: &Pod) -> Result<(), e e, EVERY_DELETION_CHECK.as_secs_f64() ); - } + }, } if start_time.elapsed() > TIMEOUT { return Err(WaitDeletionError { pod_name: pod.name_any(), max_wait: TIMEOUT }); @@ -223,25 +223,25 @@ fn get_pod_api_with_namespace(client: &kube::Client, pod: &Pod) -> Api { } trait NameAny { - fn name_any(self: &Self) -> String; + fn name_any(&self) -> String; } impl NameAny for &Pod { - fn name_any(self: &Self) -> String { + fn name_any(&self) -> String { self.metadata.name.clone().or_else(|| self.metadata.generate_name.clone()).unwrap_or_default() } } trait PodFilter { - fn filter(self: &Self, pod: &Pod) -> Box; + fn filter(&self, pod: &Pod) -> Box; } struct FinishedOrFailedFilter {} impl PodFilter for FinishedOrFailedFilter { - fn filter(self: &Self, pod: &Pod) -> Box { + fn filter(&self, pod: &Pod) -> Box { return match pod.status.as_ref() { Some(PodStatus { phase: Some(phase), .. }) if phase == "Failed" || phase == "Succeeded" => { FilterResult::create_filter_result(true, "", PodDeleteStatus::Okay) - } + }, _ => FilterResult::create_filter_result(false, "", PodDeleteStatus::Okay), }; } @@ -251,7 +251,7 @@ struct DaemonFilter { force: bool, } impl PodFilter for DaemonFilter { - fn filter(self: &Self, pod: &Pod) -> Box { + fn filter(&self, pod: &Pod) -> Box { if let FilterResult { result: true, .. } = self.finished_or_failed_filter.filter(pod).as_ref() { return FilterResult::create_filter_result(true, "", PodDeleteStatus::Okay); } @@ -269,25 +269,25 @@ impl PodFilter for DaemonFilter { let description = format!("Cannot drain Pod '{}': Pod is member of a DaemonSet", pod.name_any()); Box::new(FilterResult { result: false, desc: description, status: PodDeleteStatus::Error }) } - } + }, _ => FilterResult::create_filter_result(true, "", PodDeleteStatus::Okay), }; } } impl DaemonFilter { fn new(force: bool) -> DaemonFilter { - return DaemonFilter { finished_or_failed_filter: FinishedOrFailedFilter {}, force: force }; + DaemonFilter { finished_or_failed_filter: FinishedOrFailedFilter {}, force } } } struct MirrorFilter {} impl PodFilter for MirrorFilter { - fn filter(self: &Self, pod: &Pod) -> Box { + fn filter(&self, pod: &Pod) -> Box { return match pod.metadata.annotations.as_ref() { Some(annotations) if annotations.contains_key("kubernetes.io/config.mirror") => { let description = format!("Ignore Pod '{}': Pod is a static Mirror Pod", pod.name_any()); FilterResult::create_filter_result(false, &description.to_string(), PodDeleteStatus::Warning) - } + }, _ => FilterResult::create_filter_result(true, "", PodDeleteStatus::Okay), }; } @@ -298,7 +298,7 @@ struct LocalStorageFilter { force: bool, } impl PodFilter for LocalStorageFilter { - fn filter(self: &Self, pod: &Pod) -> Box { + fn filter(&self, pod: &Pod) -> Box { if let FilterResult { result: true, .. 
         if let FilterResult { result: true, .. } = self.finished_or_failed_filter.filter(pod).as_ref() {
             return FilterResult::create_filter_result(true, "", PodDeleteStatus::Okay);
         }
@@ -312,14 +312,14 @@ impl PodFilter for LocalStorageFilter {
                     let description = format!("Cannot drain Pod '{}': Pod has local Storage", pod.name_any());
                     Box::new(FilterResult { result: false, desc: description, status: PodDeleteStatus::Error })
                 }
-            }
+            },
             _ => FilterResult::create_filter_result(true, "", PodDeleteStatus::Okay),
         };
     }
 }
 impl LocalStorageFilter {
     fn new(force: bool) -> LocalStorageFilter {
-        return LocalStorageFilter { finished_or_failed_filter: FinishedOrFailedFilter {}, force: force };
+        LocalStorageFilter { finished_or_failed_filter: FinishedOrFailedFilter {}, force }
     }
 }
 struct UnreplicatedFilter {
@@ -327,7 +327,7 @@ struct UnreplicatedFilter {
     force: bool,
 }
 impl PodFilter for UnreplicatedFilter {
-    fn filter(self: &Self, pod: &Pod) -> Box<FilterResult> {
+    fn filter(&self, pod: &Pod) -> Box<FilterResult> {
         if let FilterResult { result: true, .. } = self.finished_or_failed_filter.filter(pod).as_ref() {
             return FilterResult::create_filter_result(true, "", PodDeleteStatus::Okay);
         }
@@ -338,18 +338,18 @@ impl PodFilter for UnreplicatedFilter {
             return FilterResult::create_filter_result(true, "", PodDeleteStatus::Okay);
         }
 
-        return if !is_replicated && self.force {
+        if !is_replicated && self.force {
             let description = format!("Force drain Pod '{}': Pod is unreplicated", pod.name_any());
             Box::new(FilterResult { result: true, desc: description, status: PodDeleteStatus::Warning })
         } else {
             let description = format!("Cannot drain Pod '{}': Pod is unreplicated", pod.name_any());
             Box::new(FilterResult { result: false, desc: description, status: PodDeleteStatus::Error })
-        };
+        }
     }
 }
 impl UnreplicatedFilter {
     fn new(force: bool) -> UnreplicatedFilter {
-        return UnreplicatedFilter { finished_or_failed_filter: FinishedOrFailedFilter {}, force: force };
+        UnreplicatedFilter { finished_or_failed_filter: FinishedOrFailedFilter {}, force }
     }
 }
 
@@ -357,7 +357,7 @@ struct DeletedFilter {
     delete_wait_timeout: Duration,
 }
 impl PodFilter for DeletedFilter {
-    fn filter(self: &Self, pod: &Pod) -> Box<FilterResult> {
+    fn filter(&self, pod: &Pod) -> Box<FilterResult> {
         let now = Instant::now().elapsed();
         return match pod.metadata.deletion_timestamp.as_ref() {
             Some(time)
@@ -365,7 +365,7 @@ impl PodFilter for DeletedFilter {
                     && now - Duration::from_secs(time.0.timestamp() as u64) >= self.delete_wait_timeout =>
             {
                 FilterResult::create_filter_result(true, "", PodDeleteStatus::Okay)
-            }
+            },
             _ => FilterResult::create_filter_result(true, "", PodDeleteStatus::Okay),
         };
     }
 }
@@ -379,14 +379,14 @@ struct CombinedFilter {
     unreplicated_filter: UnreplicatedFilter,
 }
 impl PodFilter for CombinedFilter {
-    fn filter(self: &Self, pod: &Pod) -> Box<FilterResult> {
+    fn filter(&self, pod: &Pod) -> Box<FilterResult> {
         let mut filter_res = self.deleted_filter.filter(pod);
         if !filter_res.result {
             info!("{}", filter_res.desc);
             return Box::new(FilterResult {
                 result: filter_res.result,
                 desc: filter_res.desc.clone(),
-                status: filter_res.status.clone(),
+                status: filter_res.status,
             });
         }
         filter_res = self.daemon_filter.filter(pod);
@@ -395,7 +395,7 @@ impl PodFilter for CombinedFilter {
             return Box::new(FilterResult {
                 result: filter_res.result,
                 desc: filter_res.desc.clone(),
-                status: filter_res.status.clone(),
+                status: filter_res.status,
             });
         }
         filter_res = self.mirror_filter.filter(pod);
@@ -404,7 +404,7 @@ impl PodFilter for CombinedFilter {
             return Box::new(FilterResult {
                 result: filter_res.result,
                 desc: filter_res.desc.clone(),
-                status: filter_res.status.clone(),
+                status: filter_res.status,
             });
         }
         filter_res = self.local_storage_filter.filter(pod);
@@ -413,7 +413,7 @@ impl PodFilter for CombinedFilter {
             return Box::new(FilterResult {
                 result: filter_res.result,
                 desc: filter_res.desc.clone(),
-                status: filter_res.status.clone(),
+                status: filter_res.status,
             });
         }
         filter_res = self.unreplicated_filter.filter(pod);
@@ -422,22 +422,22 @@ impl PodFilter for CombinedFilter {
             return Box::new(FilterResult {
                 result: filter_res.result,
                 desc: filter_res.desc.clone(),
-                status: filter_res.status.clone(),
+                status: filter_res.status,
             });
         }
-        return FilterResult::create_filter_result(true, "", PodDeleteStatus::Okay);
+        FilterResult::create_filter_result(true, "", PodDeleteStatus::Okay)
     }
 }
 impl CombinedFilter {
     fn new(force: bool) -> CombinedFilter {
-        return CombinedFilter {
+        CombinedFilter {
             deleted_filter: DeletedFilter { delete_wait_timeout: TIMEOUT },
             daemon_filter: DaemonFilter::new(force),
             mirror_filter: MirrorFilter {},
             local_storage_filter: LocalStorageFilter::new(force),
             unreplicated_filter: UnreplicatedFilter::new(force),
-        };
+        }
     }
 }
 
@@ -454,7 +454,7 @@ struct FilterResult {
 }
 impl FilterResult {
     fn create_filter_result(result: bool, desc: &str, status: PodDeleteStatus) -> Box<FilterResult> {
-        Box::new(FilterResult { result: result, desc: desc.to_string(), status: status })
+        Box::new(FilterResult { result, desc: desc.to_string(), status })
     }
 }
 
@@ -468,13 +468,11 @@ impl ErrorHandleStrategy {
         let backoff =
             ExponentialBackoff::from_millis(RETRY_BASE_DELAY.as_millis() as u64).max_delay(RETRY_MAX_DELAY).map(jitter);
 
-        return match self {
-            Self::TolerateStrategy => {
-                return backoff.take(0);
-            }
+        match self {
+            Self::TolerateStrategy => backoff.take(0),
             Self::RetryStrategy => backoff.take(MAX_RETRIES_TIMES),
-        };
+        }
     }
 }
 
@@ -482,13 +480,7 @@ impl tokio_retry::Condition<error::EvictionError> for ErrorHandleStrategy {
     fn should_retry(&mut self, error: &error::EvictionError) -> bool {
         match self {
             Self::TolerateStrategy => false,
-            Self::RetryStrategy => {
-                if let error::EvictionError::EvictionErrorRetry { .. } = error {
-                    true
-                } else {
-                    false
-                }
-            }
+            Self::RetryStrategy => matches!(error, error::EvictionError::EvictionErrorRetry { .. }),
         }
     }
 }
diff --git a/KubeOS-Rust/proxy/src/main.rs b/KubeOS-Rust/proxy/src/main.rs
index ad36b642..5c122ba2 100644
--- a/KubeOS-Rust/proxy/src/main.rs
+++ b/KubeOS-Rust/proxy/src/main.rs
@@ -39,7 +39,7 @@ async fn main() -> Result<()> {
         .run(reconcile, error_policy, Context::new(proxy_controller))
         .for_each(|res| async move {
             match res {
-                Ok(_o) => {}
+                Ok(_o) => {},
                 Err(e) => error!("reconcile failed: {}", e.to_string()),
             }
         })
-- 
Gitee
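P.S. For readers unfamiliar with the clippy::ptr_arg lint behind the &Vec<T> -> &[T]
signature changes above: a &Vec<T> parameter forces callers to hold a Vec, while a
slice also accepts arrays and borrowed buffers via deref coercion, with no cost to
existing callers. The sketch below is illustrative only; the function and names are
made up and are not part of the KubeOS sources:

// clippy::ptr_arg: prefer `&[T]` over `&Vec<T>` in function parameters.
fn sum_lengths(items: &[Box<str>]) -> usize {
    // Works identically whether the caller owns a Vec or an array.
    items.iter().map(|s| s.len()).sum()
}

fn main() {
    let v: Vec<Box<str>> = vec!["a".into(), "bc".into()];
    assert_eq!(sum_lengths(&v), 3); // &Vec<Box<str>> coerces to &[Box<str>]
    assert_eq!(sum_lengths(&["xyz".into()]), 3); // a plain array works too
}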