From ab185a49aa31a0bce7286d5205d448e4bdbbb71d Mon Sep 17 00:00:00 2001 From: yzc1114 Date: Thu, 5 Jun 2025 11:17:44 +0800 Subject: [PATCH] =?UTF-8?q?=E6=94=AF=E6=8C=81=E6=97=A5=E5=BF=97id=E9=93=BE?= =?UTF-8?q?=E8=B7=AF=E8=BF=BD=E8=B8=AA?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- Cargo.toml | 9 + examples/client_example.rs | 177 ++++++++ src/cores/daemons/consensus.rs | 79 ++-- src/cores/daemons/messaging.rs | 31 +- src/cores/daemons/mod.rs | 2 +- src/cores/handlers/README.md | 19 +- src/cores/handlers/api_server.rs | 55 ++- src/cores/handlers/cluster_info.rs | 13 +- src/cores/handlers/consensus.rs | 20 +- src/cores/handlers/datamgr/datamgr_api.rs | 1 - src/cores/handlers/datamgr/route.rs | 35 +- src/cores/handlers/mod.rs | 6 +- src/cores/handlers/network_status/README.md | 78 ++-- src/cores/handlers/network_status/mod.rs | 10 +- src/cores/handlers/network_status/models.rs | 3 +- src/cores/handlers/payload.rs | 52 +-- src/cores/handlers/permissions.rs | 118 ++--- src/cores/handlers/roles.rs | 182 ++++---- src/cores/handlers/router_mgr.rs | 72 ++- src/cores/handlers/router_topo.rs | 15 +- src/cores/handlers/security.rs | 55 ++- src/cores/handlers/test.rs | 13 +- src/cores/handlers/users.rs | 185 ++++---- src/cores/mod.rs | 40 +- src/cores/router.rs | 17 +- src/cores/servers/actix_web/mod.rs | 17 +- src/cores/servers/actix_web/quic.rs | 42 +- src/cores/servers/actix_web/rs422/mod.rs | 4 +- src/cores/servers/actix_web/rs422/server.rs | 2 +- src/cores/servers/actix_web/tcp.rs | 2 +- src/cores/servers/actix_web/udp.rs | 34 +- src/cores/servers/actix_web/utils.rs | 2 +- src/cores/servers/message.rs | 26 +- src/cores/servers/mod.rs | 12 +- src/cores/services/mod.rs | 2 +- src/cores/services/network_status.rs | 42 +- src/cores/state.rs | 2 +- src/db/README.md | 11 + src/db/check_exist.rs | 41 +- src/db/delete.rs | 140 +++--- src/db/get.rs | 464 ++++++++------------ src/db/insert.rs | 70 ++- src/db/mod.rs | 2 +- src/db/network_status_ops.rs | 73 ++- src/db/update.rs | 100 ++--- src/lib.rs | 4 +- src/middleware/mod.rs | 2 +- src/middleware/token.rs | 23 +- src/schema.rs | 2 - src/utils/mod.rs | 6 +- src/utils/password.rs | 1 - src/utils/request_context.rs | 46 ++ src/utils/test.rs | 4 +- src/utils/token.rs | 30 +- src/utils/uuid.rs | 3 +- 55 files changed, 1401 insertions(+), 1095 deletions(-) create mode 100644 examples/client_example.rs create mode 100644 src/utils/request_context.rs diff --git a/Cargo.toml b/Cargo.toml index b47536a..3cda616 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -14,6 +14,10 @@ readme = "README.md" name = "basic_example" required-features = ["eventbus", "servers", "test"] +[[example]] +name = "client_example" +required-features = ["eventbus", "servers", "test"] + #[[example]] #name = "api_generator" #required-features = [] @@ -46,6 +50,9 @@ test = ["utils", "eventbus", "os_socket"] messaging = ["eventbus"] os_socket = ["os_socket_comms"] +[dev-dependencies] +ctrlc = "3.4.5" + [dependencies] # crates os_socket_comms = { path = "crates/os_socket_comms", optional = true } @@ -100,3 +107,5 @@ futures-util = "0.3.31" serialport = "=4.6.1" reqwest = { version = "0.11", features = ["json", "blocking"] } +tracing = "0.1.41" +tracing-subscriber = { version = "0.3.19", features = ["std", "env-filter"] } diff --git a/examples/client_example.rs b/examples/client_example.rs new file mode 100644 index 0000000..8e81598 --- /dev/null +++ b/examples/client_example.rs @@ -0,0 +1,177 @@ +use client_rust::config::ClientConfig; +use 
client_rust::traits::{APIClient, APIClientExt}; +use client_rust::ClientSet; +use env_logger::{Builder, Target}; +use fleetmodv2::api_server::ResourcesParams; +use fleetmodv2::resources::models::{Metadata, Pod}; +use std::collections::HashMap; +use std::fs; +use std::path::Path; +use std::sync::Arc; +use feventbus::impls::messaging::messaging::Messaging; +use tracing_subscriber::layer::SubscriberExt; +use tracing_subscriber::{fmt, EnvFilter}; +use tracing_subscriber::util::SubscriberInitExt; +use fleet_apiserver::utils::test; + +fn setup_logger() { + let mut builder = Builder::from_default_env(); + builder.target(Target::Stdout); + builder.init(); +} + +pub fn init_logging() { + tracing_subscriber::registry() + .with(fmt::layer()) + .with(EnvFilter::from_default_env()) + .init(); +} + +const TCP_ADDRESS: &str = "127.0.0.1:39090"; +const QUIC_ADDRESS: &str = "127.0.0.1:39092"; + +#[tokio::main] +async fn main() { + init_logging(); + let (msg_cli, _) = test::setup_full_test_env(test::TestServerStartParams::builder().actix_web_tcp_address(TCP_ADDRESS.to_string()).build()).await; + // wait the server to start + tokio::time::sleep(std::time::Duration::from_secs(5)).await; + let cluster_id = match get_cluster_id() { + Ok(cluster_id) => cluster_id, + Err(e) => { + eprintln!("Failed to get cluster_id: {}", e); + return; + } + }; + let cs = init_event_client(msg_cli, cluster_id.as_str()).await; + // let cs = init_tcp_client(cluster_id.as_str()).await; + // let cs = init_quic_client(app_state.cluster_id.as_str()).await; + test_web_client(cs, cluster_id.as_str()).await; + let _ = ctrlc::set_handler(move || { + tracing::info!("Received Ctrl+C!"); + std::process::exit(0); + }); +} + +pub fn get_cluster_id() -> Result> { + let path = Path::new("/root/.config/iscas/fleet/uuid.conf"); + let content = fs::read_to_string(path)?.trim().to_string(); + Ok(content) +} + +async fn init_tcp_client(cluster_id: &str) -> Box { + // let (msg_cli, app_state) = setup_apiserver().await; + let client_config = ClientConfig { + base_url: format!("http://{}", TCP_ADDRESS), + cluster_id: Some(cluster_id.to_string()), + }; + let cs = ClientSet::actix_web(&client_config).await; + cs +} + +async fn init_quic_client(cluster_id: &str) -> Box { + // let (msg_cli, app_state) = setup_apiserver().await; + let client_config = ClientConfig { + base_url: format!("https://{}", QUIC_ADDRESS), + cluster_id: Some(cluster_id.to_string()), + }; + let cs = ClientSet::actix_web_quic(&client_config).await; + cs +} + +async fn init_event_client(msg_cli: Arc, cluster_id: &str) -> Box { + let cs = ClientSet::events_with_cluster_id(cluster_id, msg_cli).await; + cs +} + +async fn test_web_client(cs: Box, cluster_id: &str) { + let pod1 = mock_pod("example-pod1", "default", cluster_id); + let pod2 = mock_pod("example-pod2", "default", cluster_id); + + let pod_resources = cs.resources(); + tracing::debug!("before create pod1: {:?}", pod1); + let created_pod1 = pod_resources.create(&pod1).await.unwrap(); + tracing::debug!("after create pod1: {:?}", created_pod1); + // assert_eq!(pod1, created_pod1); + let pod1 = created_pod1; + + let created_pod2 = pod_resources.create(&pod2).await.unwrap(); + let pod2 = created_pod2; + // assert_eq!(pod2, created_pod2); + + let get_pod1 = pod_resources + .get("v1", "pods", "example-pod1") + .await + .unwrap(); + assert_eq!(pod1, get_pod1); + + let list_pods = pod_resources.list("v1", "pods").await.unwrap(); + let pods = vec![pod1.clone(), pod2.clone()]; + assert_eq!(pods, list_pods); + + let updated_pod1 = { + let mut 
pod = pod1.clone(); + pod.metadata.as_mut().unwrap().labels = + HashMap::from([("key".to_string(), "value".to_string())]); + pod + }; + + let get_updated_pod1 = pod_resources.update(&updated_pod1).await.unwrap(); + // assert_eq!(updated_pod1, get_updated_pod1); + + let _ = pod_resources + .delete_by_args("v1", "pods", "example-pod1") + .await + .unwrap(); + let list_pods = pod_resources.list("v1", "pods").await.unwrap(); + // assert_eq!(vec![pod2.clone()], list_pods); + + let updated_pod2 = { + let mut pod = pod2.clone(); + pod.metadata.as_mut().unwrap().labels = + HashMap::from([("key".to_string(), "value".to_string())]); + pod + }; + let json_diff = json_patch::diff( + &serde_json::to_value(&pod2).unwrap(), + &serde_json::to_value(&updated_pod2).unwrap(), + ); + let _ = pod_resources + .patch_by_params( + ResourcesParams::builder() + .version("v1") + .plural("pods") + .name("example-pod2") + .cluster_id(cluster_id) + .body(serde_json::to_value(json_diff).unwrap()) + .build(), + ) + .await + .unwrap(); + let get_patched_pod2 = pod_resources + .get("v1", "pods", "example-pod2") + .await + .unwrap(); + // assert_eq!(updated_pod2, get_patched_pod2); + + tracing::info!("test_web_client done"); +} + +fn mock_pod(name: &str, namespace: &str, cluster_id: &str) -> Pod { + Pod { + kind: "Pod".to_string(), + api_version: "v1".to_string(), + metadata: Some(Metadata { + name: name.to_string(), + namespace: Some(namespace.to_string()), + cluster_id: Some(cluster_id.to_string()), + uid: Some(format!("pod-uid-{}-{}", name, namespace)), + ..Default::default() + }), + spec: Some(fleetmodv2::resources::models::PodSpec { + node_name: Some("pod_nodename".to_string()), + ..Default::default() + }), + ..Default::default() + } +} diff --git a/src/cores/daemons/consensus.rs b/src/cores/daemons/consensus.rs index 21cffa6..ac00cda 100644 --- a/src/cores/daemons/consensus.rs +++ b/src/cores/daemons/consensus.rs @@ -5,16 +5,20 @@ use client_rust::{ actix_web_client::{ActixWebAPICaller, TransportProtocol}, config, }; -use consensus_kv::{raft::{ - network::{ - AddLearnerRequest, AddLearnerResponse, ChangeMembershipRequest, - ChangeMembershipResponse, GetMetricsResponse, SnapshotRequest, +use consensus_kv::{ + raft::{ + network::{ + AddLearnerRequest, AddLearnerResponse, ChangeMembershipRequest, + ChangeMembershipResponse, GetMetricsResponse, SnapshotRequest, + }, + types::{ + AppendEntriesRequest, AppendEntriesResponse, Fatal, RaftError, SnapshotResponse, + VoteRequest, VoteResponse, + }, }, - types::{ - AppendEntriesRequest, AppendEntriesResponse, Fatal, RaftError, SnapshotResponse, - VoteRequest, VoteResponse, - }, -}, ConsensusConfig, ConsensusError, ConsensusRPCError, ConsensusResult, ConsensusService, Peer, PeerId, RPCSender, RaftConsensusService}; + ConsensusConfig, ConsensusError, ConsensusRPCError, ConsensusResult, ConsensusService, Peer, + PeerId, RPCSender, RaftConsensusService, +}; use tokio::sync::Mutex; pub struct ConsensusDaemon { @@ -38,32 +42,36 @@ impl ConsensusDaemon { rpc_sender, consensus_kv::ConsensusServiceImpls::Raft, ) - .await - .expect("Failed to init consensus service"); + .await + .expect("Failed to init consensus service"); consensus_svc } pub async fn start(&self) { let consensus_svc = self.new_consensus_svc().await; - log::info!("Consensus service start init, self peer: {:?}", self.consensus_config.self_peer); + tracing::info!( + "Consensus service start init, self peer: {:?}", + self.consensus_config.self_peer + ); // initialize the service let start_pristine_result = consensus_svc 
.start_pristine( - &self.consensus_config + &self + .consensus_config .init_peers .clone() .expect("init peers not provided"), ) .await; if start_pristine_result.is_ok() { - log::info!("Consensus service start pritine successfully"); - self.consensus_svc - .lock() - .await - .replace(consensus_svc); + tracing::info!("Consensus service start pritine successfully"); + self.consensus_svc.lock().await.replace(consensus_svc); return; } - log::error!("Failed to start consensus service: {:?}", start_pristine_result); + tracing::error!( + "Failed to start consensus service: {:?}", + start_pristine_result + ); } pub async fn handle_requests( @@ -72,9 +80,14 @@ impl ConsensusDaemon { ) -> Vec> { let consensus_svc = self.consensus_svc.lock().await; if consensus_svc.is_none() { - return requests.into_iter().map(|_| { - Err(ConsensusError::RPCError(ConsensusRPCError::Application("Not Initialized".to_string()))) - }).collect(); + return requests + .into_iter() + .map(|_| { + Err(ConsensusError::RPCError(ConsensusRPCError::Application( + "Not Initialized".to_string(), + ))) + }) + .collect(); } let consensus_svc = consensus_svc.as_ref().unwrap(); let mut responses = Vec::new(); @@ -89,12 +102,18 @@ impl ConsensusDaemon { let mut curr_svc = self.consensus_svc.lock().await; if curr_svc.is_some() { // 如果当前服务已经存在,则先关闭当前服务 - log::info!("Consensus service shutdown before join, self peer: {:?}", self.consensus_config.self_peer); + tracing::info!( + "Consensus service shutdown before join, self peer: {:?}", + self.consensus_config.self_peer + ); curr_svc.as_ref().unwrap().shutdown().await; } let consensus_svc = self.new_consensus_svc().await; curr_svc.replace(consensus_svc.clone()); - log::info!("Consensus service start join, self peer: {:?}", self.consensus_config.self_peer); + tracing::info!( + "Consensus service start join, self peer: {:?}", + self.consensus_config.self_peer + ); // initialize the service let curr_svc = curr_svc.as_ref().unwrap(); let result = curr_svc.join(&peer).await; @@ -153,10 +172,7 @@ impl ConsensusDaemon { result } - pub async fn add_learner( - &self, - req: AddLearnerRequest, - ) -> AddLearnerResponse { + pub async fn add_learner(&self, req: AddLearnerRequest) -> AddLearnerResponse { let svc = self.consensus_svc.lock().await; if svc.is_none() { return Err(RaftError::Fatal(Fatal::Stopped)); @@ -195,9 +211,10 @@ impl ConsensusDaemon { pub async fn get_peers(&self) -> BTreeMap { let svc = self.consensus_svc.lock().await; if svc.is_none() { - return BTreeMap::from([ - (self.consensus_config.self_peer.id, self.consensus_config.self_peer.clone()) - ]); + return BTreeMap::from([( + self.consensus_config.self_peer.id, + self.consensus_config.self_peer.clone(), + )]); } let svc = svc.as_ref().unwrap(); svc.get_peers() diff --git a/src/cores/daemons/messaging.rs b/src/cores/daemons/messaging.rs index 75c7b17..8c513b1 100644 --- a/src/cores/daemons/messaging.rs +++ b/src/cores/daemons/messaging.rs @@ -39,7 +39,7 @@ impl WatchDaemon { let msg_cli = self.msg_cli.clone(); let topics = self.topics.clone(); tokio::spawn(async move { - log::info!("WatchEventPublisher started receiving messages"); + tracing::info!("WatchEventPublisher started receiving messages"); let mut rx = rx.lock().await; // 锁定接收器 let buffer_limit = 32; let mut buffer = Vec::with_capacity(buffer_limit); @@ -50,7 +50,7 @@ impl WatchDaemon { if received_cnt == 0 { continue; } - log::info!("WatchEventPublisher Received {} messages", received_cnt); + tracing::info!("WatchEventPublisher Received {} messages", received_cnt); 
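The hunks around this point swap the `log::` macros for `tracing::`, which is what allows a per-request log ID to ride along with every line (the commit subject, 支持日志id链路追踪, roughly "support log-ID trace propagation"). Below is a minimal sketch of how that correlation usually works with the `tracing` crate added in this patch; the `request_id` field name and the `handle_with_trace_id` function are illustrative assumptions, not code from the patch, and tokio is assumed as the runtime (it is already a dependency here).

```rust
// Sketch only: `request_id` and `handle_with_trace_id` are assumed names.
use tracing::{info, info_span, Instrument};

async fn handle_with_trace_id(request_id: &str) {
    // Subscribers that print span context (e.g. tracing_subscriber::fmt, as set
    // up in examples/client_example.rs) emit this field with every event that
    // runs inside the span, which is what ties the log lines together.
    let span = info_span!("request", request_id = %request_id);

    async {
        info!("start handling");
        // Work handed to another task keeps the correlation as long as the
        // spawned future is instrumented with the current (request) span.
        tokio::spawn(async { info!("background work"); }.in_current_span());
    }
    .instrument(span)
    .await;
}
```

Plain `log::` macros carry no span context, so this kind of linkage is not available with them, which appears to be the motivation for the wholesale `log` to `tracing` migration throughout this patch.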
Self::publish_watch_events_to_topics(msg_cli.clone(), topics.clone(), &buffer) .await; } @@ -63,7 +63,7 @@ impl WatchDaemon { return; } topics.push(topic.clone()); - log::info!( + tracing::info!( "WatchEventPublisher Added PubSubEventTopic: {:?}", EventTopic::PubSub(topic) ); @@ -109,15 +109,15 @@ impl WatchDaemon { ); let msg_cli = msg_cli.clone(); // 另起一个协程来发布消息 - log::debug!( + tracing::debug!( "WatchEventPublisher Publishing {} message(s) to topic {}", cnt, topic ); if let Err(e) = msg_cli.publish(to_publish).await { - log::error!("WatchEventPublisher Failed to publish event: {}", e); + tracing::error!("WatchEventPublisher Failed to publish event: {}", e); } - log::debug!( + tracing::debug!( "WatchEventPublisher Published {} message(s) to topic {}", cnt, topic @@ -132,7 +132,7 @@ impl WatchDaemon { .send(WatchEventMessageValue::Created(data)) .await { - log::error!("Failed to publish create event: {}", e); + tracing::error!("Failed to publish create event: {}", e); } } @@ -142,7 +142,7 @@ impl WatchDaemon { .send(WatchEventMessageValue::Updated(data)) .await { - log::error!("Failed to publish update event: {}", e); + tracing::error!("Failed to publish update event: {}", e); } } @@ -152,7 +152,7 @@ impl WatchDaemon { .send(WatchEventMessageValue::Deleted(data)) .await { - log::error!("Failed to publish delete event: {}", e); + tracing::error!("Failed to publish delete event: {}", e); } } @@ -213,7 +213,7 @@ pub async fn watch_raw( let topic_str = topic_str_clone.clone(); Box::pin(async move { if message.body.is_none() { - log::warn!( + tracing::warn!( "Watcher on {} Received message with no body", topic_str.as_str() ); @@ -225,7 +225,7 @@ pub async fn watch_raw( for value in body.values.into_iter() { let value = serde_json::to_value(value); if let Err(e) = value { - log::error!( + tracing::error!( "Watcher on {} Failed to convert value: {}", topic_str.as_str(), e @@ -233,11 +233,14 @@ pub async fn watch_raw( continue; } if sx.is_closed() { - log::trace!("Watcher on {} channel is closed", topic_str.as_str()); + tracing::trace!( + "Watcher on {} channel is closed", + topic_str.as_str() + ); return Ok("".to_string()); } if let Err(e) = sx.send(value.unwrap()).await { - log::warn!( + tracing::warn!( "Watcher on {} Failed to send message to channel: {}", topic_str.as_str(), e @@ -250,7 +253,7 @@ pub async fn watch_raw( ) .await; if let Err(e) = subscribe_result { - log::error!( + tracing::error!( "Watcher on {} Failed to subscribe: {}.", topic_str.as_str(), e diff --git a/src/cores/daemons/mod.rs b/src/cores/daemons/mod.rs index 3849c0d..c14aead 100644 --- a/src/cores/daemons/mod.rs +++ b/src/cores/daemons/mod.rs @@ -1,3 +1,3 @@ +pub mod consensus; #[cfg(feature = "messaging")] pub mod messaging; -pub mod consensus; \ No newline at end of file diff --git a/src/cores/handlers/README.md b/src/cores/handlers/README.md index 8cd0fd2..2ed323d 100644 --- a/src/cores/handlers/README.md +++ b/src/cores/handlers/README.md @@ -3,13 +3,17 @@ ## 添加新的 Handler 流程 ### 1. 创建新的 Handler 模块 + 在 `src/cores/handlers` 目录下创建新的模块目录: + ```bash mkdir src/cores/handlers/new_feature ``` ### 2. 创建模块文件 + 创建必要的文件: + ```bash touch src/cores/handlers/new_feature/mod.rs touch src/cores/handlers/new_feature/ops.rs @@ -17,17 +21,19 @@ touch src/cores/handlers/new_feature/README.md ``` ### 3. 
实现 Handler 函数 + 在 `mod.rs` 中实现处理函数,例如网络接口处理: + ```rust pub async fn get_network_interface( - app_state: Arc, + app_state: Arc, _request_context: RequestContext, server_request: ServerRequest, ) -> ServerResult { let mut db_conn = app_state .db_pool .get_connection() .map_err(|e| ServerError::internal_error(&format!("DB pool error: {}", e)))?; - + let params = prepare_network_request_params(&server_request, true)?; let interface_name = params.require_name()?; @@ -40,7 +46,9 @@ pub async fn get_network_interface( ``` ### 4. 实现业务逻辑 + 在 `ops.rs` 中实现具体的业务逻辑: + ```rust pub async fn get_network_interface( conn: &mut PgConnection, @@ -53,7 +61,9 @@ pub async fn get_network_interface( ``` ### 5. 注册路由 + 在 `src/cores/routes.rs` 中添加新的路由: + ```rust pub fn register_routes(app: &mut App) { app.at("/network/interface/:name") @@ -62,18 +72,22 @@ pub fn register_routes(app: &mut App) { ``` ### 6. 添加文档 + 在 `README.md` 中添加 API 文档: + ```markdown # 新功能模块 ## API 接口 ### 获取网络接口信息 + ```bash curl -X GET "http://localhost:8080/network/interface/eth0" ``` ### 响应示例 + ```json { "id": 1, @@ -82,6 +96,7 @@ curl -X GET "http://localhost:8080/network/interface/eth0" "is_up": true } ``` + ``` ## 注意事项 diff --git a/src/cores/handlers/api_server.rs b/src/cores/handlers/api_server.rs index 6c31697..c47835c 100644 --- a/src/cores/handlers/api_server.rs +++ b/src/cores/handlers/api_server.rs @@ -6,6 +6,7 @@ use crate::db::delete::delete_from_kine; use crate::db::get::{get_all_data_from_kine, get_data_from_kine}; use crate::db::insert::insert_kine; use crate::db::update::update_data_in_kine; +use crate::utils::request_context::RequestContext; use fleetmodv2::api_server::{ PubSubEventTopic, ResourcesParams, ServerError, ServerRequest, ServerResult, }; @@ -22,6 +23,7 @@ fn map_diesel_err(error: diesel::result::Error) -> ServerError { pub async fn create_resource( app_state: Arc, + request_context: RequestContext, server_request: ServerRequest, ) -> ServerResult { let ServiceCtx { @@ -29,7 +31,7 @@ pub async fn create_resource( message_daemon, params, .. - } = prepare_ctx(app_state, server_request, None, Some(true))?; + } = prepare_ctx(app_state, request_context, server_request, None, Some(true))?; let ResourcesParams { plural, version, @@ -108,6 +110,7 @@ pub async fn create_resource( pub async fn delete_resource( app_state: Arc, + request_context: RequestContext, server_request: ServerRequest, ) -> ServerResult { //log_request("delete_resource", ¶ms); @@ -116,7 +119,13 @@ pub async fn delete_resource( message_daemon, params, .. - } = prepare_ctx(app_state, server_request, Some(true), Some(false))?; + } = prepare_ctx( + app_state, + request_context, + server_request, + Some(true), + Some(false), + )?; let ResourcesParams { name, plural, @@ -168,6 +177,7 @@ pub async fn delete_resource( pub async fn update_resource( app_state: Arc, + request_context: RequestContext, server_request: ServerRequest, ) -> ServerResult { //log_request("update_resource", ¶ms); @@ -176,7 +186,13 @@ pub async fn update_resource( message_daemon, params, .. - } = prepare_ctx(app_state, server_request, Some(true), Some(true))?; + } = prepare_ctx( + app_state, + request_context, + server_request, + Some(true), + Some(true), + )?; let ResourcesParams { name, plural, @@ -228,14 +244,14 @@ pub async fn update_resource( pub async fn get_resource( app_state: Arc, + request_context: RequestContext, server_request: ServerRequest, ) -> ServerResult { - //log_request("get_resource", ¶ms); let ServiceCtx { mut db_conn, params, .. 
- } = prepare_ctx(app_state, server_request, None, None)?; + } = prepare_ctx(app_state, request_context, server_request, None, None)?; let ResourcesParams { name, plural, @@ -248,7 +264,7 @@ pub async fn get_resource( if name.is_some() { // 如果name不为空,查询单个resource数据 - log::debug!( + tracing::debug!( "get data from kine: {} {} {}", plural, name.as_ref().unwrap(), @@ -295,13 +311,20 @@ pub async fn get_resource( pub async fn patch_resource( app_state: Arc, + request_context: RequestContext, server_request: ServerRequest, ) -> ServerResult { let ServiceCtx { mut db_conn, message_daemon, params, - } = prepare_ctx(app_state, server_request, Some(true), Some(true))?; + } = prepare_ctx( + app_state, + request_context, + server_request, + Some(true), + Some(true), + )?; let ResourcesParams { name, plural, @@ -362,13 +385,20 @@ pub async fn patch_resource( pub async fn watch_resource( app_state: Arc, + request_context: RequestContext, server_request: ServerRequest, ) -> ServerResult> { let ServiceCtx { mut db_conn, message_daemon, params, - } = prepare_ctx(app_state.clone(), server_request, None, Some(false))?; + } = prepare_ctx( + app_state.clone(), + request_context, + server_request, + None, + Some(false), + )?; let ResourcesParams { plural, kind, @@ -403,12 +433,13 @@ struct ServiceCtx { fn prepare_ctx( app_state: Arc, + _: RequestContext, server_request: ServerRequest, name_required: Option, body_required: Option, ) -> ServerResult { let mut params = ResourcesParams::try_from(server_request)?; - log::debug!("request params: {:?}", params); + tracing::debug!("request params: {:?}", params); if name_required.is_some_and(|required| required) && params.name.is_none() { return Err(ServerError::bad_request("name参数未指定")); } @@ -424,7 +455,7 @@ fn prepare_ctx( if params.plural.is_none() { let meta = ResourceMeta::from_kind(params.kind.as_ref().unwrap().as_str()).map_err(|e| { - log::error!("error getting kind from plural: {}", e); + tracing::error!("error getting kind from plural: {}", e); ServerError::bad_request("kind参数不正确") })?; params.plural = Some(meta.plural); @@ -432,14 +463,14 @@ fn prepare_ctx( if params.kind.is_none() { let meta = ResourceMeta::from_plural(params.plural.as_ref().unwrap().as_str()).map_err(|e| { - log::error!("error getting plural from kind: {}", e); + tracing::error!("error getting plural from kind: {}", e); ServerError::bad_request("plural参数不正确") })?; params.kind = Some(meta.kind); } let db_conn = app_state.db_pool.get_connection(); if let Err(e) = db_conn { - log::error!("error getting db conn: {}", e); + tracing::error!("error getting db conn: {}", e); return Err(ServerError::internal_error("DB pool error")); } let db_conn = db_conn.unwrap(); diff --git a/src/cores/handlers/cluster_info.rs b/src/cores/handlers/cluster_info.rs index d5f3bd6..ffcf9e2 100644 --- a/src/cores/handlers/cluster_info.rs +++ b/src/cores/handlers/cluster_info.rs @@ -1,17 +1,26 @@ use crate::cores::handlers::datamgr::datamgr_api::{GetNodeId, GetUdpPort}; use crate::cores::state::AppState; +use crate::utils::request_context::RequestContext; use fleetmodv2::api_server::{ServerError, ServerRequest, ServerResult}; use serde_json::{json, Value}; use std::ffi::CStr; use std::sync::Arc; -pub async fn get_cluster_info(app_state: Arc, _: ServerRequest) -> ServerResult { +pub async fn get_cluster_info( + app_state: Arc, + __request_context: RequestContext, + _: ServerRequest, +) -> ServerResult { Ok(json!({ "cluster_id": app_state.cluster_id })) } -pub async fn get_eventbus_info(app_state: Arc, _: 
ServerRequest) -> ServerResult { +pub async fn get_eventbus_info( + app_state: Arc, + __request_context: RequestContext, + _: ServerRequest, +) -> ServerResult { let message_cli = app_state.message_cli.clone(); let cluster_plugin_manager = message_cli.get_plugin_manager().unwrap(); diff --git a/src/cores/handlers/consensus.rs b/src/cores/handlers/consensus.rs index 268c86f..6d5485b 100644 --- a/src/cores/handlers/consensus.rs +++ b/src/cores/handlers/consensus.rs @@ -3,11 +3,13 @@ use std::sync::Arc; use fleetmodv2::api_server::{ServerRawResponse, ServerRequest, ServerResponse}; use crate::cores::state::AppState; +use crate::utils::request_context::RequestContext; macro_rules! define_service_fn { ($service_fn_name:ident, $daemon_fn:ident) => { pub async fn $service_fn_name( app_state: Arc, + _request_context: RequestContext, req: ServerRequest, ) -> ServerResponse { let Some(body) = req.body else { @@ -31,7 +33,11 @@ define_service_fn!(handle_requests, handle_requests); define_service_fn!(handle_join, handle_join); // consensus/peers -pub async fn handle_get_peers(app_state: Arc, _: ServerRequest) -> ServerResponse { +pub async fn handle_get_peers( + app_state: Arc, + _request_context: RequestContext, + _: ServerRequest, +) -> ServerResponse { let peers = app_state.consensus_daemon.get_peers().await; let Ok(response_vec) = serde_json::to_vec(&peers) else { return bad_request("invalid response body, serializing response failed"); @@ -40,7 +46,11 @@ pub async fn handle_get_peers(app_state: Arc, _: ServerRequest) -> Ser } // consensus/leave -pub async fn handle_leave(app_state: Arc, _: ServerRequest) -> ServerResponse { +pub async fn handle_leave( + app_state: Arc, + _request_context: RequestContext, + _: ServerRequest, +) -> ServerResponse { let response = app_state.consensus_daemon.handle_leave().await; let Ok(response_vec) = serde_json::to_vec(&response) else { return bad_request("invalid response body, serializing response failed"); @@ -67,7 +77,11 @@ pub mod raft { define_service_fn!(change_membership, change_membership); // consensus/raft/metrics - pub async fn metrics(app_state: Arc, _: ServerRequest) -> ServerResponse { + pub async fn metrics( + app_state: Arc, + _request_context: RequestContext, + _: ServerRequest, + ) -> ServerResponse { let response = app_state.consensus_daemon.metrics().await; let Ok(v) = serde_json::to_vec(&response) else { return bad_request("consensus metrics invalid body, serializing response failed"); diff --git a/src/cores/handlers/datamgr/datamgr_api.rs b/src/cores/handlers/datamgr/datamgr_api.rs index 58318b7..7eda0fc 100644 --- a/src/cores/handlers/datamgr/datamgr_api.rs +++ b/src/cores/handlers/datamgr/datamgr_api.rs @@ -767,7 +767,6 @@ unsafe extern "C" { ) -> ::std::os::raw::c_int; } - /* automatically generated by rust-bindgen 0.71.1 */ unsafe extern "C" { diff --git a/src/cores/handlers/datamgr/route.rs b/src/cores/handlers/datamgr/route.rs index 8e041d4..02e617c 100644 --- a/src/cores/handlers/datamgr/route.rs +++ b/src/cores/handlers/datamgr/route.rs @@ -1,5 +1,6 @@ use crate::cores::handlers::datamgr::datamgr_api; use crate::cores::state::AppState; +use crate::utils::request_context::RequestContext; use fleetmodv2::api_server::{ ServerError, ServerRawResponse, ServerRequest, ServerResponse, ServerResult, }; @@ -32,6 +33,7 @@ type DataChar = u8; pub async fn upload_data_handler( state: Arc, + request_context: RequestContext, server_request: ServerRequest, ) -> ServerResponse { let headers = server_request.headers; @@ -91,7 +93,8 @@ pub async fn 
upload_data_handler( params: HashMap::new(), body: Some(header_cloud_name.clone().into_bytes()), }; - let publish_result = publish_handler(state.clone(), publish_request).await; + let publish_result = + publish_handler(state.clone(), request_context, publish_request).await; if publish_result .into_raw() @@ -120,6 +123,7 @@ pub async fn upload_data_handler( pub async fn query_data_handler( _: Arc, + _request_context: RequestContext, server_request: ServerRequest, ) -> ServerResult { let headers = server_request.headers; @@ -196,6 +200,7 @@ pub async fn query_data_handler( pub async fn download_data_handler( _: Arc, + _request_context: RequestContext, server_request: ServerRequest, ) -> ServerResponse { let headers = server_request.headers; @@ -272,6 +277,7 @@ pub async fn download_data_handler( pub async fn receive_telemetry_handler( _: Arc, + _request_context: RequestContext, server_request: ServerRequest, ) -> ServerResult { let headers = server_request.headers; @@ -298,6 +304,7 @@ pub async fn receive_telemetry_handler( pub async fn report_telemetry_handler( _: Arc, + _request_context: RequestContext, server_request: ServerRequest, ) -> ServerResult { let headers = server_request.headers; @@ -333,6 +340,7 @@ pub async fn report_telemetry_handler( pub async fn send_remote_control_handler( _: Arc, + _request_context: RequestContext, server_request: ServerRequest, ) -> ServerResult { let headers = server_request.headers; @@ -368,6 +376,7 @@ pub async fn send_remote_control_handler( pub async fn sync_data_handler( _: Arc, + _request_context: RequestContext, server_request: ServerRequest, ) -> ServerResult { let headers = server_request.headers; @@ -427,6 +436,7 @@ pub async fn sync_data_handler( pub async fn backup_data_handler( _: Arc, + _request_context: RequestContext, server_request: ServerRequest, ) -> ServerResult { let headers = server_request.headers; @@ -497,6 +507,7 @@ pub async fn backup_data_handler( pub async fn recover_data_handler( _: Arc, + _request_context: RequestContext, server_request: ServerRequest, ) -> ServerResult { let headers = server_request.headers; @@ -567,6 +578,7 @@ pub async fn recover_data_handler( pub async fn observation_order_handler( _: Arc, + _request_context: RequestContext, server_request: ServerRequest, ) -> ServerResult { let body = server_request.body.as_ref().unwrap().as_slice(); @@ -601,6 +613,7 @@ pub async fn observation_order_handler( pub async fn processing_order_handler( _: Arc, + _request_context: RequestContext, server_request: ServerRequest, ) -> ServerResult { let body = server_request.body.as_ref().unwrap().as_slice(); @@ -635,6 +648,7 @@ pub async fn processing_order_handler( pub async fn dispatching_order_handler( _: Arc, + _request_context: RequestContext, server_request: ServerRequest, ) -> ServerResult { let body = server_request.body.as_ref().unwrap().as_slice(); @@ -669,6 +683,7 @@ pub async fn dispatching_order_handler( pub async fn order_status_handler( _: Arc, + _request_context: RequestContext, server_request: ServerRequest, ) -> ServerResult { let params_uuid = server_request.params.get("uuid").unwrap(); @@ -709,6 +724,7 @@ pub async fn order_status_handler( pub async fn order_result_handler( _: Arc, + _request_context: RequestContext, server_request: ServerRequest, ) -> ServerResult { let params_uuid = server_request.params.get("uuid").unwrap(); @@ -747,7 +763,11 @@ pub async fn order_result_handler( } } -pub async fn publish_handler(_: Arc, server_request: ServerRequest) -> ServerResponse { +pub async fn publish_handler( + 
_: Arc, + _request_context: RequestContext, + server_request: ServerRequest, +) -> ServerResponse { let headers = server_request.headers; let body = server_request.body.as_ref().unwrap().as_slice(); @@ -854,6 +874,7 @@ extern "C" fn message_callback( // 订阅处理函数 pub async fn subscribe_handler( _: Arc, + _request_context: RequestContext, server_request: ServerRequest, ) -> ServerResult { let headers = server_request.headers; @@ -927,7 +948,11 @@ pub async fn subscribe_handler( } // 获取消息处理函数 -pub async fn message_handler(_: Arc, server_request: ServerRequest) -> ServerResponse { +pub async fn message_handler( + _: Arc, + _request_context: RequestContext, + server_request: ServerRequest, +) -> ServerResponse { let bad_request = |body_str| { ServerRawResponse::bad_request() .body(Vec::from(body_str)) @@ -1181,7 +1206,7 @@ pub struct CallbackContext { static RESPONSE_MAP: Lazy>>>> = Lazy::new(|| Arc::new(Mutex::new(HashMap::new()))); -pub async fn request_handler(_: Arc, server_request: ServerRequest) -> ServerResponse { +pub async fn request_handler(_: Arc, _request_context: RequestContext, server_request: ServerRequest) -> ServerResponse { let bad_request = |body_str| { ServerRawResponse::bad_request() .body(Vec::from(body_str)) @@ -1276,7 +1301,7 @@ pub async fn request_handler(_: Arc, server_request: ServerRequest) -> } -pub async fn reply_handler(_: Arc, server_request: ServerRequest) -> ServerResponse { +pub async fn reply_handler(_: Arc, _request_context: RequestContext, server_request: ServerRequest) -> ServerResponse { let headers = server_request.headers; let body = server_request.body.as_ref().unwrap().as_slice(); diff --git a/src/cores/handlers/mod.rs b/src/cores/handlers/mod.rs index 24a784f..634bcf1 100644 --- a/src/cores/handlers/mod.rs +++ b/src/cores/handlers/mod.rs @@ -3,11 +3,11 @@ pub mod cluster_info; pub mod consensus; pub mod datamgr; pub mod network_status; +pub mod payload; +pub mod permissions; +pub mod roles; pub mod router_mgr; pub mod router_topo; pub mod security; pub mod test; pub mod users; -pub mod payload; -pub mod roles; -pub mod permissions; \ No newline at end of file diff --git a/src/cores/handlers/network_status/README.md b/src/cores/handlers/network_status/README.md index 13bc00b..c224f30 100644 --- a/src/cores/handlers/network_status/README.md +++ b/src/cores/handlers/network_status/README.md @@ -5,57 +5,60 @@ 网络状态模块提供了管理网络接口信息和连通性测试的 RESTful API 接口。该模块主要包含以下功能: 1. 网络接口管理 - - 记录和更新网络接口信息 - - 查询网络接口列表 - - 获取特定网络接口信息 - - 更新网络接口配置 + - 记录和更新网络接口信息 + - 查询网络接口列表 + - 获取特定网络接口信息 + - 更新网络接口配置 2. 
连通性测试 - - 记录连通性测试结果 - - 查询所有连通性测试 - - 获取特定 IP 的连通性测试结果 + - 记录连通性测试结果 + - 查询所有连通性测试 + - 获取特定 IP 的连通性测试结果 ## 主要结构体 ### 网络接口相关 + - `NetworkInterfaceInfo`: 网络接口信息结构体 - - `id`: 接口 ID - - `interface_name`: 接口名称 - - `ip_address`: IP 地址 - - `mac_address`: MAC 地址 - - `is_up`: 接口状态 - - `speed_mbps`: 接口速度 - - `created_at`: 创建时间 - - `updated_at`: 更新时间 + - `id`: 接口 ID + - `interface_name`: 接口名称 + - `ip_address`: IP 地址 + - `mac_address`: MAC 地址 + - `is_up`: 接口状态 + - `speed_mbps`: 接口速度 + - `created_at`: 创建时间 + - `updated_at`: 更新时间 - `NewNetworkInterface`: 新建网络接口结构体 - - `interface_name`: 接口名称 - - `ip_address`: IP 地址 - - `mac_address`: MAC 地址 - - `is_up`: 接口状态 - - `speed_mbps`: 接口速度 + - `interface_name`: 接口名称 + - `ip_address`: IP 地址 + - `mac_address`: MAC 地址 + - `is_up`: 接口状态 + - `speed_mbps`: 接口速度 - `UpdateNetworkInterface`: 更新网络接口结构体 - - `interface_name`: 接口名称(可选) - - `ip_address`: IP 地址(可选) - - `mac_address`: MAC 地址(可选) - - `is_up`: 接口状态(可选) - - `speed_mbps`: 接口速度(可选) + - `interface_name`: 接口名称(可选) + - `ip_address`: IP 地址(可选) + - `mac_address`: MAC 地址(可选) + - `is_up`: 接口状态(可选) + - `speed_mbps`: 接口速度(可选) ### 连通性测试相关 + - `ConnectivityTestResult`: 连通性测试结果结构体 - - `id`: 测试 ID - - `target_ip`: 目标 IP - - `is_connected`: 是否连通 - - `last_checked_at`: 最后检查时间 + - `id`: 测试 ID + - `target_ip`: 目标 IP + - `is_connected`: 是否连通 + - `last_checked_at`: 最后检查时间 - `NewConnectivityTest`: 新建连通性测试结构体 - - `target_ip`: 目标 IP - - `is_connected`: 是否连通 + - `target_ip`: 目标 IP + - `is_connected`: 是否连通 ## 主要函数 ### 网络接口管理 + - `record_network_interface`: 记录或更新网络接口信息 - `list_network_interfaces`: 获取所有网络接口列表 - `get_network_interface`: 获取特定网络接口信息 @@ -63,6 +66,7 @@ - `set_interface_status`: 设置网络接口状态 ### 连通性测试 + - `record_connectivity_test`: 记录连通性测试结果 - `list_connectivity_tests`: 获取所有连通性测试结果 - `get_connectivity_test`: 获取特定 IP 的连通性测试结果 @@ -72,6 +76,7 @@ ### 网络接口管理 #### 记录/更新网络接口 + ```bash # 记录新的网络接口 curl -X POST "http://localhost:8080/network/interface" \ @@ -86,18 +91,21 @@ curl -X POST "http://localhost:8080/network/interface" \ ``` #### 获取所有网络接口 + ```bash # 获取所有网络接口列表 curl -X GET "http://localhost:8080/network/interfaces" ``` #### 获取特定网络接口 + ```bash # 根据名称获取特定网络接口 curl -X GET "http://localhost:8080/network/interface/eth0" ``` #### 更新网络接口 + ```bash # 更新网络接口信息 curl -X PUT "http://localhost:8080/network/interface/eth0" \ @@ -109,6 +117,7 @@ curl -X PUT "http://localhost:8080/network/interface/eth0" \ ``` #### 设置接口状态 + ```bash # 设置网络接口状态 curl -X PATCH "http://localhost:8080/network/interface/status/eth0" \ @@ -121,6 +130,7 @@ curl -X PATCH "http://localhost:8080/network/interface/status/eth0" \ ### 连通性测试 #### 记录连通性测试 + ```bash # 记录连通性测试结果 curl -X POST "http://localhost:8080/network/connectivity" \ @@ -132,12 +142,14 @@ curl -X POST "http://localhost:8080/network/connectivity" \ ``` #### 获取所有连通性测试 + ```bash # 获取所有连通性测试结果 curl -X GET "http://localhost:8080/network/connectivity" ``` #### 获取特定连通性测试 + ```bash # 根据 IP 获取特定连通性测试结果 curl -X GET "http://localhost:8080/network/connectivity/8.8.8.8" @@ -148,6 +160,7 @@ curl -X GET "http://localhost:8080/network/connectivity/8.8.8.8" ### 网络接口 #### 请求体(创建/更新) + ```json { "interface_name": "eth0", @@ -159,6 +172,7 @@ curl -X GET "http://localhost:8080/network/connectivity/8.8.8.8" ``` #### 响应 + ```json { "id": 1, @@ -175,6 +189,7 @@ curl -X GET "http://localhost:8080/network/connectivity/8.8.8.8" ### 连通性测试 #### 请求体 + ```json { "target_ip": "8.8.8.8", @@ -183,6 +198,7 @@ curl -X GET "http://localhost:8080/network/connectivity/8.8.8.8" ``` #### 响应 + ```json { "id": 1, diff --git 
a/src/cores/handlers/network_status/mod.rs b/src/cores/handlers/network_status/mod.rs index 1a023fe..739b1f4 100644 --- a/src/cores/handlers/network_status/mod.rs +++ b/src/cores/handlers/network_status/mod.rs @@ -6,7 +6,6 @@ //! - Tracking network connectivity test results //! - Providing network status information through RESTful API endpoints - // Copyright (c) 2025 Institute of Software, Chinese Academy of Sciences // Author: songpenglei, songpenglei@otcaix.iscas.ac.cn // Affiliation: Institute of Software, Chinese Academy of Sciences @@ -20,6 +19,7 @@ use std::sync::Arc; use crate::cores::state::AppState; use crate::db::network_status_ops; +use crate::utils::request_context::RequestContext; use models::{ NewConnectivityTest, NewNetworkInterface, SetInterfaceStatusPayload, UpdateNetworkInterface, }; @@ -152,6 +152,7 @@ fn parse_body( /// * `Err(ServerError)` - An error if the operation fails pub async fn record_network_interface( app_state: Arc, + _request_context: RequestContext, server_request: ServerRequest, ) -> ServerResult { let mut db_conn = app_state @@ -178,6 +179,7 @@ pub async fn record_network_interface( /// * `Err(ServerError)` - An error if the operation fails pub async fn list_network_interfaces( app_state: Arc, + _request_context: RequestContext, _server_request: ServerRequest, ) -> ServerResult { let mut db_conn = app_state @@ -203,6 +205,7 @@ pub async fn list_network_interfaces( /// * `Err(ServerError)` - An error if the operation fails pub async fn get_network_interface( app_state: Arc, + _request_context: RequestContext, server_request: ServerRequest, ) -> ServerResult { let mut db_conn = app_state @@ -230,6 +233,7 @@ pub async fn get_network_interface( /// * `Err(ServerError)` - An error if the operation fails pub async fn update_network_interface( app_state: Arc, + _request_context: RequestContext, server_request: ServerRequest, ) -> ServerResult { let mut db_conn = app_state @@ -263,6 +267,7 @@ pub async fn update_network_interface( /// * `Err(ServerError)` - An error if the operation fails pub async fn set_interface_status( app_state: Arc, + _request_context: RequestContext, server_request: ServerRequest, ) -> ServerResult { let mut db_conn = app_state @@ -306,6 +311,7 @@ pub async fn set_interface_status( /// * `Err(ServerError)` - An error if the operation fails pub async fn record_connectivity_test( app_state: Arc, + _request_context: RequestContext, server_request: ServerRequest, ) -> ServerResult { let mut db_conn = app_state @@ -332,6 +338,7 @@ pub async fn record_connectivity_test( /// * `Err(ServerError)` - An error if the operation fails pub async fn list_connectivity_tests( app_state: Arc, + _request_context: RequestContext, _server_request: ServerRequest, ) -> ServerResult { let mut db_conn = app_state @@ -357,6 +364,7 @@ pub async fn list_connectivity_tests( /// * `Err(ServerError)` - An error if the operation fails pub async fn get_connectivity_test( app_state: Arc, + _request_context: RequestContext, server_request: ServerRequest, ) -> ServerResult { let mut db_conn = app_state diff --git a/src/cores/handlers/network_status/models.rs b/src/cores/handlers/network_status/models.rs index 6a0163b..7064086 100644 --- a/src/cores/handlers/network_status/models.rs +++ b/src/cores/handlers/network_status/models.rs @@ -10,7 +10,8 @@ use crate::schema::{connectivity_tests, network_interfaces}; use diesel::{AsChangeset, Identifiable, Insertable, Queryable, QueryableByName, Selectable}; -use serde::{Deserialize, Serialize}; // Assuming schema.rs is accessible 
via crate::schema +use serde::{Deserialize, Serialize}; +// Assuming schema.rs is accessible via crate::schema // ------------------ Network Interface Models ------------------ diff --git a/src/cores/handlers/payload.rs b/src/cores/handlers/payload.rs index 334f985..430f1b2 100644 --- a/src/cores/handlers/payload.rs +++ b/src/cores/handlers/payload.rs @@ -1,10 +1,11 @@ -use std::sync::Arc; -use fleetmodv2::api_server::{ServerError, ServerRequest, ServerResult}; -use serde_json::{json, Value}; use crate::cores::state::AppState; use crate::db::delete::delete_from_payload; use crate::db::get::get_data_from_payload; use crate::db::insert::insert_payload; +use crate::utils::request_context::RequestContext; +use fleetmodv2::api_server::{ServerError, ServerRequest, ServerResult}; +use serde_json::{json, Value}; +use std::sync::Arc; fn map_diesel_err(error: diesel::result::Error) -> ServerError { ServerError::internal_error(error.to_string().as_str()) @@ -12,9 +13,9 @@ fn map_diesel_err(error: diesel::result::Error) -> ServerError { pub async fn insert_payload_handler( app_state: Arc, + _request_context: RequestContext, server_request: ServerRequest, ) -> ServerResult { - let payload_kind = server_request.params.get("payload_kind").unwrap(); let kind = match payload_kind.as_str() { @@ -31,7 +32,7 @@ pub async fn insert_payload_handler( let cluster_id = app_state.cluster_id.clone(); if let Err(e) = db_conn { - log::error!("error getting db conn: {}", e); + tracing::error!("error getting db conn: {}", e); return Err(ServerError::internal_error("DB pool error")); } @@ -39,26 +40,21 @@ pub async fn insert_payload_handler( let body = server_request.body.as_ref().unwrap(); - let data_str = std::str::from_utf8(body).map_err(|_| { - ServerError::bad_request("Request body is not valid UTF-8") - })?; - + let data_str = std::str::from_utf8(body) + .map_err(|_| ServerError::bad_request("Request body is not valid UTF-8"))?; - insert_payload( - &mut db_conn, - &cluster_id.as_str(), - kind, - data_str, - ).await.map_err(map_diesel_err)?; + insert_payload(&mut db_conn, &cluster_id.as_str(), kind, data_str) + .await + .map_err(map_diesel_err)?; Ok(serde_json::to_value(data_str)?) } pub async fn get_payload_handler( app_state: Arc, + _request_context: RequestContext, server_request: ServerRequest, ) -> ServerResult { - let payload_kind = server_request.params.get("payload_kind").unwrap(); let kind = match payload_kind.as_str() { @@ -76,26 +72,24 @@ pub async fn get_payload_handler( let db_conn = app_state.db_pool.get_connection(); if let Err(e) = db_conn { - log::error!("error getting db conn: {}", e); + tracing::error!("error getting db conn: {}", e); return Err(ServerError::internal_error("DB pool error")); } let mut db_conn = db_conn.unwrap(); - let res = get_data_from_payload( - &mut db_conn, - cluster_id, - kind, - ).await.map_err(map_diesel_err)?; + let res = get_data_from_payload(&mut db_conn, cluster_id, kind) + .await + .map_err(map_diesel_err)?; Ok(serde_json::to_value(res)?) 
} pub async fn delete_payload_handler( app_state: Arc, + _request_context: RequestContext, server_request: ServerRequest, ) -> ServerResult { - let payload_kind = server_request.params.get("payload_kind").unwrap(); let kind = match payload_kind.as_str() { @@ -113,20 +107,18 @@ pub async fn delete_payload_handler( let db_conn = app_state.db_pool.get_connection(); if let Err(e) = db_conn { - log::error!("error getting db conn: {}", e); + tracing::error!("error getting db conn: {}", e); return Err(ServerError::internal_error("DB pool error")); } let mut db_conn = db_conn.unwrap(); - delete_from_payload( - &mut db_conn, - cluster_id, - kind, - ).await.map_err(map_diesel_err)?; + delete_from_payload(&mut db_conn, cluster_id, kind) + .await + .map_err(map_diesel_err)?; Ok(json!({ "cluster_id": cluster_id, "kind": kind, })) -} \ No newline at end of file +} diff --git a/src/cores/handlers/permissions.rs b/src/cores/handlers/permissions.rs index 37992da..4705950 100644 --- a/src/cores/handlers/permissions.rs +++ b/src/cores/handlers/permissions.rs @@ -1,14 +1,17 @@ -use std::sync::Arc; -use serde::Deserialize; -use serde_json::{json, Value}; -use uuid::Uuid; use crate::cores::state::AppState; -use crate::db::check_exist::{check_permissions_by_id}; +use crate::db::check_exist::check_permissions_by_id; use crate::db::delete::{delete_from_permissions_by_id, delete_role_permission}; -use crate::db::get::{get_all_data_from_permissions, get_data_from_permissions_by_id, get_roles_by_permission_id}; -use crate::db::insert::{insert_permissions}; +use crate::db::get::{ + get_all_data_from_permissions, get_data_from_permissions_by_id, get_roles_by_permission_id, +}; +use crate::db::insert::insert_permissions; use crate::db::update::update_permission_in_permissions_by_id; +use crate::utils::request_context::RequestContext; use fleetmodv2::api_server::{ServerError, ServerRequest, ServerResult}; +use serde::Deserialize; +use serde_json::{json, Value}; +use std::sync::Arc; +use uuid::Uuid; /// 将 Diesel 数据库错误转换为服务器错误 fn map_diesel_err(error: diesel::result::Error) -> ServerError { @@ -18,30 +21,31 @@ fn map_diesel_err(error: diesel::result::Error) -> ServerError { /// 创建权限的请求结构体 #[derive(Debug, Deserialize)] struct PermissionCreateRequest { - url_path: String, // API 路径 - http_method: String, // HTTP 方法 + url_path: String, // API 路径 + http_method: String, // HTTP 方法 description: Option, // 权限描述(可选) } /// 创建权限的处理函数 -/// +/// /// # 功能 /// - 验证请求数据的合法性 /// - 生成新的权限ID /// - 在数据库中创建新的权限记录 -/// +/// /// # 参数 /// - app_state: 应用程序状态,包含数据库连接池等资源 /// - server_request: 包含请求数据的服务器请求对象 pub async fn create_permission_handler( app_state: Arc, + _request_context: RequestContext, server_request: ServerRequest, ) -> ServerResult { // 获取数据库连接 let db_conn = app_state.db_pool.get_connection(); if let Err(e) = db_conn { - log::error!("获取数据库连接失败: {}", e); + tracing::error!("获取数据库连接失败: {}", e); return Err(ServerError::internal_error("数据库连接池错误")); } let mut db_conn = db_conn.unwrap(); @@ -64,7 +68,7 @@ pub async fn create_permission_handler( // 生成新的权限ID(UUID) let permission_id = Uuid::new_v4().to_string(); - + // 在数据库中插入新权限 insert_permissions( &mut db_conn, @@ -72,7 +76,9 @@ pub async fn create_permission_handler( &create_req.url_path, &create_req.http_method.to_uppercase(), create_req.description.as_deref(), - ).await.map_err(map_diesel_err)?; + ) + .await + .map_err(map_diesel_err)?; // 返回成功响应,包含新创建的权限信息 Ok(json!({ @@ -85,31 +91,32 @@ pub async fn create_permission_handler( /// 更新权限的请求结构体 #[derive(Debug, Deserialize)] struct 
UpdatePermissionRequest { - permission_id: String, // 权限ID - url_path: String, // API路径 - http_method: String, // HTTP方法 + permission_id: String, // 权限ID + url_path: String, // API路径 + http_method: String, // HTTP方法 description: Option, // 权限描述(可选) } /// 更新权限的处理函数 -/// +/// /// # 功能 /// - 验证请求数据的合法性 /// - 检查权限是否存在 /// - 更新权限信息 -/// +/// /// # 参数 /// - app_state: 应用程序状态,包含数据库连接池等资源 /// - server_request: 包含请求数据的服务器请求对象 pub async fn update_permission_handler( app_state: Arc, + _request_context: RequestContext, server_request: ServerRequest, ) -> ServerResult { // 获取数据库连接 let db_conn = app_state.db_pool.get_connection(); if let Err(e) = db_conn { - log::error!("获取数据库连接失败: {}", e); + tracing::error!("获取数据库连接失败: {}", e); return Err(ServerError::internal_error("数据库连接池错误")); } let mut db_conn = db_conn.unwrap(); @@ -131,10 +138,9 @@ pub async fn update_permission_handler( } // 检查权限是否存在 - let permission_exists = check_permissions_by_id( - &mut db_conn, - &update_req.permission_id, - ).await.map_err(map_diesel_err)?; + let permission_exists = check_permissions_by_id(&mut db_conn, &update_req.permission_id) + .await + .map_err(map_diesel_err)?; if !permission_exists { return Err(ServerError::not_found("权限不存在")); @@ -147,7 +153,9 @@ pub async fn update_permission_handler( &update_req.url_path, &update_req.http_method.to_uppercase(), update_req.description.as_deref(), - ).await.map_err(map_diesel_err)?; + ) + .await + .map_err(map_diesel_err)?; // 返回成功响应,包含更新的权限ID Ok(json!({ @@ -156,17 +164,18 @@ pub async fn update_permission_handler( } /// 删除权限的处理函数 -/// +/// /// # 功能 /// - 检查权限是否存在 /// - 删除所有与该权限相关的角色-权限关联 /// - 删除权限记录 -/// +/// /// # 参数 /// - app_state: 应用程序状态,包含数据库连接池等资源 /// - server_request: 包含权限ID的服务器请求对象 pub async fn delete_permission_handler( app_state: Arc, + _request_context: RequestContext, server_request: ServerRequest, ) -> ServerResult { // 从请求参数中获取权限ID @@ -176,41 +185,36 @@ pub async fn delete_permission_handler( let db_conn = app_state.db_pool.get_connection(); if let Err(e) = db_conn { - log::error!("获取数据库连接失败: {}", e); + tracing::error!("获取数据库连接失败: {}", e); return Err(ServerError::internal_error("数据库连接池错误")); } let mut db_conn = db_conn.unwrap(); // 检查权限是否存在 - let permission_exists = check_permissions_by_id( - &mut db_conn, - &permission_id, - ).await.map_err(map_diesel_err)?; + let permission_exists = check_permissions_by_id(&mut db_conn, &permission_id) + .await + .map_err(map_diesel_err)?; if !permission_exists { return Err(ServerError::not_found("权限不存在")); } // 获取所有拥有此权限的角色 - let roles_with_permission = get_roles_by_permission_id( - &mut db_conn, - &permission_id, - ).await.map_err(map_diesel_err)?; + let roles_with_permission = get_roles_by_permission_id(&mut db_conn, &permission_id) + .await + .map_err(map_diesel_err)?; // 删除所有角色-权限关联记录 for role in roles_with_permission { - delete_role_permission( - &mut db_conn, - &role.role_id, - &permission_id, - ).await.map_err(map_diesel_err)?; + delete_role_permission(&mut db_conn, &role.role_id, &permission_id) + .await + .map_err(map_diesel_err)?; } // 删除权限本身 - delete_from_permissions_by_id( - &mut db_conn, - &permission_id, - ).await.map_err(map_diesel_err)?; + delete_from_permissions_by_id(&mut db_conn, &permission_id) + .await + .map_err(map_diesel_err)?; // 返回被删除的权限ID Ok(json!({ @@ -219,43 +223,43 @@ pub async fn delete_permission_handler( } /// 获取权限信息的处理函数 -/// +/// /// # 功能 /// - 如果提供了权限ID,获取单个权限的详细信息 /// - 如果没有提供权限ID,获取所有权限的列表 -/// +/// /// # 参数 /// - app_state: 应用程序状态,包含数据库连接池等资源 /// - server_request: 服务器请求对象,可能包含权限ID参数 pub 
async fn get_permission_handler( app_state: Arc, + _request_context: RequestContext, server_request: ServerRequest, ) -> ServerResult { // 获取数据库连接 let db_conn = app_state.db_pool.get_connection(); if let Err(e) = db_conn { - log::error!("获取数据库连接失败: {}", e); + tracing::error!("获取数据库连接失败: {}", e); return Err(ServerError::internal_error("数据库连接池错误")); } let mut db_conn = db_conn.unwrap(); if let Some(permission_id) = server_request.params.get("permission_id") { // 如果提供了权限ID,获取单个权限信息 - let res = get_data_from_permissions_by_id( - &mut db_conn, - &permission_id, - ).await.map_err(map_diesel_err)?; + let res = get_data_from_permissions_by_id(&mut db_conn, &permission_id) + .await + .map_err(map_diesel_err)?; if res.is_none() { - return Err(ServerError::not_found("权限不存在")) + return Err(ServerError::not_found("权限不存在")); } Ok(serde_json::to_value(res)?) } else { // 如果没有提供权限ID,获取所有权限列表 - let res = get_all_data_from_permissions( - &mut db_conn - ).await.map_err(map_diesel_err)?; + let res = get_all_data_from_permissions(&mut db_conn) + .await + .map_err(map_diesel_err)?; Ok(serde_json::to_value(res)?) } -} \ No newline at end of file +} diff --git a/src/cores/handlers/roles.rs b/src/cores/handlers/roles.rs index 293147c..28c507d 100644 --- a/src/cores/handlers/roles.rs +++ b/src/cores/handlers/roles.rs @@ -1,14 +1,17 @@ -use std::sync::Arc; -use serde::Deserialize; -use serde_json::{json, Value}; -use uuid::Uuid; use crate::cores::state::AppState; -use crate::db::check_exist::{check_roles_by_id, check_permissions_by_id}; +use crate::db::check_exist::{check_permissions_by_id, check_roles_by_id}; use crate::db::delete::{delete_from_roles_by_id, delete_role_permission}; -use crate::db::get::{get_all_data_from_roles, get_data_from_roles_by_id, get_permissions_by_role_id}; -use crate::db::insert::{insert_roles, insert_role_permission}; +use crate::db::get::{ + get_all_data_from_roles, get_data_from_roles_by_id, get_permissions_by_role_id, +}; +use crate::db::insert::{insert_role_permission, insert_roles}; use crate::db::update::update_role_name_in_roles_by_id; +use crate::utils::request_context::RequestContext; use fleetmodv2::api_server::{ServerError, ServerRequest, ServerResult}; +use serde::Deserialize; +use serde_json::{json, Value}; +use std::sync::Arc; +use uuid::Uuid; fn map_diesel_err(error: diesel::result::Error) -> ServerError { ServerError::internal_error(error.to_string().as_str()) @@ -24,13 +27,14 @@ struct RoleCreateRequest { /// 创建角色的处理函数 pub async fn create_role_handler( app_state: Arc, + _request_context: RequestContext, server_request: ServerRequest, ) -> ServerResult { // 获取数据库连接 let db_conn = app_state.db_pool.get_connection(); if let Err(e) = db_conn { - log::error!("error getting db conn: {}", e); + tracing::error!("error getting db conn: {}", e); return Err(ServerError::internal_error("DB pool error")); } let mut db_conn = db_conn.unwrap(); @@ -53,38 +57,39 @@ pub async fn create_role_handler( // 如果请求中包含权限ID列表,验证所有权限是否存在 if let Some(permission_ids) = &create_req.permission_ids { for permission_id in permission_ids { - let permission_exists = check_permissions_by_id( - &mut db_conn, - permission_id, - ).await.map_err(map_diesel_err)?; + let permission_exists = check_permissions_by_id(&mut db_conn, permission_id) + .await + .map_err(map_diesel_err)?; // 如果有任何权限不存在,返回错误 if !permission_exists { - return Err(ServerError::bad_request(format!("Permission {} not found", permission_id).as_str())); + return Err(ServerError::bad_request( + format!("Permission {} not found", 
permission_id).as_str(), + )); } } } // 生成新的角色ID(UUID) let role_id = Uuid::new_v4().to_string(); - + // 在数据库中插入新角色 insert_roles( &mut db_conn, &role_id, &create_req.role_name, create_req.description.as_deref(), - ).await.map_err(map_diesel_err)?; + ) + .await + .map_err(map_diesel_err)?; // 如果请求中包含权限ID列表,为角色分配这些权限 if let Some(permission_ids) = create_req.permission_ids { for permission_id in permission_ids { // 在role_permissions表中创建角色-权限关联 - insert_role_permission( - &mut db_conn, - &role_id, - &permission_id, - ).await.map_err(map_diesel_err)?; + insert_role_permission(&mut db_conn, &role_id, &permission_id) + .await + .map_err(map_diesel_err)?; } } @@ -105,12 +110,13 @@ struct UpdateRoleRequest { pub async fn update_role_handler( app_state: Arc, + _request_context: RequestContext, server_request: ServerRequest, ) -> ServerResult { let db_conn = app_state.db_pool.get_connection(); if let Err(e) = db_conn { - log::error!("error getting db conn: {}", e); + tracing::error!("error getting db conn: {}", e); return Err(ServerError::internal_error("DB pool error")); } let mut db_conn = db_conn.unwrap(); @@ -131,10 +137,9 @@ pub async fn update_role_handler( } // 检查角色是否存在 - let role_exists = check_roles_by_id( - &mut db_conn, - &update_req.role_id, - ).await.map_err(map_diesel_err)?; + let role_exists = check_roles_by_id(&mut db_conn, &update_req.role_id) + .await + .map_err(map_diesel_err)?; if !role_exists { return Err(ServerError::not_found("Role not found")); @@ -143,13 +148,14 @@ pub async fn update_role_handler( // 验证所有提供的权限是否存在 if let Some(permission_ids) = &update_req.permission_ids { for permission_id in permission_ids { - let permission_exists = check_permissions_by_id( - &mut db_conn, - permission_id, - ).await.map_err(map_diesel_err)?; + let permission_exists = check_permissions_by_id(&mut db_conn, permission_id) + .await + .map_err(map_diesel_err)?; if !permission_exists { - return Err(ServerError::bad_request(format!("Permission {} not found", permission_id).as_str())); + return Err(ServerError::bad_request( + format!("Permission {} not found", permission_id).as_str(), + )); } } } @@ -157,16 +163,15 @@ pub async fn update_role_handler( // 如果提供了role_name或description,则更新角色信息 if update_req.role_name.is_some() || update_req.description.is_some() { // 从数据库获取当前角色信息 - let current_role = get_data_from_roles_by_id( - &mut db_conn, - &update_req.role_id, - ).await.map_err(map_diesel_err)?; - + let current_role = get_data_from_roles_by_id(&mut db_conn, &update_req.role_id) + .await + .map_err(map_diesel_err)?; + let current_role = current_role.ok_or_else(|| ServerError::not_found("Role not found"))?; // 如果role_name未提供,使用当前的role_name let role_name = update_req.role_name.unwrap_or(current_role.role_name); - + // 如果description未提供,保留当前的description let description = if update_req.description.is_some() { update_req.description.as_deref() @@ -174,46 +179,41 @@ pub async fn update_role_handler( current_role.description.as_deref() }; - update_role_name_in_roles_by_id( - &mut db_conn, - &update_req.role_id, - &role_name, - description, - ).await.map_err(map_diesel_err)?; + update_role_name_in_roles_by_id(&mut db_conn, &update_req.role_id, &role_name, description) + .await + .map_err(map_diesel_err)?; } // 获取当前权限 - let current_permissions = get_permissions_by_role_id( - &mut db_conn, - &update_req.role_id, - ).await.map_err(map_diesel_err)?; + let current_permissions = get_permissions_by_role_id(&mut db_conn, &update_req.role_id) + .await + .map_err(map_diesel_err)?; // 如果提供了新的权限列表,更新权限 if let 
Some(new_permission_ids) = update_req.permission_ids { // 删除不在新列表中的权限 - let current_permission_ids: Vec = current_permissions.iter() + let current_permission_ids: Vec = current_permissions + .iter() .map(|p| p.permission_id.clone()) .collect(); for current_perm_id in current_permission_ids { if !new_permission_ids.contains(¤t_perm_id) { - delete_role_permission( - &mut db_conn, - &update_req.role_id, - ¤t_perm_id, - ).await.map_err(map_diesel_err)?; + delete_role_permission(&mut db_conn, &update_req.role_id, ¤t_perm_id) + .await + .map_err(map_diesel_err)?; } } // 添加新的权限 for permission_id in new_permission_ids { - let exists = current_permissions.iter().any(|p| p.permission_id == permission_id); + let exists = current_permissions + .iter() + .any(|p| p.permission_id == permission_id); if !exists { - insert_role_permission( - &mut db_conn, - &update_req.role_id, - &permission_id, - ).await.map_err(map_diesel_err)?; + insert_role_permission(&mut db_conn, &update_req.role_id, &permission_id) + .await + .map_err(map_diesel_err)?; } } } @@ -226,6 +226,7 @@ pub async fn update_role_handler( /// 删除角色的处理函数 pub async fn delete_role_handler( app_state: Arc, + _request_context: RequestContext, server_request: ServerRequest, ) -> ServerResult { // 从请求参数中获取角色ID @@ -235,41 +236,36 @@ pub async fn delete_role_handler( let db_conn = app_state.db_pool.get_connection(); if let Err(e) = db_conn { - log::error!("error getting db conn: {}", e); + tracing::error!("error getting db conn: {}", e); return Err(ServerError::internal_error("DB pool error")); } let mut db_conn = db_conn.unwrap(); // 检查角色是否存在 - let role_exists = check_roles_by_id( - &mut db_conn, - &role_id, - ).await.map_err(map_diesel_err)?; + let role_exists = check_roles_by_id(&mut db_conn, &role_id) + .await + .map_err(map_diesel_err)?; if !role_exists { return Err(ServerError::not_found("Role not found")); } // 获取角色当前的所有权限,以便清理 - let current_permissions = get_permissions_by_role_id( - &mut db_conn, - &role_id, - ).await.map_err(map_diesel_err)?; + let current_permissions = get_permissions_by_role_id(&mut db_conn, &role_id) + .await + .map_err(map_diesel_err)?; // 删除角色的所有权限关联 for permission in current_permissions { - delete_role_permission( - &mut db_conn, - &role_id, - &permission.permission_id, - ).await.map_err(map_diesel_err)?; + delete_role_permission(&mut db_conn, &role_id, &permission.permission_id) + .await + .map_err(map_diesel_err)?; } // 删除角色本身 - delete_from_roles_by_id( - &mut db_conn, - &role_id, - ).await.map_err(map_diesel_err)?; + delete_from_roles_by_id(&mut db_conn, &role_id) + .await + .map_err(map_diesel_err)?; // 返回被删除的角色ID Ok(json!({ @@ -280,34 +276,33 @@ pub async fn delete_role_handler( /// 获取角色信息的处理函数 pub async fn get_role_handler( app_state: Arc, + _request_context: RequestContext, server_request: ServerRequest, ) -> ServerResult { // 获取数据库连接 let db_conn = app_state.db_pool.get_connection(); if let Err(e) = db_conn { - log::error!("error getting db conn: {}", e); + tracing::error!("error getting db conn: {}", e); return Err(ServerError::internal_error("DB pool error")); } let mut db_conn = db_conn.unwrap(); if let Some(role_id) = server_request.params.get("role_id") { // 获取单个角色信息 - let role = get_data_from_roles_by_id( - &mut db_conn, - &role_id, - ).await.map_err(map_diesel_err)?; + let role = get_data_from_roles_by_id(&mut db_conn, &role_id) + .await + .map_err(map_diesel_err)?; if role.is_none() { - return Err(ServerError::not_found("Role not found")) + return Err(ServerError::not_found("Role not found")); } let role = 
role.unwrap(); // 获取该角色的权限列表 - let permissions = get_permissions_by_role_id( - &mut db_conn, - &role_id, - ).await.map_err(map_diesel_err)?; + let permissions = get_permissions_by_role_id(&mut db_conn, &role_id) + .await + .map_err(map_diesel_err)?; // 组合角色信息和权限信息 Ok(json!({ @@ -320,17 +315,16 @@ pub async fn get_role_handler( })) } else { // 获取所有角色列表 - let roles = get_all_data_from_roles( - &mut db_conn - ).await.map_err(map_diesel_err)?; + let roles = get_all_data_from_roles(&mut db_conn) + .await + .map_err(map_diesel_err)?; // 为每个角色获取其权限列表 let mut roles_with_permissions = Vec::new(); for role in roles { - let permissions = get_permissions_by_role_id( - &mut db_conn, - &role.role_id, - ).await.map_err(map_diesel_err)?; + let permissions = get_permissions_by_role_id(&mut db_conn, &role.role_id) + .await + .map_err(map_diesel_err)?; roles_with_permissions.push(json!({ "role_id": role.role_id, @@ -344,4 +338,4 @@ pub async fn get_role_handler( Ok(json!(roles_with_permissions)) } -} \ No newline at end of file +} diff --git a/src/cores/handlers/router_mgr.rs b/src/cores/handlers/router_mgr.rs index 891cc8b..61d4d56 100644 --- a/src/cores/handlers/router_mgr.rs +++ b/src/cores/handlers/router_mgr.rs @@ -1,12 +1,13 @@ -use std::sync::Arc; -use fleetmodv2::api_server::{ServerError, ServerRequest, ServerResult}; -use serde::{Deserialize, Serialize}; -use serde_json::Value; use crate::cores::state::AppState; use crate::db::check_exist::check_routermgr; use crate::db::delete::delete_from_routermgr; use crate::db::insert::insert_routermgr; use crate::db::update::update_routermgr; +use crate::utils::request_context::RequestContext; +use fleetmodv2::api_server::{ServerError, ServerRequest, ServerResult}; +use serde::{Deserialize, Serialize}; +use serde_json::Value; +use std::sync::Arc; fn map_diesel_err(error: diesel::result::Error) -> ServerError { ServerError::internal_error(error.to_string().as_str()) @@ -22,20 +23,20 @@ struct RouterMgrInfoRequest { #[serde(rename = "intfID")] intf_id: i32, #[serde(rename = "OpCode")] - op_code: i32 + op_code: i32, } pub async fn routermgr_action_handler( app_state: Arc, + _request_context: RequestContext, server_request: ServerRequest, ) -> ServerResult { - let msg_from = server_request.params.get("msg_from").unwrap(); let db_conn = app_state.db_pool.get_connection(); if let Err(e) = db_conn { - log::error!("error getting db conn: {}", e); + tracing::error!("error getting db conn: {}", e); return Err(ServerError::internal_error("DB pool error")); } @@ -51,34 +52,27 @@ pub async fn routermgr_action_handler( match req.op_code { 0 => { - let router_exists = check_routermgr( - &mut db_conn, - &msg_from, - &req.dst_ip, - req.prefix_len - ).await.map_err(map_diesel_err)?; + let router_exists = + check_routermgr(&mut db_conn, &msg_from, &req.dst_ip, req.prefix_len) + .await + .map_err(map_diesel_err)?; if !router_exists { return Err(ServerError::duplicated("This router status is not exist")); } - delete_from_routermgr( - &mut db_conn, - &msg_from, - &req.dst_ip, - req.prefix_len, - ).await.map_err(map_diesel_err)?; + delete_from_routermgr(&mut db_conn, &msg_from, &req.dst_ip, req.prefix_len) + .await + .map_err(map_diesel_err)?; Ok(serde_json::to_value(req)?) 
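The routermgr handler dispatches on a raw `op_code` integer: 0 deletes an existing route, 1 inserts a new one, 2 updates an existing one, and each branch first guards with a `check_routermgr` existence check. As a reading aid only, a hypothetical enum (not part of this patch) that makes that mapping explicit could look like this:

```rust
// Hypothetical helper (not in this patch): gives the raw op_code values a name.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum RouterMgrOp {
    Delete = 0, // route must already exist
    Insert = 1, // route must not exist yet
    Update = 2, // route must already exist
}

impl TryFrom<i32> for RouterMgrOp {
    type Error = ();

    fn try_from(code: i32) -> Result<Self, Self::Error> {
        match code {
            0 => Ok(RouterMgrOp::Delete),
            1 => Ok(RouterMgrOp::Insert),
            2 => Ok(RouterMgrOp::Update),
            _ => Err(()),
        }
    }
}
```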
- }, + } 1 => { - let router_exists = check_routermgr( - &mut db_conn, - &msg_from, - &req.dst_ip, - req.prefix_len - ).await.map_err(map_diesel_err)?; + let router_exists = + check_routermgr(&mut db_conn, &msg_from, &req.dst_ip, req.prefix_len) + .await + .map_err(map_diesel_err)?; if router_exists { return Err(ServerError::duplicated("This router status is exist")); @@ -91,18 +85,18 @@ pub async fn routermgr_action_handler( req.prefix_len, &req.next_hop, req.intf_id, - ).await.map_err(map_diesel_err)?; + ) + .await + .map_err(map_diesel_err)?; Ok(serde_json::to_value(req)?) - }, + } 2 => { - let router_exists = check_routermgr( - &mut db_conn, - &msg_from, - &req.dst_ip, - req.prefix_len - ).await.map_err(map_diesel_err)?; + let router_exists = + check_routermgr(&mut db_conn, &msg_from, &req.dst_ip, req.prefix_len) + .await + .map_err(map_diesel_err)?; if !router_exists { return Err(ServerError::duplicated("This router status is not exist")); @@ -115,13 +109,13 @@ pub async fn routermgr_action_handler( req.prefix_len, &req.next_hop, req.intf_id, - ).await.map_err(map_diesel_err)?; + ) + .await + .map_err(map_diesel_err)?; Ok(serde_json::to_value(req)?) - }, - - _ => { - Err(ServerError::duplicated("This op_code is not supported")) } + + _ => Err(ServerError::duplicated("This op_code is not supported")), } } diff --git a/src/cores/handlers/router_topo.rs b/src/cores/handlers/router_topo.rs index 4bff97c..1ff3d47 100644 --- a/src/cores/handlers/router_topo.rs +++ b/src/cores/handlers/router_topo.rs @@ -1,11 +1,11 @@ -use std::sync::Arc; -use std::collections::HashMap; use crate::cores::state::AppState; +// 导入路由结构体 +use crate::db::get::{query_all_routermgr, RouterMgrInfoResult}; +use crate::utils::request_context::RequestContext; use fleetmodv2::api_server::{ServerError, ServerRequest, ServerResult}; use serde_json::Value; -// 导入路由结构体 -use crate::db::get::{query_all_routermgr, RouterMgrInfoResult}; - +use std::collections::HashMap; +use std::sync::Arc; // 构建拓扑:源 -> (目的, 出端口) fn build_topology(entries: &Vec) -> HashMap> { @@ -21,12 +21,13 @@ fn build_topology(entries: &Vec) -> HashMap, + _request_context: RequestContext, _server_request: ServerRequest, ) -> ServerResult { // 从数据库连接池中获取连接 let db_conn = app_state.db_pool.get_connection(); if let Err(e) = db_conn { - log::error!("error getting db conn: {}", e); + tracing::error!("error getting db conn: {}", e); return Err(ServerError::internal_error("DB pool error")); } let mut db_conn = db_conn.unwrap(); @@ -41,4 +42,4 @@ pub async fn generate_topology_handler( // 返回拓扑结构 Ok(serde_json::to_value(topology)?) 
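`build_topology` folds the routermgr rows into an adjacency map keyed by source node, each entry holding the destination and outgoing interface (源 -> (目的, 出端口)). A self-contained sketch of that shape, using a simplified stand-in struct whose field names are assumptions rather than the real `RouterMgrInfoResult` fields:

```rust
use std::collections::HashMap;

// Simplified stand-in for RouterMgrInfoResult; the field names here are assumed.
struct RouteEntry {
    src: String,   // 源节点
    dst: String,   // 目的节点
    out_intf: i32, // 出端口
}

// Source -> list of (destination, outgoing interface), mirroring build_topology.
fn build_adjacency(entries: &[RouteEntry]) -> HashMap<String, Vec<(String, i32)>> {
    let mut topo: HashMap<String, Vec<(String, i32)>> = HashMap::new();
    for e in entries {
        topo.entry(e.src.clone())
            .or_default()
            .push((e.dst.clone(), e.out_intf));
    }
    topo
}
```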
-} \ No newline at end of file +} diff --git a/src/cores/handlers/security.rs b/src/cores/handlers/security.rs index 75efa1c..d221118 100644 --- a/src/cores/handlers/security.rs +++ b/src/cores/handlers/security.rs @@ -1,10 +1,14 @@ -use std::sync::Arc; +use crate::cores::state::AppState; +use crate::db::get::{ + get_all_data_from_security, get_data_from_security_by_container_id, + get_data_from_security_by_image_id, +}; +use crate::db::insert::insert_security; +use crate::utils::request_context::RequestContext; use fleetmodv2::api_server::{ServerError, ServerRequest, ServerResult}; use serde::{Deserialize, Serialize}; use serde_json::Value; -use crate::cores::state::AppState; -use crate::db::get::{get_all_data_from_security, get_data_from_security_by_container_id, get_data_from_security_by_image_id}; -use crate::db::insert::insert_security; +use std::sync::Arc; fn map_diesel_err(error: diesel::result::Error) -> ServerError { ServerError::internal_error(error.to_string().as_str()) @@ -22,12 +26,13 @@ struct SecurityInfoRequest { } pub async fn insert_metric_handler( app_state: Arc, + _request_context: RequestContext, server_request: ServerRequest, ) -> ServerResult { let db_conn = app_state.db_pool.get_connection(); if let Err(e) = db_conn { - log::error!("error getting db conn: {}", e); + tracing::error!("error getting db conn: {}", e); return Err(ServerError::internal_error("DB pool error")); } @@ -49,35 +54,38 @@ pub async fn insert_metric_handler( &insert_req.image_id, &insert_req.security_value, &insert_req.measure_time, - ).await.map_err(map_diesel_err)?; + ) + .await + .map_err(map_diesel_err)?; Ok(serde_json::to_value(insert_req)?) } pub async fn list_metric_handler( app_state: Arc, + _request_context: RequestContext, _: ServerRequest, ) -> ServerResult { - let db_conn = app_state.db_pool.get_connection(); if let Err(e) = db_conn { - log::error!("error getting db conn: {}", e); + tracing::error!("error getting db conn: {}", e); return Err(ServerError::internal_error("DB pool error")); } let mut db_conn = db_conn.unwrap(); //虽然插入的时候不去重 但是list的时候会去重 同样的数据只返回一次 - let res = get_all_data_from_security( - &mut db_conn - ).await.map_err(map_diesel_err)?; + let res = get_all_data_from_security(&mut db_conn) + .await + .map_err(map_diesel_err)?; Ok(serde_json::to_value(res)?) } pub async fn get_metric_by_container_id_handler( app_state: Arc, + _request_context: RequestContext, server_request: ServerRequest, ) -> ServerResult { let container_id = server_request.params.get("container_id").unwrap(); @@ -85,19 +93,18 @@ pub async fn get_metric_by_container_id_handler( let db_conn = app_state.db_pool.get_connection(); if let Err(e) = db_conn { - log::error!("error getting db conn: {}", e); + tracing::error!("error getting db conn: {}", e); return Err(ServerError::internal_error("DB pool error")); } let mut db_conn = db_conn.unwrap(); - let res = get_data_from_security_by_container_id( - &mut db_conn, - &container_id, - ).await.map_err(map_diesel_err)?; + let res = get_data_from_security_by_container_id(&mut db_conn, &container_id) + .await + .map_err(map_diesel_err)?; if res.is_none() { - return Err(ServerError::not_found("Can not find this record")) + return Err(ServerError::not_found("Can not find this record")); } Ok(serde_json::to_value(res)?) 
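Every handler touched by this patch now receives the request context as a second argument, as the updated `test.rs` routes below show. A minimal sketch of the new three-argument shape, using local stand-ins for the crate's `AppState`, `RequestContext`, `ServerRequest` and `ServerResult` types (the real types come from the crate and `fleetmodv2`):

```rust
use serde_json::{json, Value};
use std::sync::Arc;

// Stand-ins for the crate's own types; only the three-argument shape is the point.
struct AppState;
struct RequestContext;
struct ServerRequest;
type ServerResult = Result<Value, String>;

// Handlers now take (app_state, request_context, server_request), even when the
// context is only carried along for log-id trace propagation.
async fn example_handler(
    _app_state: Arc<AppState>,
    _request_context: RequestContext,
    _server_request: ServerRequest,
) -> ServerResult {
    Ok(json!({ "example": "ok" }))
}
```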
@@ -105,6 +112,7 @@ pub async fn get_metric_by_container_id_handler( pub async fn get_metric_by_image_id_handler( app_state: Arc, + _request_context: RequestContext, server_request: ServerRequest, ) -> ServerResult { let image_id = server_request.params.get("image_id").unwrap(); @@ -112,19 +120,18 @@ pub async fn get_metric_by_image_id_handler( let db_conn = app_state.db_pool.get_connection(); if let Err(e) = db_conn { - log::error!("error getting db conn: {}", e); + tracing::error!("error getting db conn: {}", e); return Err(ServerError::internal_error("DB pool error")); } let mut db_conn = db_conn.unwrap(); - let res = get_data_from_security_by_image_id( - &mut db_conn, - &image_id, - ).await.map_err(map_diesel_err)?; + let res = get_data_from_security_by_image_id(&mut db_conn, &image_id) + .await + .map_err(map_diesel_err)?; if res.is_none() { - return Err(ServerError::not_found("Can not find this record")) + return Err(ServerError::not_found("Can not find this record")); } Ok(serde_json::to_value(res)?) -} \ No newline at end of file +} diff --git a/src/cores/handlers/test.rs b/src/cores/handlers/test.rs index 784a5f3..ddd163b 100644 --- a/src/cores/handlers/test.rs +++ b/src/cores/handlers/test.rs @@ -1,15 +1,24 @@ use crate::cores::state::AppState; +use crate::utils::request_context::RequestContext; use fleetmodv2::api_server::{ServerRawResponse, ServerRequest, ServerResponse, ServerResult}; use serde_json::{json, Value}; use std::sync::Arc; -pub async fn test_json_route(_: Arc, _: ServerRequest) -> ServerResult { +pub async fn test_json_route( + _: Arc, + _: RequestContext, + _: ServerRequest, +) -> ServerResult { Ok(json!({ "test_json_route": "is_success" })) } -pub async fn test_raw_route(_: Arc, _: ServerRequest) -> ServerResponse { +pub async fn test_raw_route( + _: Arc, + _: RequestContext, + _: ServerRequest, +) -> ServerResponse { ServerRawResponse::ok() .body("test_raw_route is success".into()) .build() diff --git a/src/cores/handlers/users.rs b/src/cores/handlers/users.rs index a363e80..6247678 100644 --- a/src/cores/handlers/users.rs +++ b/src/cores/handlers/users.rs @@ -1,16 +1,22 @@ -use std::sync::Arc; -use serde::Deserialize; -use serde_json::{json, Value}; -use uuid::Uuid; use crate::cores::state::AppState; -use crate::db::check_exist::{check_users_by_id, check_users_by_name, check_roles_by_id}; +use crate::db::check_exist::{check_roles_by_id, check_users_by_id, check_users_by_name}; use crate::db::delete::delete_from_users_by_id; -use crate::db::get::{get_all_data_from_users, get_data_from_users_by_id, get_id_from_users_by_name, get_password_from_users_by_name}; +use crate::db::get::{ + get_all_data_from_users, get_data_from_users_by_id, get_id_from_users_by_name, + get_password_from_users_by_name, +}; use crate::db::insert::insert_users; -use crate::db::update::{update_password_in_users_by_id, update_username_in_users_by_id, update_role_in_users_by_id}; -use fleetmodv2::api_server::{ServerError, ServerRequest, ServerResult}; +use crate::db::update::{ + update_password_in_users_by_id, update_role_in_users_by_id, update_username_in_users_by_id, +}; use crate::utils::password::{hash_password, verify_password}; +use crate::utils::request_context::RequestContext; use crate::utils::token::generate_token; +use fleetmodv2::api_server::{ServerError, ServerRequest, ServerResult}; +use serde::Deserialize; +use serde_json::{json, Value}; +use std::sync::Arc; +use uuid::Uuid; fn map_diesel_err(error: diesel::result::Error) -> ServerError { 
ServerError::internal_error(error.to_string().as_str()) @@ -24,26 +30,27 @@ struct UserInfoRequest { } /// 用户注册处理函数 -/// +/// /// # 功能 /// - 验证用户名和密码的合法性 /// - 检查用户名是否已存在 /// - 验证角色ID是否存在 /// - 对密码进行哈希处理 /// - 创建新用户记录 -/// +/// /// # 参数 /// - app_state: 应用程序状态,包含数据库连接池等资源 /// - server_request: 包含用户注册信息的服务器请求对象 pub async fn register_user_handler( app_state: Arc, + _request_context: RequestContext, server_request: ServerRequest, ) -> ServerResult { // 获取数据库连接 let db_conn = app_state.db_pool.get_connection(); if let Err(e) = db_conn { - log::error!("获取数据库连接失败: {}", e); + tracing::error!("获取数据库连接失败: {}", e); return Err(ServerError::internal_error("数据库连接池错误")); } let mut db_conn = db_conn.unwrap(); @@ -64,20 +71,18 @@ pub async fn register_user_handler( } // 检查用户名是否已存在 - let user_exists = check_users_by_name( - &mut db_conn, - ®ister_req.username, - ).await.map_err(map_diesel_err)?; + let user_exists = check_users_by_name(&mut db_conn, ®ister_req.username) + .await + .map_err(map_diesel_err)?; if user_exists { return Err(ServerError::duplicated("用户名已存在,请更换用户名后重试")); } // 验证角色ID - let role_exists = check_roles_by_id( - &mut db_conn, - ®ister_req.role_id, - ).await.map_err(map_diesel_err)?; + let role_exists = check_roles_by_id(&mut db_conn, ®ister_req.role_id) + .await + .map_err(map_diesel_err)?; if !role_exists { return Err(ServerError::not_found("角色不存在")); @@ -97,7 +102,9 @@ pub async fn register_user_handler( ®ister_req.username, &password_hash, ®ister_req.role_id, - ).await.map_err(map_diesel_err)?; + ) + .await + .map_err(map_diesel_err)?; // 返回成功响应,包含用户信息 Ok(json!({ @@ -108,25 +115,26 @@ pub async fn register_user_handler( } /// 用户登录处理函数 -/// +/// /// # 功能 /// - 验证用户名和密码的合法性 /// - 检查用户是否存在 /// - 验证密码是否正确 /// - 生成用户认证令牌(Token) -/// +/// /// # 参数 /// - app_state: 应用程序状态,包含数据库连接池和令牌密钥等资源 /// - server_request: 包含用户登录信息的服务器请求对象 pub async fn login_user_handler( app_state: Arc, + _request_context: RequestContext, server_request: ServerRequest, ) -> ServerResult { // 获取数据库连接 let db_conn = app_state.db_pool.get_connection(); if let Err(e) = db_conn { - log::error!("获取数据库连接失败: {}", e); + tracing::error!("获取数据库连接失败: {}", e); return Err(ServerError::internal_error("数据库连接池错误")); } let mut db_conn = db_conn.unwrap(); @@ -147,33 +155,33 @@ pub async fn login_user_handler( } // 检查用户是否存在 - let user_exists = check_users_by_name( - &mut db_conn, - &login_req.username, - ).await.map_err(map_diesel_err)?; + let user_exists = check_users_by_name(&mut db_conn, &login_req.username) + .await + .map_err(map_diesel_err)?; if !user_exists { return Err(ServerError::duplicated("用户不存在,请检查用户名")); } // 获取数据库中存储的密码哈希值 - let password_hash = get_password_from_users_by_name( - &mut db_conn, - &login_req.username, - ).await.map_err(map_diesel_err)?.unwrap(); + let password_hash = get_password_from_users_by_name(&mut db_conn, &login_req.username) + .await + .map_err(map_diesel_err)? + .unwrap(); // 验证密码是否正确 let verify_password = verify_password(&login_req.password, &password_hash).unwrap(); if verify_password { // 获取用户ID - let user_id = get_id_from_users_by_name( - &mut db_conn, - &login_req.username, - ).await.map_err(map_diesel_err)?.unwrap(); + let user_id = get_id_from_users_by_name(&mut db_conn, &login_req.username) + .await + .map_err(map_diesel_err)? 
+ .unwrap(); // 生成用户认证令牌 - let user_token = generate_token(&user_id, &login_req.username, &app_state.token_secret).unwrap(); + let user_token = + generate_token(&user_id, &login_req.username, &app_state.token_secret).unwrap(); // 返回用户ID和认证令牌 Ok(json!({ @@ -189,32 +197,33 @@ pub async fn login_user_handler( #[derive(Debug, Deserialize)] struct UpdateUserRequest { user_id: String, - username: Option, // 可选的新用户名 - password: Option, // 可选的新密码 - role_id: Option, // 可选的新角色ID + username: Option, // 可选的新用户名 + password: Option, // 可选的新密码 + role_id: Option, // 可选的新角色ID } /// 更新用户信息处理函数 -/// +/// /// # 功能 /// - 支持同时更新用户名、密码和角色ID /// - 只更新请求中包含的字段 /// - 验证所有更新字段的合法性 /// - 检查用户是否存在 /// - 检查新角色是否存在(如果要更新角色) -/// +/// /// # 参数 /// - app_state: 应用程序状态,包含数据库连接池等资源 /// - server_request: 包含用户更新信息的服务器请求对象 pub async fn update_user_handler( app_state: Arc, + _request_context: RequestContext, server_request: ServerRequest, ) -> ServerResult { // 获取数据库连接 let db_conn = app_state.db_pool.get_connection(); if let Err(e) = db_conn { - log::error!("获取数据库连接失败: {}", e); + tracing::error!("获取数据库连接失败: {}", e); return Err(ServerError::internal_error("数据库连接池错误")); } let mut db_conn = db_conn.unwrap(); @@ -230,10 +239,9 @@ pub async fn update_user_handler( }; // 检查用户是否存在 - let user_exists = check_users_by_id( - &mut db_conn, - &update_req.user_id, - ).await.map_err(map_diesel_err)?; + let user_exists = check_users_by_id(&mut db_conn, &update_req.user_id) + .await + .map_err(map_diesel_err)?; if !user_exists { return Err(ServerError::not_found("用户不存在")); @@ -246,11 +254,9 @@ pub async fn update_user_handler( return Err(ServerError::bad_request("用户名过长")); } // 更新用户名 - update_username_in_users_by_id( - &mut db_conn, - &update_req.user_id, - new_username, - ).await.map_err(map_diesel_err)?; + update_username_in_users_by_id(&mut db_conn, &update_req.user_id, new_username) + .await + .map_err(map_diesel_err)?; } // 如果要更新密码 @@ -265,58 +271,53 @@ pub async fn update_user_handler( Err(_) => return Err(ServerError::internal_error("密码哈希处理失败")), }; // 更新密码 - update_password_in_users_by_id( - &mut db_conn, - &update_req.user_id, - &password_hash, - ).await.map_err(map_diesel_err)?; + update_password_in_users_by_id(&mut db_conn, &update_req.user_id, &password_hash) + .await + .map_err(map_diesel_err)?; } // 如果要更新角色 if let Some(new_role_id) = &update_req.role_id { // 检查新角色是否存在 - let role_exists = check_roles_by_id( - &mut db_conn, - new_role_id, - ).await.map_err(map_diesel_err)?; + let role_exists = check_roles_by_id(&mut db_conn, new_role_id) + .await + .map_err(map_diesel_err)?; if !role_exists { return Err(ServerError::not_found("角色不存在")); } // 更新角色 - update_role_in_users_by_id( - &mut db_conn, - &update_req.user_id, - new_role_id, - ).await.map_err(map_diesel_err)?; + update_role_in_users_by_id(&mut db_conn, &update_req.user_id, new_role_id) + .await + .map_err(map_diesel_err)?; } // 获取更新后的用户信息 - let updated_user = get_data_from_users_by_id( - &mut db_conn, - &update_req.user_id, - ).await.map_err(map_diesel_err)?; + let updated_user = get_data_from_users_by_id(&mut db_conn, &update_req.user_id) + .await + .map_err(map_diesel_err)?; match updated_user { Some(user_data) => { // 直接序列化整个结构体,因为它实现了 Serialize trait Ok(serde_json::to_value(user_data)?) 
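`update_user_handler` applies a partial update: only the `Option` fields present in `UpdateUserRequest` are written back, and a supplied password is hashed before it is stored. A minimal sketch of that rule with simplified stand-in types (names assumed, not the patch's own structs):

```rust
// Simplified stand-in for UpdateUserRequest: only the fields the caller wants
// to change are present.
struct UserPatch {
    username: Option<String>,
    password: Option<String>,
    role_id: Option<String>,
}

struct UserRecord {
    username: String,
    password_hash: String,
    role_id: String,
}

// Apply only the provided fields, leaving everything else untouched — the same
// "update what was sent" rule the handler follows.
fn apply_patch(current: &mut UserRecord, patch: &UserPatch) {
    if let Some(name) = &patch.username {
        current.username = name.clone();
    }
    if let Some(pw) = &patch.password {
        // The real handler hashes the password (hash_password) before storing it.
        current.password_hash = format!("hashed({pw})");
    }
    if let Some(role) = &patch.role_id {
        current.role_id = role.clone();
    }
}
```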
} - None => Err(ServerError::internal_error("获取更新后的用户信息失败")) + None => Err(ServerError::internal_error("获取更新后的用户信息失败")), } } /// 删除用户处理函数 -/// +/// /// # 功能 /// - 检查用户是否存在 /// - 删除用户记录 -/// +/// /// # 参数 /// - app_state: 应用程序状态,包含数据库连接池等资源 /// - server_request: 包含用户ID的服务器请求对象 pub async fn delete_user_handler( app_state: Arc, + _request_context: RequestContext, server_request: ServerRequest, ) -> ServerResult { // 从请求参数中获取用户ID @@ -326,26 +327,24 @@ pub async fn delete_user_handler( let db_conn = app_state.db_pool.get_connection(); if let Err(e) = db_conn { - log::error!("获取数据库连接失败: {}", e); + tracing::error!("获取数据库连接失败: {}", e); return Err(ServerError::internal_error("数据库连接池错误")); } let mut db_conn = db_conn.unwrap(); // 检查用户是否存在 - let user_exists = check_users_by_id( - &mut db_conn, - &user_id, - ).await.map_err(map_diesel_err)?; + let user_exists = check_users_by_id(&mut db_conn, &user_id) + .await + .map_err(map_diesel_err)?; if !user_exists { return Err(ServerError::duplicated("用户不存在,请检查用户ID")); } // 删除用户记录 - delete_from_users_by_id( - &mut db_conn, - &user_id, - ).await.map_err(map_diesel_err)?; + delete_from_users_by_id(&mut db_conn, &user_id) + .await + .map_err(map_diesel_err)?; // 返回被删除的用户ID Ok(json!({ @@ -354,43 +353,43 @@ pub async fn delete_user_handler( } /// 获取用户信息处理函数 -/// +/// /// # 功能 /// - 如果提供了用户ID,获取单个用户的详细信息 /// - 如果没有提供用户ID,获取所有用户的列表 -/// +/// /// # 参数 /// - app_state: 应用程序状态,包含数据库连接池等资源 /// - server_request: 服务器请求对象,可能包含用户ID参数 pub async fn get_user_handler( app_state: Arc, + _request_context: RequestContext, server_request: ServerRequest, ) -> ServerResult { // 获取数据库连接 let db_conn = app_state.db_pool.get_connection(); if let Err(e) = db_conn { - log::error!("获取数据库连接失败: {}", e); + tracing::error!("获取数据库连接失败: {}", e); return Err(ServerError::internal_error("数据库连接池错误")); } let mut db_conn = db_conn.unwrap(); if let Some(user_id) = server_request.params.get("user_id") { // 如果提供了用户ID,获取单个用户信息 - let res = get_data_from_users_by_id( - &mut db_conn, - &user_id, - ).await.map_err(map_diesel_err)?; + let res = get_data_from_users_by_id(&mut db_conn, &user_id) + .await + .map_err(map_diesel_err)?; if res.is_none() { - return Err(ServerError::not_found("找不到该用户")) + return Err(ServerError::not_found("找不到该用户")); } Ok(serde_json::to_value(res)?) } else { // 如果没有提供用户ID,获取所有用户列表 - let res = get_all_data_from_users( - &mut db_conn - ).await.map_err(map_diesel_err)?; + let res = get_all_data_from_users(&mut db_conn) + .await + .map_err(map_diesel_err)?; Ok(serde_json::to_value(res)?) 
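The `check_users_by_id` / `check_roles_by_id` helpers these handlers call (see the `check_exist.rs` hunks later in this patch) run the same existence query against a table and its two replicas and treat the record as present only when at least two of the three copies agree. A simplified sketch of that 2-of-3 rule, with the per-table row counts passed in as plain integers:

```rust
// Simplified illustration of the 2-of-3 agreement used by the check_* helpers:
// the same query runs against the base table and its two replicas, and the
// record counts as existing only if at least two copies report a match.
fn exists_by_majority(counts_per_table: &[i64]) -> bool {
    let positive = counts_per_table.iter().filter(|&&c| c > 0).count();
    positive >= 2
}

fn main() {
    // e.g. counts from users, users_replica1, users_replica2
    assert!(exists_by_majority(&[1, 1, 0]));
    assert!(!exists_by_majority(&[1, 0, 0]));
}
```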
} -} \ No newline at end of file +} diff --git a/src/cores/mod.rs b/src/cores/mod.rs index a388519..a02a28c 100644 --- a/src/cores/mod.rs +++ b/src/cores/mod.rs @@ -5,7 +5,6 @@ use std::iter::zip; use std::{collections::BTreeMap, ffi::CString}; pub mod services; - pub mod daemons; #[cfg(feature = "servers")] pub mod handlers; @@ -66,7 +65,7 @@ pub struct ServerStartParams { } /// 启动 Web 服务器 -/// +/// /// 功能与 fleet/src/core/core.rs 中的 start_api_server 功能相同 /// /// # 参数 @@ -77,18 +76,18 @@ pub struct ServerStartParams { /// - 异步运行结果 #[cfg(feature = "servers")] pub async fn start_server(params: ServerStartParams) -> anyhow::Result<()> { - log::info!( + tracing::info!( "ApiServer启动中,数据库连接地址:{},actix web tcp地址:{},actix web udp地址:{}", params.database_url, params.actix_web_tcp_address.as_deref().unwrap_or("None"), params.actix_web_udp_address.as_deref().unwrap_or("None") ); - log::info!( + tracing::info!( "ApiServer启动共识协议配置:consensus_config:{:?}", params.consensus_config ); if params.consensus_config.is_none() { - log::info!("ApiServer启动共识协议配置为空,默认启用单节点配置"); + tracing::info!("ApiServer启动共识协议配置为空,默认启用单节点配置"); } let consensus_config = params.consensus_config.unwrap_or_else(|| { let address = params @@ -110,7 +109,7 @@ pub async fn start_server(params: ServerStartParams) -> anyhow::Result<()> { // 启动网络状态聚合器 if app_state.network_status_config.enabled { - log::info!( + tracing::info!( "启动网络状态聚合器,更新间隔:{}秒", app_state.network_status_config.update_interval ); @@ -119,7 +118,7 @@ pub async fn start_server(params: ServerStartParams) -> anyhow::Result<()> { services::network_status::start_aggregation(app_state_for_network.into()).await; }); } else { - log::info!("网络状态聚合器未启用"); + tracing::info!("网络状态聚合器未启用"); } // 启动各个server @@ -134,15 +133,17 @@ pub async fn start_server(params: ServerStartParams) -> anyhow::Result<()> { // Start OS Socket Server independently #[cfg(feature = "os_socket")] if let Some(server_instance) = os_socket_server { - log::info!("Preparing to spawn task for OS Socket Server (standalone)."); + tracing::info!("Preparing to spawn task for OS Socket Server (standalone)."); let app_state_os_socket = app_state.clone(); tokio::spawn(async move { - log::info!("Spawned task for OS Socket Server (standalone) has started execution."); + tracing::info!("Spawned task for OS Socket Server (standalone) has started execution."); server_instance.start(app_state_os_socket).await; - log::info!("Spawned task for OS Socket Server (standalone) has finished execution."); + tracing::info!( + "Spawned task for OS Socket Server (standalone) has finished execution." 
+ ); }); } else { - log::info!("OS Socket Server is None, not starting it (standalone)."); + tracing::info!("OS Socket Server is None, not starting it (standalone)."); } let actix_web_addresses = vec![ @@ -165,15 +166,24 @@ pub async fn start_server(params: ServerStartParams) -> anyhow::Result<()> { .chain(actix_web_servers) .collect::>(); - log::info!("Collected {} other server(s) to start in the main loop.", collected_servers.len()); + tracing::info!( + "Collected {} other server(s) to start in the main loop.", + collected_servers.len() + ); for (index, server_instance) in collected_servers.into_iter().enumerate() { let app_state_moved = app_state.clone(); - log::info!("Preparing to spawn task for server at index {}.", index); + tracing::info!("Preparing to spawn task for server at index {}.", index); tokio::spawn(async move { - log::info!("Spawned task for server at index {} has started execution.", index); + tracing::info!( + "Spawned task for server at index {} has started execution.", + index + ); server_instance.start(app_state_moved).await; - log::info!("Spawned task for server at index {} has finished execution.", index); + tracing::info!( + "Spawned task for server at index {} has finished execution.", + index + ); }); } Ok(()) diff --git a/src/cores/router.rs b/src/cores/router.rs index fe69318..d67908b 100644 --- a/src/cores/router.rs +++ b/src/cores/router.rs @@ -1,5 +1,6 @@ use crate::cores::handlers; use crate::cores::state::AppState; +use crate::utils::request_context::RequestContext; use fleetmodv2::api_server::{ServerRequest, ServerResponse}; use std::collections::HashMap; use std::future::Future; @@ -136,16 +137,26 @@ impl RouterKey { } } -pub type Handler = - fn(Arc, ServerRequest) -> Pin + Send>>; +pub type Handler = fn( + Arc, + RequestContext, + ServerRequest, +) -> Pin + Send>>; macro_rules! pin_box_handler { ($fn_call:path) => {{ pub fn handler_boxed( app_state: Arc, + request_context: RequestContext, server_request: ServerRequest, ) -> Pin + Send>> { - Box::pin(async move { $fn_call(app_state, server_request).await.into() }) + let remote_ctx = request_context.to_remote(); + Box::pin(async move { + let ctx = remote_ctx.activate(); + let inner = ctx.clone(); + let _guard = ctx.enter(); + $fn_call(app_state, inner, server_request).await.into() + }) } handler_boxed }}; diff --git a/src/cores/servers/actix_web/mod.rs b/src/cores/servers/actix_web/mod.rs index f87c797..e051f2c 100644 --- a/src/cores/servers/actix_web/mod.rs +++ b/src/cores/servers/actix_web/mod.rs @@ -14,6 +14,7 @@ use crate::cores::servers::actix_web::udp::UDPImpl; use crate::cores::servers::Server; use crate::cores::state::AppState; use crate::utils::headers; +use crate::utils::request_context::RequestContext; use actix_web::http::header; use actix_web::http::StatusCode; use actix_web::{App, Error, HttpRequest, HttpResponse}; @@ -151,7 +152,7 @@ fn extract_params(path_params: impl Serialize, query: impl Serialize) -> HashMap let mut final_map = HashMap::new(); final_map.extend(params_map); final_map.extend(query_map); - log::debug!("extract params: {:?}", final_map); + tracing::debug!("extract params: {:?}", final_map); final_map } @@ -209,12 +210,14 @@ macro_rules! construct_request { macro_rules! 
call_route { ($app_state:ident, $req:ident, $router_key:path) => {{ let app_state_cloned = $app_state.clone(); - let res = $app_state - .router - .get_route($router_key) - .await - .expect("route not found")(app_state_cloned.into_inner(), $req) - .await; + let req_ctx = RequestContext::new("actix_web"); + let res = + $app_state + .router + .get_route($router_key) + .await + .expect("route not found")(app_state_cloned.into_inner(), req_ctx, $req) + .await; res }}; } diff --git a/src/cores/servers/actix_web/quic.rs b/src/cores/servers/actix_web/quic.rs index 6bc3f8c..8397231 100644 --- a/src/cores/servers/actix_web/quic.rs +++ b/src/cores/servers/actix_web/quic.rs @@ -1,8 +1,11 @@ -use log::*; +use tracing::*; use std::net; use super::*; +use crate::cores::servers::Server; +use crate::cores::state::AppState; +use crate::init_app; use actix_http::body::{BoxBody, MessageBody}; use actix_http::Version; use actix_web::test::{init_service, TestRequest}; @@ -10,9 +13,6 @@ use async_trait::async_trait; use ring::rand::*; use std::collections::HashMap; use std::str::FromStr; -use crate::cores::servers::Server; -use crate::cores::state::AppState; -use crate::init_app; use tokio::runtime::Handle; //use crate::middleware::token::TokenAuth; @@ -70,22 +70,38 @@ impl Client { .partial_requests .remove(&stream_id) .expect("Partial request not found"); - debug!("{} build_partial_response got request: {:?}", self.conn.trace_id(), request); + debug!( + "{} build_partial_response got request: {:?}", + self.conn.trace_id(), + request + ); let (headers, body) = self.build_response(request); let partial_response = PartialResponse { headers: Some(headers), body, written: 0, }; - debug!("{} built partial response: {:?}", self.conn.trace_id(), partial_response); + debug!( + "{} built partial response: {:?}", + self.conn.trace_id(), + partial_response + ); self.partial_responses.insert(stream_id, partial_response); } /// Builds an HTTP/3 response given a request. 
fn build_response(&mut self, request: PartialRequest) -> (Vec, Vec) { let body = request.body; - let body_string = body.as_ref().and_then(|b| String::from_utf8(b.clone()).ok()); - info!("quiche server receives request: method: {}, path: {}, headers: {:?}, body: {:?}", request.method, request.path.as_str(), request.headers, body_string); + let body_string = body + .as_ref() + .and_then(|b| String::from_utf8(b.clone()).ok()); + info!( + "quiche server receives request: method: {}, path: {}, headers: {:?}, body: {:?}", + request.method, + request.path.as_str(), + request.headers, + body_string + ); // 创建一个空的 HttpRequest // 使用 TestRequest 构造 HttpRequest let method = actix_http::Method::from_str(request.method.as_str()); @@ -568,11 +584,11 @@ fn handle_headers(client: &mut Client, stream_id: u64, headers: &[quiche::h3::He warn!("Parse content-length to int failed: {:?}", hdr); } content_length = Some(len.unwrap()); - }, + } b"content-type" => { content_type = String::from_utf8(hdr.value().to_vec()).ok(); other_headers.insert("content-type".to_string(), content_type.clone().unwrap()); - }, + } _ => { let name = String::from_utf8(hdr.name().to_vec()); let value = String::from_utf8(hdr.value().to_vec()); @@ -647,11 +663,7 @@ fn handle_data(client: &mut Client, stream_id: u64, buf: &mut [u8]) { if partial_request.body.is_none() { partial_request.body = Some(Vec::new()); } - partial_request - .body - .as_mut() - .unwrap() - .extend(data); + partial_request.body.as_mut().unwrap().extend(data); info!("handle data {} done reading", client.conn.trace_id()); client.build_partial_response(stream_id); } diff --git a/src/cores/servers/actix_web/rs422/mod.rs b/src/cores/servers/actix_web/rs422/mod.rs index ef70fa5..885287f 100644 --- a/src/cores/servers/actix_web/rs422/mod.rs +++ b/src/cores/servers/actix_web/rs422/mod.rs @@ -1,5 +1,5 @@ mod buffer; pub mod frame; -pub mod port; pub mod handle_packet; -pub mod server; \ No newline at end of file +pub mod port; +pub mod server; diff --git a/src/cores/servers/actix_web/rs422/server.rs b/src/cores/servers/actix_web/rs422/server.rs index 0f8a56d..4976f40 100644 --- a/src/cores/servers/actix_web/rs422/server.rs +++ b/src/cores/servers/actix_web/rs422/server.rs @@ -1,4 +1,4 @@ -use log::*; +use tracing::*; use async_trait::async_trait; diff --git a/src/cores/servers/actix_web/tcp.rs b/src/cores/servers/actix_web/tcp.rs index 3e1bdbc..2e94c96 100644 --- a/src/cores/servers/actix_web/tcp.rs +++ b/src/cores/servers/actix_web/tcp.rs @@ -21,7 +21,7 @@ impl TCPImpl { #[async_trait] impl Server for TCPImpl { async fn start(&self, app_state: AppState) { - log::info!("Starting TCP Actix web server at {}", self.addr); + tracing::info!("Starting TCP Actix web server at {}", self.addr); let server = HttpServer::new(move || init_app!(app_state)) .bind(self.addr.as_str()) .expect("Failed to bind Actix web server"); diff --git a/src/cores/servers/actix_web/udp.rs b/src/cores/servers/actix_web/udp.rs index 64e37c5..5eedd61 100644 --- a/src/cores/servers/actix_web/udp.rs +++ b/src/cores/servers/actix_web/udp.rs @@ -1,19 +1,19 @@ +use super::*; +use crate::cores::servers::Server; +use crate::cores::state::AppState; +use crate::init_app; use actix_http::body::MessageBody; use actix_http::{Payload, Request, RequestHead, Version}; use actix_web::dev::ServiceRequest; use actix_web::http::Method; use actix_web::test::{init_service, TestRequest}; use actix_web::{body::BoxBody, App, HttpResponse}; +use async_trait::async_trait; use bytes::Bytes; use std::collections::HashMap; use 
std::net::UdpSocket; use std::str; use std::sync::Arc; -use super::*; -use crate::cores::servers::Server; -use crate::cores::state::AppState; -use crate::init_app; -use async_trait::async_trait; use tokio::runtime::Handle; //use crate::middleware::token::TokenAuth; @@ -32,8 +32,10 @@ impl UDPImpl { #[async_trait] impl Server for UDPImpl { async fn start(&self, app_state: AppState) { - log::info!("Starting UDP Actix web server at {}", self.addr); - run_udp(self.addr.clone(), app_state).await.expect("Failed to start UDP server"); + tracing::info!("Starting UDP Actix web server at {}", self.addr); + run_udp(self.addr.clone(), app_state) + .await + .expect("Failed to start UDP server"); } } @@ -41,7 +43,7 @@ pub async fn run_udp(addr: String, app_state: AppState) -> anyhow::Result<()> { // 启动 UDP 服务器 let udp_socket = UdpSocket::bind(addr.clone())?; udp_socket.set_nonblocking(true)?; - log::debug!("UDP server listening on {}", addr); + tracing::debug!("UDP server listening on {}", addr); let udp_socket = Arc::new(udp_socket); let udp_socket_clone = udp_socket.clone(); let mut buf = [0; 1024]; @@ -56,11 +58,11 @@ pub async fn run_udp(addr: String, app_state: AppState) -> anyhow::Result<()> { let data = str::from_utf8(&buf[..amt]).unwrap_or(""); let http_request = to_udp_http_request(data).unwrap(); - log::debug!("Received UDP method: {}", http_request.method); - log::debug!("Received UDP uri: {}", http_request.uri); - log::debug!("Received UDP version: {}", http_request.version); - log::debug!("Received UDP headers: {:?}", http_request.headers); - log::debug!("Received UDP body: {:?}", http_request.body); + tracing::debug!("Received UDP method: {}", http_request.method); + tracing::debug!("Received UDP uri: {}", http_request.uri); + tracing::debug!("Received UDP version: {}", http_request.version); + tracing::debug!("Received UDP headers: {:?}", http_request.headers); + tracing::debug!("Received UDP body: {:?}", http_request.body); // 创建一个空的 HttpRequest // 使用 TestRequest 构造 HttpRequest let mut test_request = TestRequest::default() @@ -81,11 +83,11 @@ pub async fn run_udp(addr: String, app_state: AppState) -> anyhow::Result<()> { let response_bytes = response_to_bytes(response.into()); // 发送响应数据 match udp_socket.send_to(&response_bytes, &src) { - Ok(_) => log::debug!("Response sent to {}", src), - Err(e) => log::info!("Failed to send response: {}", e), + Ok(_) => tracing::debug!("Response sent to {}", src), + Err(e) => tracing::info!("Failed to send response: {}", e), } - log::debug!("Response sent to {}", src); + tracing::debug!("Response sent to {}", src); } } }); diff --git a/src/cores/servers/actix_web/utils.rs b/src/cores/servers/actix_web/utils.rs index 6d93e09..be660d5 100644 --- a/src/cores/servers/actix_web/utils.rs +++ b/src/cores/servers/actix_web/utils.rs @@ -18,4 +18,4 @@ pub(super) fn http_request_to_actix_http(req: TestRequest) -> Request { *request.head_mut() = head; request -} \ No newline at end of file +} diff --git a/src/cores/servers/message.rs b/src/cores/servers/message.rs index d6b4b94..739f028 100644 --- a/src/cores/servers/message.rs +++ b/src/cores/servers/message.rs @@ -1,6 +1,7 @@ use crate::cores::router::RouterKey; use crate::cores::servers::Server; use crate::cores::state::AppState; +use crate::utils::request_context::RequestContext; use async_trait::async_trait; use feventbus::err::Error as FEventBusError; use feventbus::message::Message; @@ -17,14 +18,14 @@ pub struct MessagingServer; #[async_trait] impl Server for MessagingServer { async fn start(&self, 
app_state: AppState) { - log::info!("Starting Messaging Server"); + tracing::info!("Starting Messaging Server"); let msg_cli = app_state.message_cli.clone(); let mut topics = Vec::new(); let p2p_topics = P2PEventTopic::create_p2p_topic(app_state.cluster_id.clone()); for p2p in p2p_topics { topics.push(EventTopic::P2P(p2p.clone())); } - log::debug!("Before registering reply handlers, topics: {:?}", topics); + tracing::debug!("Before registering reply handlers, topics: {:?}", topics); for t in topics { let topic_str = t.to_string(); let p2p_topic = match t { @@ -33,9 +34,9 @@ impl Server for MessagingServer { }; let reply_handler = Self::get_reply_handler(app_state.clone(), p2p_topic); let msg_cli = msg_cli.clone(); - log::info!("Registering reply handler for topic {}", topic_str); + tracing::info!("Registering reply handler for topic {}", topic_str); if let Err(e) = msg_cli.reply(topic_str.as_str(), reply_handler).await { - log::error!( + tracing::error!( "Failed to register reply handler for topic {}: {}", topic_str, e @@ -43,7 +44,7 @@ impl Server for MessagingServer { } } tokio::time::sleep(tokio::time::Duration::from_secs(5)).await; - log::info!("Messaging Server started."); + tracing::info!("Messaging Server started."); } } @@ -55,7 +56,7 @@ impl MessagingServer { ) -> Result { let handler = app_state.router.get_route(router_key).await; if handler.is_none() { - log::error!( + tracing::error!( "No handler found for router_key: {}", router_key.to_string() ); @@ -63,10 +64,11 @@ impl MessagingServer { "No handler found".to_string(), )); } - let res: ServerResponse = handler.unwrap()(app_state.into(), request).await; + let req_ctx = RequestContext::new("MessagingServer"); + let res: ServerResponse = handler.unwrap()(app_state.into(), req_ctx, request).await; match res { ServerResponse::Json(res) => Ok(serde_json::to_string(&res).map_err(|e| { - log::error!("Failed to convert json response to string: {}", e); + tracing::error!("Failed to convert json response to string: {}", e); FEventBusError::MessageHandling(format!( "Failed to convert json response to string: {}", e @@ -74,7 +76,7 @@ impl MessagingServer { })?), ServerResponse::Raw(res) => Ok(String::from_utf8(res.body.unwrap_or(Vec::default())) .map_err(|e| { - log::error!("Failed to convert raw response body to string: {}", e); + tracing::error!("Failed to convert raw response body to string: {}", e); FEventBusError::MessageHandling(format!( "Failed to convert raw response body to string: {}", e @@ -108,7 +110,7 @@ impl MessagingServer { msg_body: Value, ) -> Result { let body = serde_json::to_vec(&msg_body).map_err(|e: SerdeError| { - log::error!("Failed to convert json response to vec: {}", e); + tracing::error!("Failed to convert json response to vec: {}", e); FEventBusError::MessageHandling(e.to_string()) })?; let request: ServerRequest = ServerRequest { @@ -125,7 +127,7 @@ impl MessagingServer { let app_state = app_state.clone(); Box::pin(async move { if msg.body.is_none() { - log::error!("reply failed since message body is null"); + tracing::error!("reply failed since message body is null"); return Err(FEventBusError::MessageHandling( "Message body is null".to_string(), )); @@ -138,7 +140,7 @@ impl MessagingServer { } else if RouterKey::is_consensus_router_key(&router_key) { Self::do_consensus_reply(router_key, headers, app_state, body).await } else { - log::error!("Invalid router key: {}", router_key.to_string()); + tracing::error!("Invalid router key: {}", router_key.to_string()); Err(FEventBusError::MessageHandling( "Invalid 
router key".to_string(), )) diff --git a/src/cores/servers/mod.rs b/src/cores/servers/mod.rs index 905ee5c..69540e8 100644 --- a/src/cores/servers/mod.rs +++ b/src/cores/servers/mod.rs @@ -17,18 +17,20 @@ pub struct OSSocketServerManager; #[async_trait] impl Server for OSSocketServerManager { async fn start(&self, app_state: AppState) { - log::info!("Attempting to start OS Socket Server Manager (using os_socket_comms handler)..."); + tracing::info!( + "Attempting to start OS Socket Server Manager (using os_socket_comms handler)..." + ); match handler::OSSocketHandler::<_, os_socket_comms::OSSocketServer>::new_with_default_server(app_state.message_cli.clone()).await { Ok(Some(handler)) => { - log::info!("OSSocketHandler (from os_socket_comms) created successfully, starting handler."); + tracing::info!("OSSocketHandler (from os_socket_comms) created successfully, starting handler."); Arc::new(handler).start().await; } Ok(None) => { - log::warn!("OSSocketHandler::new_with_default_server returned None, OS socket service will not start. This typically means the OS socket file was not found or connection failed after retries."); + tracing::warn!("OSSocketHandler::new_with_default_server returned None, OS socket service will not start. This typically means the OS socket file was not found or connection failed after retries."); } Err(e) => { - log::error!("Failed to create OSSocketHandler: {}. OS socket service will not start.", e); + tracing::error!("Failed to create OSSocketHandler: {}. OS socket service will not start.", e); } } } -} \ No newline at end of file +} diff --git a/src/cores/services/mod.rs b/src/cores/services/mod.rs index 4e7feea..6f2f93c 100644 --- a/src/cores/services/mod.rs +++ b/src/cores/services/mod.rs @@ -1 +1 @@ -pub mod network_status; \ No newline at end of file +pub mod network_status; diff --git a/src/cores/services/network_status.rs b/src/cores/services/network_status.rs index 0ebbc10..9e9042d 100644 --- a/src/cores/services/network_status.rs +++ b/src/cores/services/network_status.rs @@ -1,5 +1,5 @@ +use crate::cores::handlers::network_status::models::{NewConnectivityTest, NewNetworkInterface}; use crate::cores::state::AppState; -use crate::cores::handlers::network_status::models::{NewNetworkInterface, NewConnectivityTest}; use crate::db::network_status_ops; use network_info::{self}; use std::sync::Arc; @@ -11,7 +11,7 @@ async fn update_network_interfaces(app_state: &Arc) { let mut db_conn = match app_state.db_pool.get_connection() { Ok(conn) => conn, Err(e) => { - log::error!("获取数据库连接失败: {}", e); + tracing::error!("获取数据库连接失败: {}", e); return; } }; @@ -25,10 +25,12 @@ async fn update_network_interfaces(app_state: &Arc) { speed_mbps: interface.speed_mbps, }; - if let Err(e) = network_status_ops::create_network_interface(&mut db_conn, &new_interface).await { - log::error!("更新网络接口失败: {}", e); + if let Err(e) = + network_status_ops::create_network_interface(&mut db_conn, &new_interface).await + { + tracing::error!("更新网络接口失败: {}", e); } else { - log::info!("成功更新网络接口"); + tracing::info!("成功更新网络接口"); } } } @@ -38,7 +40,7 @@ async fn update_connectivity_tests(app_state: &Arc, target_ips: Vec conn, Err(e) => { - log::error!("获取数据库连接失败: {}", e); + tracing::error!("获取数据库连接失败: {}", e); return; } }; @@ -50,10 +52,12 @@ async fn update_connectivity_tests(app_state: &Arc, target_ips: Vec {}", target_ip, is_connected); + tracing::info!("成功更新连通性测试: {} -> {}", target_ip, is_connected); } } } @@ -65,21 +69,25 @@ async fn update_connectivity_tests(app_state: &Arc, target_ips: Vec) { - 
log::info!("网络状态聚合器已启动"); - + tracing::info!("网络状态聚合器已启动"); + // 使用配置的更新间隔 let update_interval = app_state.network_status_config.update_interval; let mut interval = time::interval(Duration::from_secs(update_interval)); loop { interval.tick().await; - + // 更新网络接口信息 update_network_interfaces(&app_state).await; - + // 更新连通性测试结果 - update_connectivity_tests(&app_state, app_state.network_status_config.target_ips.clone()).await; - - log::info!("网络状态更新完成"); + update_connectivity_tests( + &app_state, + app_state.network_status_config.target_ips.clone(), + ) + .await; + + tracing::info!("网络状态更新完成"); } -} \ No newline at end of file +} diff --git a/src/cores/state.rs b/src/cores/state.rs index fc72a62..0982f4d 100644 --- a/src/cores/state.rs +++ b/src/cores/state.rs @@ -21,7 +21,7 @@ impl Default for NetworkStatusConfig { fn default() -> Self { Self { update_interval: 60, // 默认60秒更新一次 - enabled: true, // 默认启用 + enabled: true, // 默认启用 target_ips: vec![ "127.0.0.1".to_string(), // 本地回环地址 ], diff --git a/src/db/README.md b/src/db/README.md index 64178d1..9c5ab93 100644 --- a/src/db/README.md +++ b/src/db/README.md @@ -3,13 +3,17 @@ ## 添加新的数据库表流程 ### 1. 创建迁移文件 + 在 `src/db/migrations` 目录下创建新的迁移文件: + ```bash touch src/db/migrations/$(date +%Y%m%d%H%M%S)_create_table_name.sql ``` ### 2. 编写迁移文件 + 创建表结构,例如网络接口表: + ```sql -- up.sql CREATE TABLE network_interfaces ( @@ -28,7 +32,9 @@ DROP TABLE IF EXISTS network_interfaces; ``` ### 3. 创建模型 + 在 `src/db/models` 目录下创建对应的 Rust 结构体: + ```rust #[derive(Debug, Serialize, Deserialize)] pub struct NetworkInterface { @@ -44,7 +50,9 @@ pub struct NetworkInterface { ``` ### 4. 实现数据库操作 + 在 `src/db/ops` 目录下实现 CRUD 操作: + ```rust pub fn create_network_interface( conn: &mut PgConnection, @@ -57,12 +65,15 @@ pub fn create_network_interface( ``` ### 5. 运行迁移 + ```bash diesel migration run ``` ### 6. 
添加测试 + 在 `src/db/tests` 目录下添加测试用例: + ```rust #[test] fn test_create_and_get_network_interface() { diff --git a/src/db/check_exist.rs b/src/db/check_exist.rs index 0538e7d..64707b5 100644 --- a/src/db/check_exist.rs +++ b/src/db/check_exist.rs @@ -1,4 +1,8 @@ use crate::db::db::DbConnection; +use crate::schema::routermgr; +use diesel::deserialize::QueryableByName; +use diesel::sql_types::BigInt; +use diesel::sql_types::Text; /** * Copyright (2024, ) Institute of Software, Chinese Academy of Sciences * author: chenhongyu23@otcaix.iscas.ac.cn, wuheng@iscas.ac.cn @@ -6,10 +10,6 @@ use crate::db::db::DbConnection; * **/ use diesel::{ExpressionMethods, QueryDsl, QueryResult, RunQueryDsl}; -use crate::schema::routermgr; -use diesel::sql_types::Text; -use diesel::deserialize::QueryableByName; -use diesel::sql_types::{BigInt}; #[derive(QueryableByName, Debug)] struct CountResult { @@ -200,10 +200,7 @@ pub async fn check_kine( } // 查询 users 表中指定的数据是否存在 -pub async fn check_users_by_name( - conn: &mut DbConnection, - username: &str, -) -> QueryResult { +pub async fn check_users_by_name(conn: &mut DbConnection, username: &str) -> QueryResult { use crate::schema::users::dsl as users_dsl; use crate::schema::users_replica1::dsl as replica1_dsl; use crate::schema::users_replica2::dsl as replica2_dsl; @@ -253,10 +250,7 @@ pub async fn check_users_by_name( Ok(positive_count >= 2) } -pub async fn check_users_by_id( - conn: &mut DbConnection, - user_id: &str, -) -> QueryResult { +pub async fn check_users_by_id(conn: &mut DbConnection, user_id: &str) -> QueryResult { use crate::schema::users::dsl as users_dsl; use crate::schema::users_replica1::dsl as replica1_dsl; use crate::schema::users_replica2::dsl as replica2_dsl; @@ -373,10 +367,7 @@ pub async fn check_routermgr( Ok(positive_count >= 2) } -pub async fn check_roles_by_id( - conn: &mut DbConnection, - role_id: &str, -) -> QueryResult { +pub async fn check_roles_by_id(conn: &mut DbConnection, role_id: &str) -> QueryResult { use crate::schema::roles::dsl as roles_dsl; use crate::schema::roles_replica1::dsl as replica1_dsl; use crate::schema::roles_replica2::dsl as replica2_dsl; @@ -484,7 +475,11 @@ pub async fn check_role_permission_exists( role_id: &str, permission_id: &str, ) -> QueryResult { - let tables = ["role_permissions", "role_permissions_replica1", "role_permissions_replica2"]; + let tables = [ + "role_permissions", + "role_permissions_replica1", + "role_permissions_replica2", + ]; let mut counts = Vec::new(); for table in tables { @@ -494,12 +489,10 @@ pub async fn check_role_permission_exists( ); let count: CountResult = match conn { - DbConnection::Pg(pg_conn) => { - diesel::sql_query(&query) - .bind::(role_id) - .bind::(permission_id) - .get_result(pg_conn)? 
- } + DbConnection::Pg(pg_conn) => diesel::sql_query(&query) + .bind::(role_id) + .bind::(permission_id) + .get_result(pg_conn)?, DbConnection::Sqlite(sqlite_conn) => { let query = query.replace("$1", "?").replace("$2", "?"); diesel::sql_query(&query) @@ -513,4 +506,4 @@ pub async fn check_role_permission_exists( let positive_count = counts.iter().filter(|&&x| x > 0).count(); Ok(positive_count >= 2) -} \ No newline at end of file +} diff --git a/src/db/delete.rs b/src/db/delete.rs index 6cadeac..46b6e2d 100644 --- a/src/db/delete.rs +++ b/src/db/delete.rs @@ -1,4 +1,5 @@ use crate::db::db::DbConnection; +use diesel::sql_types::{Integer, Text}; /** * Copyright (2024, ) Institute of Software, Chinese Academy of Sciences * author: chenhongyu23@otcaix.iscas.ac.cn, wuheng@iscas.ac.cn @@ -6,7 +7,6 @@ use crate::db::db::DbConnection; * **/ use diesel::{QueryResult, RunQueryDsl}; -use diesel::sql_types::{Text, Integer}; // 从 kine 表中删除特定 name 的记录 pub async fn delete_from_kine( @@ -16,7 +16,6 @@ pub async fn delete_from_kine( item_version: &str, item_namespace: Option<&str>, ) -> QueryResult { - // 表名列表 let tables = ["kine", "kine_replica1", "kine_replica2"]; @@ -91,11 +90,7 @@ pub async fn delete_from_kine( Ok(total_rows_affected > 1) } -pub async fn delete_from_users_by_id( - conn: &mut DbConnection, - user_id: &str, -) -> QueryResult { - +pub async fn delete_from_users_by_id(conn: &mut DbConnection, user_id: &str) -> QueryResult { // 表名列表 let tables = ["users", "users_replica1", "users_replica2"]; @@ -104,28 +99,18 @@ pub async fn delete_from_users_by_id( for &table in &tables { let delete_query = match conn { - DbConnection::Pg(_) => format!( - "DELETE FROM {} WHERE user_id = $1", - table - ), - DbConnection::Sqlite(_) => format!( - "DELETE FROM {} WHERE user_id = ?", - table - ), + DbConnection::Pg(_) => format!("DELETE FROM {} WHERE user_id = $1", table), + DbConnection::Sqlite(_) => format!("DELETE FROM {} WHERE user_id = ?", table), }; // 执行删除 let rows_affected = match conn { - DbConnection::Pg(pg_conn) => { - diesel::sql_query(delete_query) - .bind::(user_id) - .execute(pg_conn)? - } - DbConnection::Sqlite(sqlite_conn) => { - diesel::sql_query(delete_query) - .bind::(user_id) - .execute(sqlite_conn)? - } + DbConnection::Pg(pg_conn) => diesel::sql_query(delete_query) + .bind::(user_id) + .execute(pg_conn)?, + DbConnection::Sqlite(sqlite_conn) => diesel::sql_query(delete_query) + .bind::(user_id) + .execute(sqlite_conn)?, }; total_rows_affected += rows_affected; @@ -141,7 +126,6 @@ pub async fn delete_from_routermgr( dst_ip: &str, prefix_len: i32, ) -> QueryResult { - // 表名列表 let tables = ["routermgr", "routermgr_replica1", "routermgr_replica2"]; @@ -162,20 +146,16 @@ pub async fn delete_from_routermgr( // 执行删除 let rows_affected = match conn { - DbConnection::Pg(pg_conn) => { - diesel::sql_query(delete_query) - .bind::(msg_from) - .bind::(dst_ip) - .bind::(prefix_len) - .execute(pg_conn)? - } - DbConnection::Sqlite(sqlite_conn) => { - diesel::sql_query(delete_query) - .bind::(msg_from) - .bind::(dst_ip) - .bind::(prefix_len) - .execute(sqlite_conn)? 
- } + DbConnection::Pg(pg_conn) => diesel::sql_query(delete_query) + .bind::(msg_from) + .bind::(dst_ip) + .bind::(prefix_len) + .execute(pg_conn)?, + DbConnection::Sqlite(sqlite_conn) => diesel::sql_query(delete_query) + .bind::(msg_from) + .bind::(dst_ip) + .bind::(prefix_len) + .execute(sqlite_conn)?, }; total_rows_affected += rows_affected; @@ -190,39 +170,36 @@ pub async fn delete_from_payload( cluster_id: &str, kind: &str, ) -> QueryResult { - // 表名列表 - let tables = ["payload_data", "payload_data_replica1", "payload_data_replica2"]; + let tables = [ + "payload_data", + "payload_data_replica1", + "payload_data_replica2", + ]; // 遍历每个表,执行删除操作 let mut total_rows_affected = 0; for &table in &tables { let delete_query = match conn { - DbConnection::Pg(_) => format!( - "DELETE FROM {} WHERE cluster_id = $1 AND kind = $2", - table - ), - DbConnection::Sqlite(_) => format!( - "DELETE FROM {} WHERE cluster_id = ? AND kind = ?", - table - ), + DbConnection::Pg(_) => { + format!("DELETE FROM {} WHERE cluster_id = $1 AND kind = $2", table) + } + DbConnection::Sqlite(_) => { + format!("DELETE FROM {} WHERE cluster_id = ? AND kind = ?", table) + } }; // 执行删除 let rows_affected = match conn { - DbConnection::Pg(pg_conn) => { - diesel::sql_query(delete_query) - .bind::(cluster_id) - .bind::(kind) - .execute(pg_conn)? - } - DbConnection::Sqlite(sqlite_conn) => { - diesel::sql_query(delete_query) - .bind::(cluster_id) - .bind::(kind) - .execute(sqlite_conn)? - } + DbConnection::Pg(pg_conn) => diesel::sql_query(delete_query) + .bind::(cluster_id) + .bind::(kind) + .execute(pg_conn)?, + DbConnection::Sqlite(sqlite_conn) => diesel::sql_query(delete_query) + .bind::(cluster_id) + .bind::(kind) + .execute(sqlite_conn)?, }; total_rows_affected += rows_affected; @@ -232,17 +209,11 @@ pub async fn delete_from_payload( Ok(total_rows_affected > 1) } -pub async fn delete_from_roles_by_id( - conn: &mut DbConnection, - role_id: &str, -) -> QueryResult<()> { +pub async fn delete_from_roles_by_id(conn: &mut DbConnection, role_id: &str) -> QueryResult<()> { let tables = ["roles", "roles_replica1", "roles_replica2"]; for table in tables { - let query = format!( - "DELETE FROM {} WHERE role_id = $1", - table - ); + let query = format!("DELETE FROM {} WHERE role_id = $1", table); match conn { DbConnection::Pg(pg_conn) => { @@ -251,10 +222,7 @@ pub async fn delete_from_roles_by_id( .execute(pg_conn)?; } DbConnection::Sqlite(sqlite_conn) => { - let query = format!( - "DELETE FROM {} WHERE role_id = ?", - table - ); + let query = format!("DELETE FROM {} WHERE role_id = ?", table); diesel::sql_query(&query) .bind::(role_id) .execute(sqlite_conn)?; @@ -268,13 +236,14 @@ pub async fn delete_from_permissions_by_id( conn: &mut DbConnection, permission_id: &str, ) -> QueryResult<()> { - let tables = ["permissions", "permissions_replica1", "permissions_replica2"]; + let tables = [ + "permissions", + "permissions_replica1", + "permissions_replica2", + ]; for table in tables { - let query = format!( - "DELETE FROM {} WHERE permission_id = $1", - table - ); + let query = format!("DELETE FROM {} WHERE permission_id = $1", table); match conn { DbConnection::Pg(pg_conn) => { @@ -283,10 +252,7 @@ pub async fn delete_from_permissions_by_id( .execute(pg_conn)?; } DbConnection::Sqlite(sqlite_conn) => { - let query = format!( - "DELETE FROM {} WHERE permission_id = ?", - table - ); + let query = format!("DELETE FROM {} WHERE permission_id = ?", table); diesel::sql_query(&query) .bind::(permission_id) .execute(sqlite_conn)?; @@ -301,7 +267,11 
@@ pub async fn delete_role_permission( role_id: &str, permission_id: &str, ) -> QueryResult<()> { - let tables = ["role_permissions", "role_permissions_replica1", "role_permissions_replica2"]; + let tables = [ + "role_permissions", + "role_permissions_replica1", + "role_permissions_replica2", + ]; for table in tables { let query = format!( @@ -329,4 +299,4 @@ pub async fn delete_role_permission( } } Ok(()) -} \ No newline at end of file +} diff --git a/src/db/get.rs b/src/db/get.rs index 48bfe10..4131939 100644 --- a/src/db/get.rs +++ b/src/db/get.rs @@ -1,6 +1,6 @@ -use std::collections::HashMap; use crate::db::db::DbConnection; -use diesel::sql_types::{Text, Nullable}; +use diesel::sql_types::{Nullable, Text}; +use std::collections::HashMap; /** * Copyright (2024, ) Institute of Software, Chinese Academy of Sciences @@ -11,6 +11,7 @@ use diesel::sql_types::{Text, Nullable}; use diesel::{OptionalExtension, QueryResult, QueryableByName, RunQueryDsl}; use serde::Serialize; + // 辅助查询函数,用于获取数据的 `data` 字段 #[derive(QueryableByName)] struct DataResult { @@ -334,31 +335,21 @@ pub async fn get_password_from_users_by_name( for &table in &tables { let select_query = match conn { - DbConnection::Pg(_) => format!( - "SELECT password FROM {} WHERE username = $1", - table - ), - DbConnection::Sqlite(_) => format!( - "SELECT password FROM {} WHERE username = ?", - table - ), + DbConnection::Pg(_) => format!("SELECT password FROM {} WHERE username = $1", table), + DbConnection::Sqlite(_) => format!("SELECT password FROM {} WHERE username = ?", table), }; let data_result = match conn { - DbConnection::Pg(pg_conn) => { - diesel::sql_query(select_query) - .bind::(username) - .get_result::(pg_conn) - .optional()? - .map(|res| res.password) - } - DbConnection::Sqlite(sqlite_conn) => { - diesel::sql_query(select_query) - .bind::(username) - .get_result::(sqlite_conn) - .optional()? - .map(|res| res.password) - } + DbConnection::Pg(pg_conn) => diesel::sql_query(select_query) + .bind::(username) + .get_result::(pg_conn) + .optional()? + .map(|res| res.password), + DbConnection::Sqlite(sqlite_conn) => diesel::sql_query(select_query) + .bind::(username) + .get_result::(sqlite_conn) + .optional()? 
+ .map(|res| res.password), }; if let Some(data) = data_result { @@ -387,29 +378,21 @@ fn get_password_from_users_primary_by_name( username: &str, ) -> QueryResult> { let fallback_query = match conn { - DbConnection::Pg(_) => { - "SELECT password FROM users WHERE username = $1".to_string() - } - DbConnection::Sqlite(_) => { - "SELECT password FROM users WHERE username = ?".to_string() - } + DbConnection::Pg(_) => "SELECT password FROM users WHERE username = $1".to_string(), + DbConnection::Sqlite(_) => "SELECT password FROM users WHERE username = ?".to_string(), }; match conn { - DbConnection::Pg(pg_conn) => { - diesel::sql_query(fallback_query) - .bind::(username) - .get_result::(pg_conn) - .optional() - .map(|res| res.map(|data_result| data_result.password)) - } - DbConnection::Sqlite(sqlite_conn) => { - diesel::sql_query(fallback_query) - .bind::(username) - .get_result::(sqlite_conn) - .optional() - .map(|res| res.map(|data_result| data_result.password)) - } + DbConnection::Pg(pg_conn) => diesel::sql_query(fallback_query) + .bind::(username) + .get_result::(pg_conn) + .optional() + .map(|res| res.map(|data_result| data_result.password)), + DbConnection::Sqlite(sqlite_conn) => diesel::sql_query(fallback_query) + .bind::(username) + .get_result::(sqlite_conn) + .optional() + .map(|res| res.map(|data_result| data_result.password)), } } @@ -434,31 +417,21 @@ pub async fn get_id_from_users_by_name( for &table in &tables { let select_query = match conn { - DbConnection::Pg(_) => format!( - "SELECT user_id FROM {} WHERE username = $1", - table - ), - DbConnection::Sqlite(_) => format!( - "SELECT user_id FROM {} WHERE username = ?", - table - ), + DbConnection::Pg(_) => format!("SELECT user_id FROM {} WHERE username = $1", table), + DbConnection::Sqlite(_) => format!("SELECT user_id FROM {} WHERE username = ?", table), }; let data_result = match conn { - DbConnection::Pg(pg_conn) => { - diesel::sql_query(select_query) - .bind::(username) - .get_result::(pg_conn) - .optional()? - .map(|res| res.user_id) - } - DbConnection::Sqlite(sqlite_conn) => { - diesel::sql_query(select_query) - .bind::(username) - .get_result::(sqlite_conn) - .optional()? - .map(|res| res.user_id) - } + DbConnection::Pg(pg_conn) => diesel::sql_query(select_query) + .bind::(username) + .get_result::(pg_conn) + .optional()? + .map(|res| res.user_id), + DbConnection::Sqlite(sqlite_conn) => diesel::sql_query(select_query) + .bind::(username) + .get_result::(sqlite_conn) + .optional()? 
+ .map(|res| res.user_id), }; if let Some(data) = data_result { @@ -487,29 +460,21 @@ fn get_id_from_users_primary_by_name( username: &str, ) -> QueryResult> { let fallback_query = match conn { - DbConnection::Pg(_) => { - "SELECT user_id FROM users WHERE username = $1".to_string() - } - DbConnection::Sqlite(_) => { - "SELECT user_id FROM users WHERE username = ?".to_string() - } + DbConnection::Pg(_) => "SELECT user_id FROM users WHERE username = $1".to_string(), + DbConnection::Sqlite(_) => "SELECT user_id FROM users WHERE username = ?".to_string(), }; match conn { - DbConnection::Pg(pg_conn) => { - diesel::sql_query(fallback_query) - .bind::(username) - .get_result::(pg_conn) - .optional() - .map(|res| res.map(|data_result| data_result.user_id)) - } - DbConnection::Sqlite(sqlite_conn) => { - diesel::sql_query(fallback_query) - .bind::(username) - .get_result::(sqlite_conn) - .optional() - .map(|res| res.map(|data_result| data_result.user_id)) - } + DbConnection::Pg(pg_conn) => diesel::sql_query(fallback_query) + .bind::(username) + .get_result::(pg_conn) + .optional() + .map(|res| res.map(|data_result| data_result.user_id)), + DbConnection::Sqlite(sqlite_conn) => diesel::sql_query(fallback_query) + .bind::(username) + .get_result::(sqlite_conn) + .optional() + .map(|res| res.map(|data_result| data_result.user_id)), } } @@ -536,9 +501,7 @@ pub async fn get_all_data_from_users(conn: &mut DbConnection) -> QueryResult = match conn { - DbConnection::Pg(pg_conn) => { - diesel::sql_query(&query).get_results(pg_conn)? - } + DbConnection::Pg(pg_conn) => diesel::sql_query(&query).get_results(pg_conn)?, DbConnection::Sqlite(sqlite_conn) => { diesel::sql_query(&query).get_results(sqlite_conn)? } @@ -561,20 +524,15 @@ pub async fn get_all_data_from_users(conn: &mut DbConnection) -> QueryResult QueryResult> { - +fn get_all_data_from_users_primary(conn: &mut DbConnection) -> QueryResult> { let query = "SELECT user_id, username, created_time, updated_time FROM users"; match conn { DbConnection::Pg(pg_conn) => { - diesel::sql_query(query) - .get_results::(pg_conn) + diesel::sql_query(query).get_results::(pg_conn) } DbConnection::Sqlite(sqlite_conn) => { - diesel::sql_query(query) - .get_results::(sqlite_conn) + diesel::sql_query(query).get_results::(sqlite_conn) } } } @@ -605,20 +563,16 @@ pub async fn get_data_from_users_by_id( }; let data_result = match conn { - DbConnection::Pg(pg_conn) => { - diesel::sql_query(select_query) - .bind::(user_id) - .get_result::(pg_conn) - .optional()? - .map(|res| res) - } - DbConnection::Sqlite(sqlite_conn) => { - diesel::sql_query(select_query) - .bind::(user_id) - .get_result::(sqlite_conn) - .optional()? - .map(|res| res) - } + DbConnection::Pg(pg_conn) => diesel::sql_query(select_query) + .bind::(user_id) + .get_result::(pg_conn) + .optional()? + .map(|res| res), + DbConnection::Sqlite(sqlite_conn) => diesel::sql_query(select_query) + .bind::(user_id) + .get_result::(sqlite_conn) + .optional()? 
+ .map(|res| res), }; if let Some(data) = data_result { @@ -648,28 +602,26 @@ fn get_data_from_users_primary_by_id( ) -> QueryResult> { let fallback_query = match conn { DbConnection::Pg(_) => { - "SELECT user_id, username, created_time, updated_time FROM users WHERE user_id = $1".to_string() + "SELECT user_id, username, created_time, updated_time FROM users WHERE user_id = $1" + .to_string() } DbConnection::Sqlite(_) => { - "SELECT user_id, username, created_time, updated_time FROM users WHERE user_id = ?".to_string() + "SELECT user_id, username, created_time, updated_time FROM users WHERE user_id = ?" + .to_string() } }; match conn { - DbConnection::Pg(pg_conn) => { - diesel::sql_query(fallback_query) - .bind::(user_id) - .get_result::(pg_conn) - .optional() - .map(|res| res.map(|data_result| data_result)) - } - DbConnection::Sqlite(sqlite_conn) => { - diesel::sql_query(fallback_query) - .bind::(user_id) - .get_result::(sqlite_conn) - .optional() - .map(|res| res.map(|data_result| data_result)) - } + DbConnection::Pg(pg_conn) => diesel::sql_query(fallback_query) + .bind::(user_id) + .get_result::(pg_conn) + .optional() + .map(|res| res.map(|data_result| data_result)), + DbConnection::Sqlite(sqlite_conn) => diesel::sql_query(fallback_query) + .bind::(user_id) + .get_result::(sqlite_conn) + .optional() + .map(|res| res.map(|data_result| data_result)), } } @@ -690,7 +642,9 @@ pub struct SecurityInfoResult { measure_time: String, } -pub async fn get_all_data_from_security(conn: &mut DbConnection) -> QueryResult> { +pub async fn get_all_data_from_security( + conn: &mut DbConnection, +) -> QueryResult> { let tables = ["security", "security_replica1", "security_replica2"]; let mut counter: HashMap = HashMap::new(); @@ -701,9 +655,7 @@ pub async fn get_all_data_from_security(conn: &mut DbConnection) -> QueryResult< ); let rows: Vec = match conn { - DbConnection::Pg(pg_conn) => { - diesel::sql_query(&query).get_results(pg_conn)? - } + DbConnection::Pg(pg_conn) => diesel::sql_query(&query).get_results(pg_conn)?, DbConnection::Sqlite(sqlite_conn) => { diesel::sql_query(&query).get_results(sqlite_conn)? } @@ -729,17 +681,15 @@ pub async fn get_all_data_from_security(conn: &mut DbConnection) -> QueryResult< fn get_all_data_from_security_primary( conn: &mut DbConnection, ) -> QueryResult> { - - let query = "SELECT uid, type, container_id, image_id, security_value, measure_time FROM security"; + let query = + "SELECT uid, type, container_id, image_id, security_value, measure_time FROM security"; match conn { DbConnection::Pg(pg_conn) => { - diesel::sql_query(query) - .get_results::(pg_conn) + diesel::sql_query(query).get_results::(pg_conn) } DbConnection::Sqlite(sqlite_conn) => { - diesel::sql_query(query) - .get_results::(sqlite_conn) + diesel::sql_query(query).get_results::(sqlite_conn) } } } @@ -770,20 +720,16 @@ pub async fn get_data_from_security_by_container_id( }; let data_result = match conn { - DbConnection::Pg(pg_conn) => { - diesel::sql_query(select_query) - .bind::(container_id) - .get_result::(pg_conn) - .optional()? - .map(|res| res) - } - DbConnection::Sqlite(sqlite_conn) => { - diesel::sql_query(select_query) - .bind::(container_id) - .get_result::(sqlite_conn) - .optional()? - .map(|res| res) - } + DbConnection::Pg(pg_conn) => diesel::sql_query(select_query) + .bind::(container_id) + .get_result::(pg_conn) + .optional()? + .map(|res| res), + DbConnection::Sqlite(sqlite_conn) => diesel::sql_query(select_query) + .bind::(container_id) + .get_result::(sqlite_conn) + .optional()? 
+ .map(|res| res), }; if let Some(data) = data_result { @@ -821,20 +767,16 @@ fn get_data_from_security_primary_by_container_id( }; match conn { - DbConnection::Pg(pg_conn) => { - diesel::sql_query(fallback_query) - .bind::(container_id) - .get_result::(pg_conn) - .optional() - .map(|res| res.map(|data_result| data_result)) - } - DbConnection::Sqlite(sqlite_conn) => { - diesel::sql_query(fallback_query) - .bind::(container_id) - .get_result::(sqlite_conn) - .optional() - .map(|res| res.map(|data_result| data_result)) - } + DbConnection::Pg(pg_conn) => diesel::sql_query(fallback_query) + .bind::(container_id) + .get_result::(pg_conn) + .optional() + .map(|res| res.map(|data_result| data_result)), + DbConnection::Sqlite(sqlite_conn) => diesel::sql_query(fallback_query) + .bind::(container_id) + .get_result::(sqlite_conn) + .optional() + .map(|res| res.map(|data_result| data_result)), } } @@ -864,20 +806,16 @@ pub async fn get_data_from_security_by_image_id( }; let data_result = match conn { - DbConnection::Pg(pg_conn) => { - diesel::sql_query(select_query) - .bind::(image_id) - .get_result::(pg_conn) - .optional()? - .map(|res| res) - } - DbConnection::Sqlite(sqlite_conn) => { - diesel::sql_query(select_query) - .bind::(image_id) - .get_result::(sqlite_conn) - .optional()? - .map(|res| res) - } + DbConnection::Pg(pg_conn) => diesel::sql_query(select_query) + .bind::(image_id) + .get_result::(pg_conn) + .optional()? + .map(|res| res), + DbConnection::Sqlite(sqlite_conn) => diesel::sql_query(select_query) + .bind::(image_id) + .get_result::(sqlite_conn) + .optional()? + .map(|res| res), }; if let Some(data) = data_result { @@ -915,26 +853,21 @@ fn get_data_from_security_primary_by_image_id( }; match conn { - DbConnection::Pg(pg_conn) => { - diesel::sql_query(fallback_query) - .bind::(image_id) - .get_result::(pg_conn) - .optional() - .map(|res| res.map(|data_result| data_result)) - } - DbConnection::Sqlite(sqlite_conn) => { - diesel::sql_query(fallback_query) - .bind::(image_id) - .get_result::(sqlite_conn) - .optional() - .map(|res| res.map(|data_result| data_result)) - } + DbConnection::Pg(pg_conn) => diesel::sql_query(fallback_query) + .bind::(image_id) + .get_result::(pg_conn) + .optional() + .map(|res| res.map(|data_result| data_result)), + DbConnection::Sqlite(sqlite_conn) => diesel::sql_query(fallback_query) + .bind::(image_id) + .get_result::(sqlite_conn) + .optional() + .map(|res| res.map(|data_result| data_result)), } } - -use serde::Deserialize; use diesel::sql_types::Integer; +use serde::Deserialize; #[derive(Debug, QueryableByName, Deserialize, Serialize, Clone, PartialEq, Eq, Hash)] pub struct RouterMgrInfoResult { #[diesel(sql_type = Text)] @@ -955,8 +888,6 @@ pub struct RouterMgrInfoResult { // op_code: i32 } - - impl RouterMgrInfoResult { pub fn msg_from(&self) -> &str { &self.msg_from @@ -983,7 +914,6 @@ impl RouterMgrInfoResult { // } } - pub async fn query_all_routermgr( conn: &mut DbConnection, ) -> Result, diesel::result::Error> { @@ -1038,12 +968,10 @@ pub async fn query_all_routermgr( // 执行查询 let results: Vec = match conn { DbConnection::Pg(pg_conn) => { - diesel::sql_query(&select_query) - .load::(pg_conn)? + diesel::sql_query(&select_query).load::(pg_conn)? } DbConnection::Sqlite(sqlite_conn) => { - diesel::sql_query(&select_query) - .load::(sqlite_conn)? + diesel::sql_query(&select_query).load::(sqlite_conn)? 
} }; @@ -1085,7 +1013,11 @@ pub async fn get_data_from_payload( cluster_id: &str, kind: &str, ) -> QueryResult> { - let tables = ["payload_data", "payload_data_replica1", "payload_data_replica2"]; + let tables = [ + "payload_data", + "payload_data_replica1", + "payload_data_replica2", + ]; let mut count_map: HashMap = HashMap::new(); for &table in &tables { @@ -1101,18 +1033,14 @@ pub async fn get_data_from_payload( }; let results = match conn { - DbConnection::Pg(pg_conn) => { - diesel::sql_query(&select_query) - .bind::(cluster_id) - .bind::(kind) - .get_results::(pg_conn)? - } - DbConnection::Sqlite(sqlite_conn) => { - diesel::sql_query(&select_query) - .bind::(cluster_id) - .bind::(kind) - .get_results::(sqlite_conn)? - } + DbConnection::Pg(pg_conn) => diesel::sql_query(&select_query) + .bind::(cluster_id) + .bind::(kind) + .get_results::(pg_conn)?, + DbConnection::Sqlite(sqlite_conn) => diesel::sql_query(&select_query) + .bind::(cluster_id) + .bind::(kind) + .get_results::(sqlite_conn)?, }; for record in results { @@ -1148,18 +1076,14 @@ fn get_all_from_payload_primary( }; match conn { - DbConnection::Pg(pg_conn) => { - diesel::sql_query(query) - .bind::(cluster_id) - .bind::(kind) - .get_results::(pg_conn) - } - DbConnection::Sqlite(sqlite_conn) => { - diesel::sql_query(query) - .bind::(cluster_id) - .bind::(kind) - .get_results::(sqlite_conn) - } + DbConnection::Pg(pg_conn) => diesel::sql_query(query) + .bind::(cluster_id) + .bind::(kind) + .get_results::(pg_conn), + DbConnection::Sqlite(sqlite_conn) => diesel::sql_query(query) + .bind::(cluster_id) + .bind::(kind) + .get_results::(sqlite_conn), } } #[derive(QueryableByName, Debug, Hash, Eq, PartialEq, Serialize)] @@ -1187,9 +1111,7 @@ pub async fn get_all_data_from_roles(conn: &mut DbConnection) -> QueryResult = match conn { - DbConnection::Pg(pg_conn) => { - diesel::sql_query(&query).get_results(pg_conn)? - } + DbConnection::Pg(pg_conn) => diesel::sql_query(&query).get_results(pg_conn)?, DbConnection::Sqlite(sqlite_conn) => { diesel::sql_query(&query).get_results(sqlite_conn)? } @@ -1212,19 +1134,15 @@ pub async fn get_all_data_from_roles(conn: &mut DbConnection) -> QueryResult QueryResult> { +fn get_all_data_from_roles_primary(conn: &mut DbConnection) -> QueryResult> { let query = "SELECT role_id, role_name, description, created_time, updated_time FROM roles"; match conn { DbConnection::Pg(pg_conn) => { - diesel::sql_query(query) - .get_results::(pg_conn) + diesel::sql_query(query).get_results::(pg_conn) } DbConnection::Sqlite(sqlite_conn) => { - diesel::sql_query(query) - .get_results::(sqlite_conn) + diesel::sql_query(query).get_results::(sqlite_conn) } } } @@ -1243,11 +1161,9 @@ pub async fn get_data_from_roles_by_id( ); let rows: Vec = match conn { - DbConnection::Pg(pg_conn) => { - diesel::sql_query(&query) - .bind::(role_id) - .get_results(pg_conn)? 
- } + DbConnection::Pg(pg_conn) => diesel::sql_query(&query) + .bind::(role_id) + .get_results(pg_conn)?, DbConnection::Sqlite(sqlite_conn) => { let query = query.replace("$1", "?"); diesel::sql_query(&query) @@ -1284,8 +1200,14 @@ pub struct PermissionInfoResult { pub updated_time: Option, } -pub async fn get_all_data_from_permissions(conn: &mut DbConnection) -> QueryResult> { - let tables = ["permissions", "permissions_replica1", "permissions_replica2"]; +pub async fn get_all_data_from_permissions( + conn: &mut DbConnection, +) -> QueryResult> { + let tables = [ + "permissions", + "permissions_replica1", + "permissions_replica2", + ]; let mut counter: HashMap = HashMap::new(); for table in tables { @@ -1295,9 +1217,7 @@ pub async fn get_all_data_from_permissions(conn: &mut DbConnection) -> QueryResu ); let rows: Vec = match conn { - DbConnection::Pg(pg_conn) => { - diesel::sql_query(&query).get_results(pg_conn)? - } + DbConnection::Pg(pg_conn) => diesel::sql_query(&query).get_results(pg_conn)?, DbConnection::Sqlite(sqlite_conn) => { diesel::sql_query(&query).get_results(sqlite_conn)? } @@ -1327,12 +1247,10 @@ fn get_all_data_from_permissions_primary( match conn { DbConnection::Pg(pg_conn) => { - diesel::sql_query(query) - .get_results::(pg_conn) + diesel::sql_query(query).get_results::(pg_conn) } DbConnection::Sqlite(sqlite_conn) => { - diesel::sql_query(query) - .get_results::(sqlite_conn) + diesel::sql_query(query).get_results::(sqlite_conn) } } } @@ -1340,7 +1258,11 @@ pub async fn get_data_from_permissions_by_id( conn: &mut DbConnection, permission_id: &str, ) -> QueryResult> { - let tables = ["permissions", "permissions_replica1", "permissions_replica2"]; + let tables = [ + "permissions", + "permissions_replica1", + "permissions_replica2", + ]; let mut counter: HashMap = HashMap::new(); for table in tables { @@ -1350,11 +1272,9 @@ pub async fn get_data_from_permissions_by_id( ); let rows: Vec = match conn { - DbConnection::Pg(pg_conn) => { - diesel::sql_query(&query) - .bind::(permission_id) - .get_results(pg_conn)? - } + DbConnection::Pg(pg_conn) => diesel::sql_query(&query) + .bind::(permission_id) + .get_results(pg_conn)?, DbConnection::Sqlite(sqlite_conn) => { let query = query.replace("$1", "?"); diesel::sql_query(&query) @@ -1379,7 +1299,11 @@ pub async fn get_permissions_by_role_id( conn: &mut DbConnection, role_id: &str, ) -> QueryResult> { - let tables = ["role_permissions rp", "role_permissions_replica1 rp", "role_permissions_replica2 rp"]; + let tables = [ + "role_permissions rp", + "role_permissions_replica1 rp", + "role_permissions_replica2 rp", + ]; let mut counter: HashMap = HashMap::new(); for table in tables { @@ -1392,11 +1316,9 @@ pub async fn get_permissions_by_role_id( ); let rows: Vec = match conn { - DbConnection::Pg(pg_conn) => { - diesel::sql_query(&query) - .bind::(role_id) - .get_results(pg_conn)? 
- } + DbConnection::Pg(pg_conn) => diesel::sql_query(&query) + .bind::(role_id) + .get_results(pg_conn)?, DbConnection::Sqlite(sqlite_conn) => { let query = query.replace("$1", "?"); diesel::sql_query(&query) @@ -1428,7 +1350,11 @@ pub async fn get_roles_by_permission_id( conn: &mut DbConnection, permission_id: &str, ) -> QueryResult> { - let tables = ["role_permissions rp", "role_permissions_replica1 rp", "role_permissions_replica2 rp"]; + let tables = [ + "role_permissions rp", + "role_permissions_replica1 rp", + "role_permissions_replica2 rp", + ]; let mut counter: HashMap = HashMap::new(); for table in tables { @@ -1438,11 +1364,9 @@ pub async fn get_roles_by_permission_id( ); let rows: Vec = match conn { - DbConnection::Pg(pg_conn) => { - diesel::sql_query(&query) - .bind::(permission_id) - .get_results(pg_conn)? - } + DbConnection::Pg(pg_conn) => diesel::sql_query(&query) + .bind::(permission_id) + .get_results(pg_conn)?, DbConnection::Sqlite(sqlite_conn) => { let query = query.replace("$1", "?"); diesel::sql_query(&query) @@ -1461,4 +1385,4 @@ pub async fn get_roles_by_permission_id( .filter(|(_, count)| *count >= 2) .map(|(data, _)| data) .collect()) -} \ No newline at end of file +} diff --git a/src/db/insert.rs b/src/db/insert.rs index 04a8123..477b22c 100644 --- a/src/db/insert.rs +++ b/src/db/insert.rs @@ -6,11 +6,9 @@ use crate::db::db::DbConnection; * **/ use chrono::Utc; -use diesel::sql_types::{Integer, Text, Nullable}; +use diesel::sql_types::{Integer, Nullable, Text}; use diesel::{sql_query, Connection, PgConnection, QueryResult, RunQueryDsl, SqliteConnection}; use serde_json::Value; -use crate::db::check_exist::check_role_permission_exists; - fn insert_metadata_in_transaction_pg( transaction: &mut PgConnection, @@ -283,22 +281,10 @@ pub async fn insert_users( ) -> QueryResult<()> { match conn { DbConnection::Pg(pg_conn) => pg_conn.transaction(|transaction| { - insert_users_in_transaction_pg( - transaction, - user_id, - username, - password, - role_id, - ) + insert_users_in_transaction_pg(transaction, user_id, username, password, role_id) }), DbConnection::Sqlite(sqlite_conn) => sqlite_conn.transaction(|transaction| { - insert_users_in_transaction_sqlite( - transaction, - user_id, - username, - password, - role_id, - ) + insert_users_in_transaction_sqlite(transaction, user_id, username, password, role_id) }), } .expect("unknow conn in insert_users"); @@ -388,7 +374,7 @@ pub async fn insert_security( container_id, image_id, security_value, - measure_time + measure_time, ) }), DbConnection::Sqlite(sqlite_conn) => sqlite_conn.transaction(|transaction| { @@ -399,11 +385,11 @@ pub async fn insert_security( container_id, image_id, security_value, - measure_time + measure_time, ) }), } - .expect("unknow conn in insert_security"); + .expect("unknow conn in insert_security"); Ok(()) } @@ -498,7 +484,7 @@ pub async fn insert_routermgr( ) }), } - .expect("unknow conn in insert_routermgr"); + .expect("unknow conn in insert_routermgr"); Ok(()) } @@ -509,7 +495,11 @@ fn insert_payload_in_transaction_pg( data: &str, ) -> QueryResult<()> { // 表列表 - let table_array: [&str; 3] = ["payload_data", "payload_data_replica1", "payload_data_replica2"]; + let table_array: [&str; 3] = [ + "payload_data", + "payload_data_replica1", + "payload_data_replica2", + ]; for table_name in table_array { // 使用参数绑定构建插入查询 @@ -537,7 +527,11 @@ fn insert_payload_in_transaction_sqlite( kind: &str, data: &str, ) -> QueryResult<()> { - let table_array: [&str; 3] = ["payload_data", "payload_data_replica1", 
"payload_data_replica2"]; + let table_array: [&str; 3] = [ + "payload_data", + "payload_data_replica1", + "payload_data_replica2", + ]; for table_name in table_array { let insert_users_query = format!( @@ -565,23 +559,13 @@ pub async fn insert_payload( ) -> QueryResult<()> { match conn { DbConnection::Pg(pg_conn) => pg_conn.transaction(|transaction| { - insert_payload_in_transaction_pg( - transaction, - cluster_id, - kind, - data, - ) + insert_payload_in_transaction_pg(transaction, cluster_id, kind, data) }), DbConnection::Sqlite(sqlite_conn) => sqlite_conn.transaction(|transaction| { - insert_payload_in_transaction_sqlite( - transaction, - cluster_id, - kind, - data, - ) + insert_payload_in_transaction_sqlite(transaction, cluster_id, kind, data) }), } - .expect("unknow conn in insert_payload"); + .expect("unknow conn in insert_payload"); Ok(()) } @@ -636,7 +620,11 @@ pub async fn insert_permissions( description: Option<&str>, ) -> QueryResult<()> { let current_time = Utc::now().naive_utc().to_string(); - let tables = ["permissions", "permissions_replica1", "permissions_replica2"]; + let tables = [ + "permissions", + "permissions_replica1", + "permissions_replica2", + ]; for table in tables { let query = format!( @@ -680,7 +668,11 @@ pub async fn insert_role_permission( permission_id: &str, ) -> QueryResult<()> { let current_time = Utc::now().naive_utc().to_string(); - let tables = ["role_permissions", "role_permissions_replica1", "role_permissions_replica2"]; + let tables = [ + "role_permissions", + "role_permissions_replica1", + "role_permissions_replica2", + ]; for table in tables { let query = format!( @@ -712,4 +704,4 @@ pub async fn insert_role_permission( } } Ok(()) -} \ No newline at end of file +} diff --git a/src/db/mod.rs b/src/db/mod.rs index ebbc879..a898ff2 100644 --- a/src/db/mod.rs +++ b/src/db/mod.rs @@ -9,5 +9,5 @@ pub mod db; pub mod delete; pub mod get; pub mod insert; -pub mod update; pub mod network_status_ops; +pub mod update; diff --git a/src/db/network_status_ops.rs b/src/db/network_status_ops.rs index d7e39a2..3b16cd0 100644 --- a/src/db/network_status_ops.rs +++ b/src/db/network_status_ops.rs @@ -10,11 +10,13 @@ use crate::cores::handlers::network_status::models::{ UpdateNetworkInterface, }; use crate::db::db::DbConnection; -use crate::schema::{connectivity_tests, network_interfaces}; // Ensure this path is correct +use crate::schema::{connectivity_tests, network_interfaces}; +// Ensure this path is correct use chrono::Utc; use diesel::prelude::*; -use diesel::sql_types::{Bool, Integer, Nullable, Text}; // Import necessary SQL types +use diesel::sql_types::{Bool, Integer, Nullable, Text}; +// Import necessary SQL types use diesel::{sql_query, Connection, PgConnection, QueryResult, RunQueryDsl, SqliteConnection}; /// Structure for retrieving only the ID field from database queries @@ -37,7 +39,7 @@ fn map_diesel_err(error: diesel::result::Error) -> diesel::result::Error { // For now, just returning the error, but you can customize this // to ServerError or a similar structure if you integrate this with an API framework. // For direct db ops, returning diesel::result::Error is fine. 
- log::error!("Database error: {}", error); + tracing::error!("Database error: {}", error); error } @@ -73,13 +75,13 @@ fn insert_network_interface_pg( if i == 0 { result = Some( sql_query(query) - .bind::(&data.interface_name) - .bind::, _>(&data.ip_address) - .bind::(&data.mac_address) - .bind::(data.is_up) - .bind::, _>(data.speed_mbps) - .bind::(¤t_time) - .bind::(¤t_time) + .bind::(&data.interface_name) + .bind::, _>(&data.ip_address) + .bind::(&data.mac_address) + .bind::(data.is_up) + .bind::, _>(data.speed_mbps) + .bind::(¤t_time) + .bind::(¤t_time) .get_result::(transaction) .map_err(map_diesel_err)?, ); @@ -136,7 +138,7 @@ fn insert_network_interface_sqlite( .bind::(¤t_time) .execute(transaction) .map_err(map_diesel_err)?; - + if i == 0 { let fetch_query = format!("SELECT id FROM {} WHERE interface_name = ?", table_name); result_id = Some( @@ -144,8 +146,8 @@ fn insert_network_interface_sqlite( .bind::(&data.interface_name) .load::(transaction) // Use local IdOnly .map_err(map_diesel_err)? - .first() - .map(|item| item.id) + .first() + .map(|item| item.id) .ok_or(diesel::result::Error::NotFound) .map_err(map_diesel_err)?, ); @@ -158,7 +160,7 @@ fn insert_network_interface_sqlite( .first::(transaction) .map_err(map_diesel_err) } else { - network_interfaces::table + network_interfaces::table .filter(network_interfaces::interface_name.eq(&data.interface_name)) .first::(transaction) .map_err(map_diesel_err) @@ -262,7 +264,7 @@ fn update_network_interface_pg( let primary_update = diesel::update(network_interfaces.filter(if_name_col.eq(name))) .set((data, updated_at.eq(¤t_time))) // Pass changeset and update time .get_result::(transaction); - + result = Some(primary_update.map_err(map_diesel_err)?); // For replicas, a raw query might be simpler if the changeset is dynamic or complex. @@ -293,7 +295,7 @@ fn update_network_interface_pg( } // interface_name itself is not typically updated when used as a key to find the record. // If it is, the query needs to handle that (e.g. update by id instead). - + if set_clauses.is_empty() { // No actual data to update other than timestamp return result.ok_or_else(|| map_diesel_err(diesel::result::Error::NotFound)); @@ -406,7 +408,7 @@ fn insert_connectivity_test_pg( // If a new test for the same IP comes, it should probably update. // Sticking to `ON CONFLICT (target_ip) DO NOTHING RETURNING ...` for insert-or-fetch. // Or `ON CONFLICT (target_ip) DO UPDATE SET is_connected = EXCLUDED.is_connected, last_checked_at = EXCLUDED.last_checked_at` - + for (i, table_name) in table_array.iter().enumerate() { let query = format!( "INSERT INTO {} (target_ip, is_connected, last_checked_at) \ @@ -418,14 +420,14 @@ fn insert_connectivity_test_pg( if i == 0 { result = Some( sql_query(query) - .bind::(&data.target_ip) - .bind::(data.is_connected) - .bind::(¤t_time) + .bind::(&data.target_ip) + .bind::(data.is_connected) + .bind::(¤t_time) .get_result(transaction) .map_err(map_diesel_err)?, ); } else { - sql_query(query) + sql_query(query) .bind::(&data.target_ip) .bind::(data.is_connected) .bind::(¤t_time) @@ -471,30 +473,30 @@ fn insert_connectivity_test_sqlite( .bind::(¤t_time) .execute(transaction) .map_err(map_diesel_err)?; - + if i == 0 { - // Fetch the ID or confirm row + // Fetch the ID or confirm row let fetch_query = format!("SELECT id FROM {} WHERE target_ip = ?", table_name); result_id = Some( sql_query(fetch_query) .bind::(&data.target_ip) .load::(transaction) // Use local IdOnly .map_err(map_diesel_err)? 
- .first() - .map(|item| item.id) + .first() + .map(|item| item.id) .ok_or(diesel::result::Error::NotFound) .map_err(map_diesel_err)?, ); } } - + if let Some(id_val) = result_id { connectivity_tests::table .filter(connectivity_tests::id.eq(id_val)) .first::(transaction) .map_err(map_diesel_err) } else { - connectivity_tests::table + connectivity_tests::table .filter(connectivity_tests::target_ip.eq(&data.target_ip)) .first::(transaction) .map_err(map_diesel_err) @@ -574,7 +576,6 @@ mod tests { use crate::db::db::DbConnection; use diesel::connection::SimpleConnection; use diesel::r2d2::{ConnectionManager, Pool}; - use diesel::RunQueryDsl; use std::env; use std::sync::Once; @@ -595,7 +596,7 @@ mod tests { .expect("Failed to create pool"); let mut conn = pool.get().expect("Failed to get connection from pool"); - + // Create test tables conn.batch_execute( r#" @@ -691,10 +692,10 @@ mod tests { // Test update let update_data = UpdateNetworkInterface { interface_name: Some("eth0".to_string()), - ip_address:Some("192.168.1.2".to_string()), - mac_address:Some("00:11:22:33:44:66".to_string()), - is_up:Some(false), - speed_mbps:Some(100), + ip_address: Some("192.168.1.2".to_string()), + mac_address: Some("00:11:22:33:44:66".to_string()), + is_up: Some(false), + speed_mbps: Some(100), }; let updated = update_network_interface_by_name(&mut conn, "eth0", &update_data) @@ -760,13 +761,11 @@ mod tests { let mut conn = setup_test_db(); // Test get non-existent network interface - let result = get_network_interface_by_name(&mut conn, "non_existent") - .await; + let result = get_network_interface_by_name(&mut conn, "non_existent").await; assert!(result.is_err()); // Test get non-existent connectivity test - let result = get_connectivity_test_by_ip(&mut conn, "0.0.0.0") - .await; + let result = get_connectivity_test_by_ip(&mut conn, "0.0.0.0").await; assert!(result.is_err()); } } diff --git a/src/db/update.rs b/src/db/update.rs index 30c3bb2..a47b130 100644 --- a/src/db/update.rs +++ b/src/db/update.rs @@ -1,4 +1,6 @@ use crate::db::db::DbConnection; +use chrono::Utc; +use diesel::sql_types::{Integer, Nullable, Text}; /** * Copyright (2024, ) Institute of Software, Chinese Academy of Sciences * author: chenhongyu23@otcaix.iscas.ac.cn, wuheng@iscas.ac.cn @@ -7,8 +9,6 @@ use crate::db::db::DbConnection; **/ use diesel::{QueryResult, RunQueryDsl}; use serde_json::Value; -use diesel::sql_types::{Integer, Text, Nullable}; -use chrono::Utc; pub async fn update_data_in_kine( conn: &mut DbConnection, @@ -18,7 +18,6 @@ pub async fn update_data_in_kine( item_namespace: Option<&str>, json_data: &Value, ) -> QueryResult { - // 需要更新的表列表 let tables = ["kine", "kine_replica1", "kine_replica2"]; let mut total_rows_affected = 0; @@ -104,7 +103,6 @@ pub async fn update_password_in_users_by_id( user_id: &str, password: &str, ) -> QueryResult { - // 需要更新的表列表 let tables = ["users", "users_replica1", "users_replica2"]; let mut total_rows_affected = 0; @@ -123,21 +121,16 @@ pub async fn update_password_in_users_by_id( // 执行更新操作 let rows_affected = match conn { - DbConnection::Pg(pg_conn) => { - diesel::sql_query(update_query) - .bind::(password) - .bind::(Utc::now().naive_utc().to_string()) - .bind::(user_id) - .execute(pg_conn)? - - } - DbConnection::Sqlite(sqlite_conn) => { - diesel::sql_query(update_query) - .bind::(password) - .bind::(Utc::now().naive_utc().to_string()) - .bind::(user_id) - .execute(sqlite_conn)? 
- } + DbConnection::Pg(pg_conn) => diesel::sql_query(update_query) + .bind::(password) + .bind::(Utc::now().naive_utc().to_string()) + .bind::(user_id) + .execute(pg_conn)?, + DbConnection::Sqlite(sqlite_conn) => diesel::sql_query(update_query) + .bind::(password) + .bind::(Utc::now().naive_utc().to_string()) + .bind::(user_id) + .execute(sqlite_conn)?, }; total_rows_affected += rows_affected; @@ -152,7 +145,6 @@ pub async fn update_username_in_users_by_id( user_id: &str, username: &str, ) -> QueryResult { - // 需要更新的表列表 let tables = ["users", "users_replica1", "users_replica2"]; let mut total_rows_affected = 0; @@ -171,21 +163,16 @@ pub async fn update_username_in_users_by_id( // 执行更新操作 let rows_affected = match conn { - DbConnection::Pg(pg_conn) => { - diesel::sql_query(update_query) - .bind::(username) - .bind::(Utc::now().naive_utc().to_string()) - .bind::(user_id) - .execute(pg_conn)? - - } - DbConnection::Sqlite(sqlite_conn) => { - diesel::sql_query(update_query) - .bind::(username) - .bind::(Utc::now().naive_utc().to_string()) - .bind::(user_id) - .execute(sqlite_conn)? - } + DbConnection::Pg(pg_conn) => diesel::sql_query(update_query) + .bind::(username) + .bind::(Utc::now().naive_utc().to_string()) + .bind::(user_id) + .execute(pg_conn)?, + DbConnection::Sqlite(sqlite_conn) => diesel::sql_query(update_query) + .bind::(username) + .bind::(Utc::now().naive_utc().to_string()) + .bind::(user_id) + .execute(sqlite_conn)?, }; total_rows_affected += rows_affected; @@ -203,7 +190,6 @@ pub async fn update_routermgr( next_hop: &str, intf_id: i32, ) -> QueryResult { - // 需要更新的表列表 let tables = ["routermgr", "routermgr_replica1", "routermgr_replica2"]; let mut total_rows_affected = 0; @@ -222,25 +208,20 @@ pub async fn update_routermgr( // 执行更新操作 let rows_affected = match conn { - DbConnection::Pg(pg_conn) => { - diesel::sql_query(update_query) - .bind::(next_hop) - .bind::(intf_id) - .bind::(msg_from) - .bind::(dst_ip) - .bind::(prefix_len) - .execute(pg_conn)? - - } - DbConnection::Sqlite(sqlite_conn) => { - diesel::sql_query(update_query) - .bind::(next_hop) - .bind::(intf_id) - .bind::(msg_from) - .bind::(dst_ip) - .bind::(prefix_len) - .execute(sqlite_conn)? 
- } + DbConnection::Pg(pg_conn) => diesel::sql_query(update_query) + .bind::(next_hop) + .bind::(intf_id) + .bind::(msg_from) + .bind::(dst_ip) + .bind::(prefix_len) + .execute(pg_conn)?, + DbConnection::Sqlite(sqlite_conn) => diesel::sql_query(update_query) + .bind::(next_hop) + .bind::(intf_id) + .bind::(msg_from) + .bind::(dst_ip) + .bind::(prefix_len) + .execute(sqlite_conn)?, }; total_rows_affected += rows_affected; @@ -250,7 +231,6 @@ pub async fn update_routermgr( Ok(total_rows_affected > 1) } - pub async fn update_role_name_in_roles_by_id( conn: &mut DbConnection, role_id: &str, @@ -300,7 +280,11 @@ pub async fn update_permission_in_permissions_by_id( description: Option<&str>, ) -> QueryResult<()> { let current_time = Utc::now().naive_utc().to_string(); - let tables = ["permissions", "permissions_replica1", "permissions_replica2"]; + let tables = [ + "permissions", + "permissions_replica1", + "permissions_replica2", + ]; for table in tables { let query = format!( @@ -372,4 +356,4 @@ pub async fn update_role_in_users_by_id( } } Ok(()) -} \ No newline at end of file +} diff --git a/src/lib.rs b/src/lib.rs index 951deb1..66262d7 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -7,11 +7,11 @@ pub mod cores; #[cfg(any(feature = "servers"))] pub mod db; +pub mod middleware; #[cfg(any(feature = "servers"))] pub mod schema; #[cfg(any(feature = "servers", feature = "utils"))] pub mod utils; -pub mod middleware; #[cfg(any(feature = "servers", feature = "messaging"))] pub use cores::daemons::messaging; @@ -19,4 +19,4 @@ pub use cores::daemons::messaging; pub use cores::{prepare_app_state, start_server}; #[cfg(feature = "os_socket")] -pub use os_socket_comms; \ No newline at end of file +pub use os_socket_comms; diff --git a/src/middleware/mod.rs b/src/middleware/mod.rs index 2108f68..79c66ba 100644 --- a/src/middleware/mod.rs +++ b/src/middleware/mod.rs @@ -1 +1 @@ -pub mod token; \ No newline at end of file +pub mod token; diff --git a/src/middleware/token.rs b/src/middleware/token.rs index 4af3f8a..12d416f 100644 --- a/src/middleware/token.rs +++ b/src/middleware/token.rs @@ -2,12 +2,16 @@ use std::future::{ready, Ready}; use std::rc::Rc; use std::task::{Context, Poll}; -use actix_web::{dev::{ServiceRequest, ServiceResponse}, error, Error, HttpMessage}; use actix_web::body::MessageBody; use actix_web::dev::{Service, Transform}; +use actix_web::{ + dev::{ServiceRequest, ServiceResponse}, + error, Error, HttpMessage, +}; use futures::future::LocalBoxFuture; -use crate::utils::token::decode_token; // 你已有的 decode_token 函数 +use crate::utils::token::decode_token; +// 你已有的 decode_token 函数 pub struct TokenAuth { pub secret: Vec, @@ -59,7 +63,10 @@ where } // 检查 Authorization 头 - let auth_header = req.headers().get("Authorization").and_then(|h| h.to_str().ok()); + let auth_header = req + .headers() + .get("Authorization") + .and_then(|h| h.to_str().ok()); if let Some(auth_header) = auth_header { if let Some(token) = auth_header.strip_prefix("Bearer ") { @@ -71,16 +78,16 @@ where Box::pin(async move { fut.await }) } Err(_) => { - Box::pin(async move { - Err(error::ErrorUnauthorized("Invalid token")) - }) + Box::pin(async move { Err(error::ErrorUnauthorized("Invalid token")) }) } - } + }; } } Box::pin(async move { - Err(error::ErrorUnauthorized("Missing or malformed Authorization header")) + Err(error::ErrorUnauthorized( + "Missing or malformed Authorization header", + )) }) } } diff --git a/src/schema.rs b/src/schema.rs index f481005..2e5f97e 100644 --- a/src/schema.rs +++ b/src/schema.rs @@ -254,7 
+254,6 @@ diesel::table! { } } - diesel::table! { routermgr (msg_from, dst_ip, prefixLen) { msg_from -> Text, @@ -367,7 +366,6 @@ diesel::joinable!(users -> roles (role_id)); diesel::joinable!(users_replica1 -> roles (role_id)); diesel::joinable!(users_replica2 -> roles (role_id)); - diesel::allow_tables_to_appear_in_same_query!( connectivity_tests, connectivity_tests_replica1, diff --git a/src/utils/mod.rs b/src/utils/mod.rs index e2c0fe2..0b89aa3 100644 --- a/src/utils/mod.rs +++ b/src/utils/mod.rs @@ -1,7 +1,9 @@ pub mod headers; -mod uuid; +pub mod password; +pub mod request_context; #[cfg(feature = "test")] pub mod test; -pub mod password; pub mod token; +mod uuid; + pub use uuid::*; diff --git a/src/utils/password.rs b/src/utils/password.rs index 9197287..8d93e7b 100644 --- a/src/utils/password.rs +++ b/src/utils/password.rs @@ -7,4 +7,3 @@ pub fn hash_password(password: &str) -> Result { pub fn verify_password(password: &str, hash: &str) -> Result { verify(password, hash) } - diff --git a/src/utils/request_context.rs b/src/utils/request_context.rs new file mode 100644 index 0000000..aac05c7 --- /dev/null +++ b/src/utils/request_context.rs @@ -0,0 +1,46 @@ +// common/src/request_context.rs +use tracing::Span; +use uuid::Uuid; + +#[derive(Clone)] +pub struct RequestContext { + pub id: String, + pub span: Span, // 关键:携带tracing span +} + +impl RequestContext { + pub fn new(source: &str) -> Self { + let id = format!("{}_{}", source, Uuid::new_v4()); + let span = tracing::info_span!("request", request_id = %id); + RequestContext { id, span } + } + + // 进入上下文(设置当前线程的span) + pub fn enter(&self) -> tracing::span::Entered<'_> { + self.span.enter() + } + + // 跨线程传递时使用 + pub fn to_remote(self) -> RemoteContext { + RemoteContext { + id: self.id, + span: self.span.clone(), + } + } +} + +// 用于跨线程/异步边界传递 +#[derive(Clone)] +pub struct RemoteContext { + pub id: String, + span: Span, +} + +impl RemoteContext { + pub fn activate(&self) -> RequestContext { + RequestContext { + id: self.id.clone(), + span: self.span.clone(), + } + } +} diff --git a/src/utils/test.rs b/src/utils/test.rs index 70beeb3..d9df234 100644 --- a/src/utils/test.rs +++ b/src/utils/test.rs @@ -33,8 +33,6 @@ pub async fn setup_message_cli() -> Messaging { ); } - - // unsafe { // datamgr_api::SetParameter( // data_plugin_manager, @@ -98,7 +96,7 @@ pub async fn start_test_api_server( app_state.watch_daemon.start().await; app_state.consensus_daemon.start().await; - log::info!("Starting test API server"); + tracing::info!("Starting test API server"); // 启动各个server let messaging_server: Box = Box::new(MessagingServer); let actix_web_tcp_server: Box = servers::actix_web::tcp(tcp_address.as_str()); diff --git a/src/utils/token.rs b/src/utils/token.rs index a44bbc6..121ec10 100644 --- a/src/utils/token.rs +++ b/src/utils/token.rs @@ -1,11 +1,11 @@ -use std::{fs, io}; -use std::io::Write; -use std::path::{Path, PathBuf}; +use base64::{engine::general_purpose, Engine}; use chrono::{Duration, Utc}; -use serde::{Deserialize, Serialize}; use jsonwebtoken::{decode, encode, Algorithm, DecodingKey, EncodingKey, Header, Validation}; -use base64::{engine::general_purpose, Engine}; use rand::Rng; +use serde::{Deserialize, Serialize}; +use std::io::Write; +use std::path::{Path, PathBuf}; +use std::{fs, io}; pub fn get_or_create_secret(file_path: &PathBuf) -> io::Result> { let path = Path::new(file_path); @@ -35,28 +35,38 @@ pub fn get_or_create_secret(file_path: &PathBuf) -> io::Result> { Ok(key.to_vec()) } - #[derive(Debug, Serialize, Deserialize)] 
pub struct Claims { pub user_id: String, pub username: String, pub exp: usize, } -pub fn generate_token(user_id: &str, username: &str, token_secret: &[u8]) -> Result { +pub fn generate_token( + user_id: &str, + username: &str, + token_secret: &[u8], +) -> Result { let expiration = Utc::now() + Duration::hours(24); let claims = Claims { user_id: user_id.to_owned(), username: username.to_owned(), exp: expiration.timestamp() as usize, }; - encode(&Header::default(), &claims, &EncodingKey::from_secret(token_secret)) + encode( + &Header::default(), + &claims, + &EncodingKey::from_secret(token_secret), + ) } -pub fn decode_token(token: &str, token_secret: &[u8]) -> Result { +pub fn decode_token( + token: &str, + token_secret: &[u8], +) -> Result { let token_data = decode::( token, &DecodingKey::from_secret(token_secret), &Validation::new(Algorithm::HS256), )?; Ok(token_data.claims) -} \ No newline at end of file +} diff --git a/src/utils/uuid.rs b/src/utils/uuid.rs index 892d1fd..6b666e8 100644 --- a/src/utils/uuid.rs +++ b/src/utils/uuid.rs @@ -2,7 +2,6 @@ use std::path::PathBuf; use std::{fs, io}; use uuid::Uuid; - pub fn get_or_create_uuid(file_path: &PathBuf) -> io::Result { let path = file_path.as_path(); @@ -25,4 +24,4 @@ pub fn get_or_create_uuid(file_path: &PathBuf) -> io::Result { fs::write(path, &uuid)?; Ok(uuid) -} \ No newline at end of file +} -- Gitee
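
The queries in src/db/get.rs above all follow the same two-of-three replica read: the statement is run against the primary table and both *_replica tables, identical rows are tallied in a HashMap, and only rows returned by at least two tables are kept. A minimal sketch of that tally step, with a generic row type and a caller-supplied per-table fetch closure standing in for the diesel::sql_query calls (the helper itself is not part of this patch):

use std::collections::HashMap;
use std::hash::Hash;

// Tally rows from the primary table and its two replicas, keeping only those
// returned by at least two of the three copies.
fn quorum_rows<Row, F>(tables: &[&str], mut fetch: F) -> Vec<Row>
where
    Row: Eq + Hash,
    F: FnMut(&str) -> Vec<Row>,
{
    let mut counter: HashMap<Row, usize> = HashMap::new();
    for &table in tables {
        for row in fetch(table) {
            *counter.entry(row).or_insert(0) += 1;
        }
    }
    counter
        .into_iter()
        .filter(|(_, count)| *count >= 2) // a majority of the three copies agree
        .map(|(row, _)| row)
        .collect()
}

The delete paths in src/db/delete.rs apply the same idea in reverse: rows_affected is summed across the three tables and the call reports success only when more than one copy was touched (Ok(total_rows_affected > 1)).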
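The new src/utils/request_context.rs ties a generated request id to a tracing span, so the id is attached to every event emitted while that span is entered. A hedged usage sketch from inside the crate (the handler name, source label, and log fields are illustrative only and do not appear in this patch):

use crate::utils::request_context::RequestContext;

fn trace_request(role_id: String) {
    // One context per inbound request; events recorded while the span is
    // entered carry the generated request_id field.
    let ctx = RequestContext::new("api_server");
    {
        let _guard = ctx.enter();
        tracing::info!(role = %role_id, "deleting role and its replicas");
    }

    // To keep the same request id across a task boundary, convert the context
    // into a RemoteContext, move it into the task, and re-activate it there.
    // (Holding the enter() guard across an .await point should be avoided;
    // this block contains no awaits.)
    let remote = ctx.to_remote();
    tokio::spawn(async move {
        let ctx = remote.activate();
        let _guard = ctx.enter();
        tracing::info!(request_id = %ctx.id, "replica cleanup finished");
    });
}

RemoteContext exists so the span, and with it the request id, can cross a spawn or async boundary and be re-entered on the other side instead of starting a fresh, unrelated span.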