diff --git a/.gitignore b/.gitignore index 49f8c02d36a54b86c4ed6f007de69530f423c0bd..fc336a84c6b13b5141439d18c79dc0828f3a2c69 100644 --- a/.gitignore +++ b/.gitignore @@ -9,3 +9,5 @@ Cargo.lock # These are backup files generated by rustfmt **/*.rs.bk +database.sqlite +test-database.sqlite \ No newline at end of file diff --git a/Cargo.lock b/Cargo.lock index f509f09ec3dee0bbb9b3d6803d7094a369546c9e..48f50a6e3d98a0336384ea3b2a840c761ef6c2ce 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8,7 +8,7 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5f7b0a21988c1bf877cf4759ef5ddaac04c1c9fe808c9142ecb78ba97d97a28a" dependencies = [ - "bitflags", + "bitflags 2.6.0", "bytes", "futures-core", "futures-sink", @@ -31,7 +31,7 @@ dependencies = [ "actix-utils", "ahash", "base64 0.22.1", - "bitflags", + "bitflags 2.6.0", "brotli", "bytes", "bytestring", @@ -39,7 +39,7 @@ dependencies = [ "encoding_rs", "flate2", "futures-core", - "h2", + "h2 0.3.26", "http 0.2.12", "httparse", "httpdate", @@ -65,7 +65,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e01ed3140b2f8d422c68afa1ed2e85d996ea619c988ac834d255db32138655cb" dependencies = [ "quote", - "syn", + "syn 2.0.90", ] [[package]] @@ -182,7 +182,7 @@ dependencies = [ "actix-router", "proc-macro2", "quote", - "syn", + "syn 2.0.90", ] [[package]] @@ -289,7 +289,7 @@ version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6d36fc52c7f6c869915e99412912f22093507da8d9e942ceaf66fe4b7c14422a" dependencies = [ - "windows-sys", + "windows-sys 0.52.0", ] [[package]] @@ -299,14 +299,14 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5bf74e1b6e971609db8ca7a9ce79fd5768ab6ae46441c572e46cf596f59e57f8" dependencies = [ "anstyle", - "windows-sys", + "windows-sys 0.52.0", ] [[package]] name = "anyhow" -version = "1.0.91" +version = "1.0.94" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c042108f3ed77fd83760a5fd79b53be043192bb3b9dba91d8c574c0ada7850c8" +checksum = "c1fd03a028ef38ba2276dce7e33fcd6369c158a1bca17946c4b1b701891c1ff7" [[package]] name = "async-nats" @@ -323,7 +323,7 @@ dependencies = [ "nuid", "once_cell", "regex", - "rustls-pemfile", + "rustls-pemfile 0.3.0", "serde", "serde_json", "serde_nanos", @@ -331,7 +331,7 @@ dependencies = [ "subslice", "time", "tokio", - "tokio-rustls", + "tokio-rustls 0.23.4", "tokio-util", "url", "webpki-roots", @@ -356,7 +356,7 @@ checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.90", ] [[package]] @@ -367,15 +367,75 @@ checksum = "721cae7de5c34fbb2acd27e21e6d2cf7b886dce0c27388d46c4e6c47ea4318dd" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.90", +] + +[[package]] +name = "atomic" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8d818003e740b63afc82337e3160717f4f63078720a810b7b903e70a5d1d2994" +dependencies = [ + "bytemuck", ] +[[package]] +name = "atomic-waker" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" + [[package]] name = "autocfg" version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" +[[package]] +name = "axum" +version = "0.6.20" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b829e4e32b91e643de6eafe82b1d90675f5874230191a4ffbc1b336dec4d6bf" +dependencies = [ + "async-trait", + "axum-core", + "bitflags 1.3.2", + "bytes", + "futures-util", + "http 0.2.12", + "http-body 0.4.6", + "hyper 0.14.32", + "itoa", + "matchit", + "memchr", + "mime", + "percent-encoding", + "pin-project-lite", + "rustversion", + "serde", + "sync_wrapper 0.1.2", + "tower", + "tower-layer", + "tower-service", +] + +[[package]] +name = "axum-core" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "759fa577a247914fd3f7f76d62972792636412fbfd634cd452f6a385a74d2d2c" +dependencies = [ + "async-trait", + "bytes", + "futures-util", + "http 0.2.12", + "http-body 0.4.6", + "mime", + "rustversion", + "tower-layer", + "tower-service", +] + [[package]] name = "backtrace" version = "0.3.74" @@ -397,6 +457,12 @@ version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" +[[package]] +name = "base64" +version = "0.21.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567" + [[package]] name = "base64" version = "0.22.1" @@ -433,6 +499,12 @@ version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "349f9b6a179ed607305526ca489b34ad0a41aed5f7980fa90eb03160b69598fb" +[[package]] +name = "bitflags" +version = "1.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" + [[package]] name = "bitflags" version = "2.6.0" @@ -496,6 +568,12 @@ version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5ce89b21cab1437276d2650d57e971f9d548a2d9037cc231abdc0562b97498ce" +[[package]] +name = "bytemuck" +version = "1.20.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b37c88a63ffd85d15b406896cc343916d7cf57838a847b3a6f2ca5d39a5695a" + [[package]] name = "byteorder" version = "1.5.0" @@ -571,6 +649,12 @@ dependencies = [ "phf_codegen", ] +[[package]] +name = "cidr" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6bdf600c45bd958cf2945c445264471cca8b6c8e67bc87b71affd6d7e5682621" + [[package]] name = "colorchoice" version = "1.0.2" @@ -600,6 +684,16 @@ dependencies = [ "version_check", ] +[[package]] +name = "core-foundation" +version = "0.9.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91e195e091a93c46f7102ec7818a2aa394e1e1771c3ab4825963fa03e45afb8f" +dependencies = [ + "core-foundation-sys", + "libc", +] + [[package]] name = "core-foundation-sys" version = "0.8.7" @@ -624,6 +718,18 @@ dependencies = [ "cfg-if", ] +[[package]] +name = "cri-api" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "055697c8a1381d2196852aa8cdf9b7cde763bb1f811a8a8c47dd8d97b1872f3b" +dependencies = [ + "prost", + "serde", + "tonic", + "tonic-build", +] + [[package]] name = "crypto-common" version = "0.1.6" @@ -668,7 +774,7 @@ dependencies = [ "proc-macro2", "quote", "strsim", - "syn", + "syn 2.0.90", ] [[package]] @@ -679,7 +785,20 @@ checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" dependencies = [ "darling_core", "quote", - "syn", + "syn 2.0.90", +] + +[[package]] +name = "dashmap" +version 
= "5.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "978747c1d849a7d2ee5e8adc0159961c48fb7e5db2f06af6723b80123bb53856" +dependencies = [ + "cfg-if", + "hashbrown 0.14.5", + "lock_api", + "once_cell", + "parking_lot_core", ] [[package]] @@ -717,7 +836,7 @@ dependencies = [ "proc-macro2", "quote", "rustc_version", - "syn", + "syn 2.0.90", ] [[package]] @@ -726,7 +845,7 @@ version = "2.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "158fe8e2e68695bd615d7e4f3227c0727b151330d3e253b525086c348d055d5e" dependencies = [ - "bitflags", + "bitflags 2.6.0", "byteorder", "diesel_derives", "itoa", @@ -746,7 +865,7 @@ dependencies = [ "dsl_auto_type", "proc-macro2", "quote", - "syn", + "syn 2.0.90", ] [[package]] @@ -766,7 +885,7 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "209c735641a413bc68c4923a9d6ad4bcb3ca306b794edaa7eb0b3228a99ffb25" dependencies = [ - "syn", + "syn 2.0.90", ] [[package]] @@ -796,7 +915,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.90", ] [[package]] @@ -816,7 +935,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn", + "syn 2.0.90", ] [[package]] @@ -899,6 +1018,16 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" +[[package]] +name = "errno" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "534c5cf6194dfab3db3242765c03bbe257cf92f22b38f6bc0c58d59108a820ba" +dependencies = [ + "libc", + "windows-sys 0.52.0", +] + [[package]] name = "fancy-regex" version = "0.13.0" @@ -911,10 +1040,15 @@ dependencies = [ ] [[package]] -name = "feventbus" -version = "0.3.0" +name = "fastrand" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81b7cca7940f29e24592a89727028106825660bd8f9aae1d85283008fea92220" +checksum = "486f806e73c5707928240ddc295403b1b93c96a02038563881c4a2fd84b81ac4" + +[[package]] +name = "feventbus" +version = "0.3.1" +source = "git+https://gitee.com/iscas-system/eventbus.git#b38c27f76ebfb8888f7927bb5c36939f45c76eea" dependencies = [ "async-nats", "async-trait", @@ -928,6 +1062,12 @@ dependencies = [ "tokio", ] +[[package]] +name = "fixedbitset" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80" + [[package]] name = "flate2" version = "1.0.34" @@ -945,6 +1085,7 @@ dependencies = [ "actix-http", "actix-service", "actix-web", + "anyhow", "async-stream", "async-trait", "chrono", @@ -953,17 +1094,52 @@ dependencies = [ "dotenv", "env_logger", "feventbus", + "fleetmod", + "futures", + "itertools 0.13.0", + "json-patch", "jsonschema", "k8s-openapi", "lazy_static", + "log", "once_cell", "r2d2", + "reqwest", "schemars", "serde", "serde_json", + "serde_yaml", + "serial_test", + "strum", "tokio", ] +[[package]] +name = "fleetmacros" +version = "0.1.0" +source = "git+https://gitee.com/iscas-system/fleetmacros#8ef2895a5d1bb3a1c2d619d989cd23aaeeb16fb3" +dependencies = [ + "darling", + "quote", + "syn 2.0.90", +] + +[[package]] +name = "fleetmod" +version = "0.1.1" +source = "git+https://gitee.com/iscas-system/fleetmod.git#9f1dd10055f6fcbb8cf544be2027a9947473cd5a" +dependencies = [ + "anyhow", + "chrono", + "cidr", + "cri-api", + "fleetmacros", + "regex", 
+ "serde", + "serde_yaml", + "uuid", +] + [[package]] name = "fluent-uri" version = "0.3.2" @@ -981,6 +1157,21 @@ version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" +[[package]] +name = "foreign-types" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" +dependencies = [ + "foreign-types-shared", +] + +[[package]] +name = "foreign-types-shared" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" + [[package]] name = "form_urlencoded" version = "1.2.1" @@ -1056,7 +1247,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.90", ] [[package]] @@ -1128,13 +1319,44 @@ dependencies = [ "futures-sink", "futures-util", "http 0.2.12", - "indexmap", + "indexmap 2.6.0", + "slab", + "tokio", + "tokio-util", + "tracing", +] + +[[package]] +name = "h2" +version = "0.4.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ccae279728d634d083c00f6099cb58f01cc99c145b84b8be2f6c74618d79922e" +dependencies = [ + "atomic-waker", + "bytes", + "fnv", + "futures-core", + "futures-sink", + "http 1.1.0", + "indexmap 2.6.0", "slab", "tokio", "tokio-util", "tracing", ] +[[package]] +name = "hashbrown" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" + +[[package]] +name = "hashbrown" +version = "0.14.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" + [[package]] name = "hashbrown" version = "0.15.0" @@ -1175,6 +1397,17 @@ dependencies = [ "itoa", ] +[[package]] +name = "http-body" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ceab25649e9960c0311ea418d17bee82c0dcec1bd053b5f9a66e265a693bed2" +dependencies = [ + "bytes", + "http 0.2.12", + "pin-project-lite", +] + [[package]] name = "http-body" version = "1.0.1" @@ -1194,7 +1427,7 @@ dependencies = [ "bytes", "futures-util", "http 1.1.0", - "http-body", + "http-body 1.0.1", "pin-project-lite", ] @@ -1216,6 +1449,30 @@ version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" +[[package]] +name = "hyper" +version = "0.14.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41dfc780fdec9373c01bae43289ea34c972e40ee3c9f6b3c8801a35f35586ce7" +dependencies = [ + "bytes", + "futures-channel", + "futures-core", + "futures-util", + "h2 0.3.26", + "http 0.2.12", + "http-body 0.4.6", + "httparse", + "httpdate", + "itoa", + "pin-project-lite", + "socket2", + "tokio", + "tower-service", + "tracing", + "want", +] + [[package]] name = "hyper" version = "1.5.0" @@ -1225,8 +1482,9 @@ dependencies = [ "bytes", "futures-channel", "futures-util", + "h2 0.4.7", "http 1.1.0", - "http-body", + "http-body 1.0.1", "httparse", "itoa", "pin-project-lite", @@ -1235,6 +1493,51 @@ dependencies = [ "want", ] +[[package]] +name = "hyper-rustls" +version = "0.27.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"08afdbb5c31130e3034af566421053ab03787c640246a446327f550d11bcb333" +dependencies = [ + "futures-util", + "http 1.1.0", + "hyper 1.5.0", + "hyper-util", + "rustls 0.23.19", + "rustls-pki-types", + "tokio", + "tokio-rustls 0.26.1", + "tower-service", +] + +[[package]] +name = "hyper-timeout" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbb958482e8c7be4bc3cf272a766a2b0bf1a6755e7a6ae777f017a31d11b13b1" +dependencies = [ + "hyper 0.14.32", + "pin-project-lite", + "tokio", + "tokio-io-timeout", +] + +[[package]] +name = "hyper-tls" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70206fc6890eaca9fde8a0bf71caa2ddfc9fe045ac9e5c70df101a7dbde866e0" +dependencies = [ + "bytes", + "http-body-util", + "hyper 1.5.0", + "hyper-util", + "native-tls", + "tokio", + "tokio-native-tls", + "tower-service", +] + [[package]] name = "hyper-util" version = "0.1.9" @@ -1245,8 +1548,8 @@ dependencies = [ "futures-channel", "futures-util", "http 1.1.0", - "http-body", - "hyper", + "http-body 1.0.1", + "hyper 1.5.0", "pin-project-lite", "socket2", "tokio", @@ -1392,7 +1695,7 @@ checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.90", ] [[package]] @@ -1429,6 +1732,16 @@ version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "206ca75c9c03ba3d4ace2460e57b189f39f43de612c2f85836e65c929701bb2d" +[[package]] +name = "indexmap" +version = "1.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" +dependencies = [ + "autocfg", + "hashbrown 0.12.3", +] + [[package]] name = "indexmap" version = "2.6.0" @@ -1436,7 +1749,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "707907fe3c25f5424cce2cb7e1cbcafee6bdbe735ca90ef77c29e84591e5b9da" dependencies = [ "equivalent", - "hashbrown", + "hashbrown 0.15.0", ] [[package]] @@ -1451,6 +1764,24 @@ version = "1.70.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7943c866cc5cd64cbc25b2e01621d07fa8eb2a1a23160ee81ce38704e97b8ecf" +[[package]] +name = "itertools" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba291022dbbd398a455acf126c1e341954079855bc60dfdda641363bd6922569" +dependencies = [ + "either", +] + +[[package]] +name = "itertools" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "413ee7dfc52ee1a4949ceeb7dbc8a33f2d6c088194d9f922fb8318faf1f01186" +dependencies = [ + "either", +] + [[package]] name = "itoa" version = "1.0.11" @@ -1475,6 +1806,28 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "json-patch" +version = "3.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "863726d7afb6bc2590eeff7135d923545e5e964f004c2ccf8716c25e70a86f08" +dependencies = [ + "jsonptr", + "serde", + "serde_json", + "thiserror", +] + +[[package]] +name = "jsonptr" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5dea2b27dd239b2556ed7a25ba842fe47fd602e7fc7433c2a8d6106d4d9edd70" +dependencies = [ + "serde", + "serde_json", +] + [[package]] name = "jsonschema" version = "0.23.0" @@ -1543,6 +1896,12 @@ dependencies = [ "vcpkg", ] +[[package]] +name = "linux-raw-sys" +version = "0.4.14" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "78b3ae25bc7c8c38cec158d1f2757ee79e9b3740fbc7ccf0e59e4b08d793fa89" + [[package]] name = "litemap" version = "0.7.3" @@ -1582,6 +1941,12 @@ version = "0.4.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24" +[[package]] +name = "matchit" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0e7465ac9959cc2b1404e8e2367b43684a6d13790fe23056cc8c6c5a6b7bcb94" + [[package]] name = "memchr" version = "2.7.4" @@ -1634,17 +1999,40 @@ dependencies = [ "libc", "log", "wasi", - "windows-sys", + "windows-sys 0.52.0", ] [[package]] -name = "nkeys" -version = "0.2.0" +name = "multimap" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e66a7cd1358277b2a6f77078e70aea7315ff2f20db969cc61153103ec162594" -dependencies = [ - "byteorder", - "data-encoding", +checksum = "defc4c55412d89136f966bbb339008b474350e5e6e78d2714439c386b3137a03" + +[[package]] +name = "native-tls" +version = "0.2.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8614eb2c83d59d1c8cc974dd3f920198647674a0a035e1af1fa58707e317466" +dependencies = [ + "libc", + "log", + "openssl", + "openssl-probe", + "openssl-sys", + "schannel", + "security-framework", + "security-framework-sys", + "tempfile", +] + +[[package]] +name = "nkeys" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0e66a7cd1358277b2a6f77078e70aea7315ff2f20db969cc61153103ec162594" +dependencies = [ + "byteorder", + "data-encoding", "ed25519-dalek", "getrandom", "log", @@ -1768,6 +2156,50 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c08d65885ee38876c4f86fa503fb49d7b507c2b62552df7c70b2fce627e06381" +[[package]] +name = "openssl" +version = "0.10.68" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6174bc48f102d208783c2c84bf931bb75927a617866870de8a4ea85597f871f5" +dependencies = [ + "bitflags 2.6.0", + "cfg-if", + "foreign-types", + "libc", + "once_cell", + "openssl-macros", + "openssl-sys", +] + +[[package]] +name = "openssl-macros" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.90", +] + +[[package]] +name = "openssl-probe" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" + +[[package]] +name = "openssl-sys" +version = "0.9.104" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "45abf306cbf99debc8195b66b7346498d7b10c210de50418b5ccd7ceba08c741" +dependencies = [ + "cc", + "libc", + "pkg-config", + "vcpkg", +] + [[package]] name = "ordered-float" version = "2.10.1" @@ -1836,6 +2268,16 @@ version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" +[[package]] +name = "petgraph" +version = "0.6.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b4c5cc86750666a3ed20bdaf5ca2a0344f9c67674cae0515bec2da16fbaa47db" +dependencies = [ + "fixedbitset", + "indexmap 2.6.0", +] + [[package]] name = "phf" version = "0.11.2" @@ -1875,6 +2317,26 @@ 
dependencies = [ "uncased", ] +[[package]] +name = "pin-project" +version = "1.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "be57f64e946e500c8ee36ef6331845d40a93055567ec57e8fae13efd33759b95" +dependencies = [ + "pin-project-internal", +] + +[[package]] +name = "pin-project-internal" +version = "1.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c0f5fad0874fc7abcd4d750e76917eaebbecaa2c20bde22e1dbeeba8beb758c" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.90", +] + [[package]] name = "pin-project-lite" version = "0.2.14" @@ -1929,6 +2391,16 @@ dependencies = [ "vcpkg", ] +[[package]] +name = "prettyplease" +version = "0.2.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "64d1ec885c64d0457d564db4ec299b2dae3f9c02808b8ad9c3a089c591b18033" +dependencies = [ + "proc-macro2", + "syn 2.0.90", +] + [[package]] name = "proc-macro2" version = "1.0.91" @@ -1938,6 +2410,59 @@ dependencies = [ "unicode-ident", ] +[[package]] +name = "prost" +version = "0.12.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "deb1435c188b76130da55f17a466d252ff7b1418b2ad3e037d127b94e3411f29" +dependencies = [ + "bytes", + "prost-derive", +] + +[[package]] +name = "prost-build" +version = "0.12.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "22505a5c94da8e3b7c2996394d1c933236c4d743e81a410bcca4e6989fc066a4" +dependencies = [ + "bytes", + "heck", + "itertools 0.12.1", + "log", + "multimap", + "once_cell", + "petgraph", + "prettyplease", + "prost", + "prost-types", + "regex", + "syn 2.0.90", + "tempfile", +] + +[[package]] +name = "prost-derive" +version = "0.12.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "81bddcdb20abf9501610992b6759a4c888aef7d1a7247ef75e2404275ac24af1" +dependencies = [ + "anyhow", + "itertools 0.12.1", + "proc-macro2", + "quote", + "syn 2.0.90", +] + +[[package]] +name = "prost-types" +version = "0.12.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9091c90b0a32608e984ff2fa4091273cbdd755d54935c51d520887f4a1dbd5b0" +dependencies = [ + "prost", +] + [[package]] name = "quote" version = "1.0.37" @@ -2000,7 +2525,7 @@ version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9b6dfecf2c74bce2466cabf93f6664d6998a69eb21e39f4207930065b27b771f" dependencies = [ - "bitflags", + "bitflags 2.6.0", ] [[package]] @@ -2020,7 +2545,7 @@ checksum = "bcc303e793d3734489387d205e9b186fac9c6cfacedd98cbb2e8a5943595f3e6" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.90", ] [[package]] @@ -2038,9 +2563,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.11.0" +version = "1.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38200e5ee88914975b69f657f0801b6f6dccafd44fd9326302a4aaeecfacb1d8" +checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191" dependencies = [ "aho-corasick", "memchr", @@ -2079,26 +2604,34 @@ checksum = "a77c62af46e79de0a562e1a9849205ffcb7fc1238876e9bd743357570e04046f" dependencies = [ "base64 0.22.1", "bytes", + "encoding_rs", "futures-channel", "futures-core", "futures-util", + "h2 0.4.7", "http 1.1.0", - "http-body", + "http-body 1.0.1", "http-body-util", - "hyper", + "hyper 1.5.0", + "hyper-rustls", + "hyper-tls", "hyper-util", "ipnet", "js-sys", "log", "mime", + "native-tls", "once_cell", "percent-encoding", "pin-project-lite", + "rustls-pemfile 
2.2.0", "serde", "serde_json", "serde_urlencoded", - "sync_wrapper", + "sync_wrapper 1.0.1", + "system-configuration", "tokio", + "tokio-native-tls", "tower-service", "url", "wasm-bindgen", @@ -2134,7 +2667,7 @@ dependencies = [ "libc", "spin 0.9.8", "untrusted 0.9.0", - "windows-sys", + "windows-sys 0.52.0", ] [[package]] @@ -2152,6 +2685,19 @@ dependencies = [ "semver", ] +[[package]] +name = "rustix" +version = "0.38.37" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8acb788b847c24f28525660c4d7758620a7210875711f79e7f663cc152726811" +dependencies = [ + "bitflags 2.6.0", + "errno", + "libc", + "linux-raw-sys", + "windows-sys 0.52.0", +] + [[package]] name = "rustls" version = "0.20.9" @@ -2164,6 +2710,19 @@ dependencies = [ "webpki", ] +[[package]] +name = "rustls" +version = "0.23.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "934b404430bb06b3fae2cba809eb45a1ab1aecd64491213d7c3301b88393f8d1" +dependencies = [ + "once_cell", + "rustls-pki-types", + "rustls-webpki", + "subtle", + "zeroize", +] + [[package]] name = "rustls-pemfile" version = "0.3.0" @@ -2173,12 +2732,53 @@ dependencies = [ "base64 0.13.1", ] +[[package]] +name = "rustls-pemfile" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dce314e5fee3f39953d46bb63bb8a46d40c2f8fb7cc5a3b6cab2bde9721d6e50" +dependencies = [ + "rustls-pki-types", +] + +[[package]] +name = "rustls-pki-types" +version = "1.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "16f1201b3c9a7ee8039bcadc17b7e605e2945b27eee7631788c1bd2b0643674b" + +[[package]] +name = "rustls-webpki" +version = "0.102.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "64ca1bc8749bd4cf37b5ce386cc146580777b4e8572c7b97baf22c83f444bee9" +dependencies = [ + "ring 0.17.8", + "rustls-pki-types", + "untrusted 0.9.0", +] + +[[package]] +name = "rustversion" +version = "1.0.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0e819f2bc632f285be6d7cd36e25940d45b2391dd6d9b939e79de557f7014248" + [[package]] name = "ryu" version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f3cb5ba0dc43242ce17de99c180e96db90b235b8a9fdc9543c96d2209116bd9f" +[[package]] +name = "schannel" +version = "0.1.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "01227be5826fa0690321a2ba6c5cd57a19cf3f6a09e76973b58e61de6ab9d1c1" +dependencies = [ + "windows-sys 0.59.0", +] + [[package]] name = "scheduled-thread-pool" version = "0.2.7" @@ -2209,7 +2809,7 @@ dependencies = [ "proc-macro2", "quote", "serde_derive_internals", - "syn", + "syn 2.0.90", ] [[package]] @@ -2228,6 +2828,29 @@ dependencies = [ "untrusted 0.9.0", ] +[[package]] +name = "security-framework" +version = "2.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02" +dependencies = [ + "bitflags 2.6.0", + "core-foundation", + "core-foundation-sys", + "libc", + "security-framework-sys", +] + +[[package]] +name = "security-framework-sys" +version = "2.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea4a292869320c0272d7bc55a5a6aafaff59b4f63404a003887b679a2e05b4b6" +dependencies = [ + "core-foundation-sys", + "libc", +] + [[package]] name = "semver" version = "1.0.23" @@ -2261,7 +2884,7 @@ checksum = 
"ad1e866f866923f252f05c889987993144fb74e722403468a4ebd70c3cd756c0" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.90", ] [[package]] @@ -2272,7 +2895,7 @@ checksum = "18d26a20a969b9e3fdf2fc2d9f21eda6c40e2de84c9408bb5d3b05d499aae711" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.90", ] [[package]] @@ -2304,7 +2927,7 @@ checksum = "6c64451ba24fc7a6a2d60fc75dd9c83c90903b19028d4eff35e88fc1e86564e9" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.90", ] [[package]] @@ -2328,6 +2951,44 @@ dependencies = [ "serde", ] +[[package]] +name = "serde_yaml" +version = "0.9.34+deprecated" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47" +dependencies = [ + "indexmap 2.6.0", + "itoa", + "ryu", + "serde", + "unsafe-libyaml", +] + +[[package]] +name = "serial_test" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1c789ec87f4687d022a2405cf46e0cd6284889f1839de292cadeb6c6019506f2" +dependencies = [ + "dashmap", + "futures", + "lazy_static", + "log", + "parking_lot", + "serial_test_derive", +] + +[[package]] +name = "serial_test_derive" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b64f9e531ce97c88b4778aad0ceee079216071cffec6ac9b904277f8f92e7fe3" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + [[package]] name = "sha1" version = "0.10.6" @@ -2339,6 +3000,12 @@ dependencies = [ "digest 0.10.7", ] +[[package]] +name = "sha1_smol" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbfa15b3dddfee50a0fff136974b3e1bde555604ba463834a7eb7deb6417705d" + [[package]] name = "sha2" version = "0.9.9" @@ -2413,7 +3080,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ce305eb0b4296696835b71df73eb912e0f1ffd2556a501fcede6e0c50349191c" dependencies = [ "libc", - "windows-sys", + "windows-sys 0.52.0", ] [[package]] @@ -2449,6 +3116,28 @@ version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" +[[package]] +name = "strum" +version = "0.26.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8fec0f0aef304996cf250b31b5a10dee7980c85da9d759361292b8bca5a18f06" +dependencies = [ + "strum_macros", +] + +[[package]] +name = "strum_macros" +version = "0.26.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c6bee85a5a24955dc440386795aa378cd9cf82acd5f764469152d2270e581be" +dependencies = [ + "heck", + "proc-macro2", + "quote", + "rustversion", + "syn 2.0.90", +] + [[package]] name = "subslice" version = "0.2.3" @@ -2466,15 +3155,32 @@ checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" [[package]] name = "syn" -version = "2.0.89" +version = "1.0.109" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44d46482f1c1c87acd84dea20c1bf5ebff4c757009ed6bf19cfd36fb10e92c4e" +checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" dependencies = [ "proc-macro2", "quote", "unicode-ident", ] +[[package]] +name = "syn" +version = "2.0.90" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "919d3b74a5dd0ccd15aeb8f93e7006bd9e14c295087c9896a110f490752bcf31" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = 
"sync_wrapper" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" + [[package]] name = "sync_wrapper" version = "1.0.1" @@ -2492,7 +3198,41 @@ checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.90", +] + +[[package]] +name = "system-configuration" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c879d448e9d986b661742763247d3693ed13609438cf3d006f51f5368a5ba6b" +dependencies = [ + "bitflags 2.6.0", + "core-foundation", + "system-configuration-sys", +] + +[[package]] +name = "system-configuration-sys" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e1d1b10ced5ca923a1fcb8d03e96b8d3268065d724548c0211415ff6ac6bac4" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name = "tempfile" +version = "3.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0f2c9fc62d0beef6951ccffd757e241266a2c833136efbe35af6cd2567dca5b" +dependencies = [ + "cfg-if", + "fastrand", + "once_cell", + "rustix", + "windows-sys 0.59.0", ] [[package]] @@ -2512,7 +3252,7 @@ checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.90", ] [[package]] @@ -2586,7 +3326,17 @@ dependencies = [ "signal-hook-registry", "socket2", "tokio-macros", - "windows-sys", + "windows-sys 0.52.0", +] + +[[package]] +name = "tokio-io-timeout" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "30b74022ada614a1b4834de765f9bb43877f910cc8ce4be40e89042c9223a8bf" +dependencies = [ + "pin-project-lite", + "tokio", ] [[package]] @@ -2597,7 +3347,17 @@ checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.90", +] + +[[package]] +name = "tokio-native-tls" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbae76ab933c85776efabc971569dd6119c580d8f5d448769dec1764bf796ef2" +dependencies = [ + "native-tls", + "tokio", ] [[package]] @@ -2606,11 +3366,32 @@ version = "0.23.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c43ee83903113e03984cb9e5cebe6c04a5116269e900e3ddba8f068a62adda59" dependencies = [ - "rustls", + "rustls 0.20.9", "tokio", "webpki", ] +[[package]] +name = "tokio-rustls" +version = "0.26.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f6d0975eaace0cf0fcadee4e4aaa5da15b5c079146f2cffb67c113be122bf37" +dependencies = [ + "rustls 0.23.19", + "tokio", +] + +[[package]] +name = "tokio-stream" +version = "0.1.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eca58d7bba4a75707817a2c44174253f9236b2d5fbd055602e9d5c07c139a047" +dependencies = [ + "futures-core", + "pin-project-lite", + "tokio", +] + [[package]] name = "tokio-util" version = "0.7.12" @@ -2651,13 +3432,79 @@ version = "0.22.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4ae48d6208a266e853d946088ed816055e556cc6028c5e8e2b84d9fa5dd7c7f5" dependencies = [ - "indexmap", + "indexmap 2.6.0", "serde", "serde_spanned", "toml_datetime", "winnow", ] +[[package]] +name = "tonic" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "76c4eb7a4e9ef9d4763600161f12f5070b92a578e1b634db88a6887844c91a13" +dependencies = [ + "async-stream", + "async-trait", + "axum", + "base64 0.21.7", + "bytes", + "h2 0.3.26", + "http 0.2.12", + "http-body 0.4.6", + "hyper 0.14.32", + "hyper-timeout", + "percent-encoding", + "pin-project", + "prost", + "tokio", + "tokio-stream", + "tower", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "tonic-build" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "be4ef6dd70a610078cb4e338a0f79d06bc759ff1b22d2120c2ff02ae264ba9c2" +dependencies = [ + "prettyplease", + "proc-macro2", + "prost-build", + "quote", + "syn 2.0.90", +] + +[[package]] +name = "tower" +version = "0.4.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c" +dependencies = [ + "futures-core", + "futures-util", + "indexmap 1.9.3", + "pin-project", + "pin-project-lite", + "rand", + "slab", + "tokio", + "tokio-util", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "tower-layer" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "121c2a6cda46980bb0fcd1647ffaf6cd3fc79a013de288782836f6df9c48780e" + [[package]] name = "tower-service" version = "0.3.3" @@ -2672,9 +3519,21 @@ checksum = "c3523ab5a71916ccf420eebdf5521fcef02141234bbc0b8a49f2fdc4544364ef" dependencies = [ "log", "pin-project-lite", + "tracing-attributes", "tracing-core", ] +[[package]] +name = "tracing-attributes" +version = "0.1.28" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "395ae124c09f9e6918a2310af6038fba074bcf474ac352496d5910dd59a2226d" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.90", +] + [[package]] name = "tracing-core" version = "0.1.32" @@ -2726,6 +3585,12 @@ dependencies = [ "tinyvec", ] +[[package]] +name = "unsafe-libyaml" +version = "0.2.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "673aac59facbab8a9007c7f6108d11f63b603f7cabff99fabf650fea5c32b861" + [[package]] name = "untrusted" version = "0.7.1" @@ -2772,6 +3637,11 @@ name = "uuid" version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f8c5f0a0af699448548ad1a2fbf920fb4bee257eae39953ba95cb84891a0446a" +dependencies = [ + "atomic", + "getrandom", + "sha1_smol", +] [[package]] name = "uuid-simd" @@ -2839,7 +3709,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn", + "syn 2.0.90", "wasm-bindgen-shared", ] @@ -2873,7 +3743,7 @@ checksum = "26c6ab57572f7a24a4985830b120de1594465e5d500f24afe89e16b4e833ef68" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.90", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -2983,6 +3853,15 @@ dependencies = [ "windows-targets", ] +[[package]] +name = "windows-sys" +version = "0.59.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" +dependencies = [ + "windows-targets", +] + [[package]] name = "windows-targets" version = "0.52.6" @@ -3088,7 +3967,7 @@ checksum = "28cc31741b18cb6f1d5ff12f5b7523e3d6eb0852bbbad19d73905511d9849b95" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.90", "synstructure", ] @@ -3110,7 +3989,7 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.90", ] [[package]] @@ 
-3130,7 +4009,7 @@ checksum = "0ea7b4a3637ea8669cedf0f1fd5c286a17f3de97b8dd5a70a6c167a1730e63a5" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.90", "synstructure", ] @@ -3151,7 +4030,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.90", ] [[package]] @@ -3173,7 +4052,7 @@ checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.90", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 1e3061b018d99dd817f175371e6844b2e50a29a9..e251f16da7f5913050db4b4c104e35d7feba5def 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -11,7 +11,9 @@ repository = "https://gitee.com/iscas-system/apiserver" readme = "README.md" [dependencies] -feventbus = "0.3.0" +#feventbus = "0.3.0" +feventbus = { git = "https://gitee.com/iscas-system/eventbus.git" } +fleetmod = { git = "https://gitee.com/iscas-system/fleetmod.git" } r2d2 = "0.8.10" dotenv = "0.15.0" diesel = { version = "2.2.0", features = ["sqlite", "postgres", "r2d2"] } @@ -30,4 +32,13 @@ schemars = "0.8.21" chrono = "0.4.38" once_cell = "1.20.2" lazy_static = "1.5.0" -async-stream = "0.3.6" \ No newline at end of file +async-stream = "0.3.6" +serial_test = "0.10.0" +reqwest = "0.12.9" +log = "0.4.22" +itertools = "0.13.0" +anyhow = "1.0.94" +json-patch = "3.0.1" +strum = { version = "0.26.3", features = ["derive"] } +serde_yaml = "0.9.34+deprecated" +futures = "0.3.31" diff --git a/src/cores/apiserver.rs b/src/cores/apiserver.rs index 1a0597a33f92e6c4f648c7ec5e67164958fefdde..af1d955afdfbccc5a8821fd7df056281d89a7d3a 100644 --- a/src/cores/apiserver.rs +++ b/src/cores/apiserver.rs @@ -1,750 +1,159 @@ -use actix_web::{HttpServer, App, web, HttpResponse}; - -use crate::cores::config::{Config, APIS_WITHOUT_NAMESPACE, APIS_WITH_NAMESPACE, API_WITHOUT_NAMESPACE, API_WITH_NAMESPACE}; -use crate::cores::handlers::{EventManager, Handler}; - -use std::sync::{Arc}; +use crate::cores::events::WatchEventPublisher; +use crate::cores::handlers::{APIServerResponse, Handler}; +use crate::cores::services::{APIServerError, APIServerResult}; +use crate::db::db::DbPool; +use actix_web::dev::Server; +use actix_web::web::route; +use actix_web::{http, web, App, Error, HttpResponse, HttpServer}; use feventbus::impls::nats::nats::NatsCli; -use feventbus::traits::controller::EventBus; -use crate::cores::db::{DbPool}; - -pub struct ApiServer -{ - // Config是用户请求URL与路由处理器的映射关系封装 - config: Arc, +use futures::StreamExt; +use http::Method; +use serde::{Deserialize, Serialize}; +use serde_json::Value; +use std::sync::Arc; + +pub struct APIServer {} + +#[derive(Clone)] +pub struct AppState { + pub db_pool: Arc, + pub nats_cli: Arc, + pub handler: Arc, + pub watch_event_publisher: Arc, } -impl ApiServer -{ - pub fn new (config: Box) -> Self { - ApiServer { - config: Arc::from(config), - } +impl APIServer { + pub fn new() -> Self { + APIServer {} } - // TODO 未来加上else - // TODO 优化注册流程 - pub async fn start - (self: Arc, addr: &str, handler: T, db_pool: Arc) -> Result<(), std::io::Error> { - - let handler = Arc::new(handler); - // 初始化 NatsCli 实例 - let nats_cli = Arc::new(NatsCli::new().await.unwrap()); - - // 初始化事件管理器 - let event_manager = EventManager::new(); - + pub fn start(&self, addr: &str, app_state: AppState) -> Server { let server = HttpServer::new(move || { + let app = App::new() + .app_data(web::Data::new(app_state.clone())); + let routes = K8sStyleRoute::get_routes(); + let app = 
routes.iter().fold(app, |app, route| { + log::info!("register route: {} {}", route.get_method(), route.get_path()); + app.route(route.get_path().as_str(), route.get_web_route()) + }); + let app = app.default_service(web::to(move || async { + Ok::( + HttpResponse::NotFound().body("invalid url, see https://gitee.com/iscas-system/apiserver/wikis/pages?sort_id=12661944&doc_id=6135774")) + })); + app + }); + // Given resource constraints, only one worker is started for now + // TODO allow users to configure the number of workers in the future + server.workers(1).bind(addr).expect("reason").run() + } +} - - let mut app = App::new(); - let handler = Arc::clone(&handler); - let config = Arc::clone(&self.config); - let db_pool = Arc::clone(&db_pool); - let nats_cli = Arc::clone(&nats_cli); - let event_manager = event_manager.clone(); - - // URL是手动注册的,不会存在APIS_WITHOUT_NAMESPACE, APIS_WITH_NAMESPACE, - // API_WITHOUT_NAMESPACE, API_WITH_NAMESPACE之外情况 - for (key, route) in config.create_routes() { - if route == API_WITHOUT_NAMESPACE { - let handler = Arc::clone(&handler); - let db_pool = Arc::clone(&db_pool); - let nats_cli = Arc::clone(&nats_cli); - let event_manager = event_manager.clone(); - - app = app.route(key, web::post().to( - move |path, data| { - let handler = Arc::clone(&handler); - let db_pool = Arc::clone(&db_pool); - let nats_cli = Arc::clone(&nats_cli); - let event_manager = event_manager.clone(); +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] +pub struct K8sStylePathParams { + pub group: Option, + pub version: String, + pub namespace: Option, + pub plural: String, + pub name: Option, +} - async move { - match db_pool.get_connection() { - Ok(mut conn) => { - handler - .default() - .create_api_without_namespace(path, data, &mut conn, nats_cli, event_manager) - .await - }, - Err(_) => Err(actix_web::error::ErrorInternalServerError( - "Failed to get database connection", - )), - } - } - })); - } else if route == API_WITH_NAMESPACE { - let handler = Arc::clone(&handler); - let db_pool = Arc::clone(&db_pool); - let nats_cli = Arc::clone(&nats_cli); - let event_manager = event_manager.clone(); +pub trait APIServerRoute { + fn get_method(&self) -> Method; + fn get_path(&self) -> String; + fn get_web_route(&self) -> actix_web::Route; +} - app = app.route(key, web::post().to( - move |path, data| { - let handler = Arc::clone(&handler); - let db_pool = Arc::clone(&db_pool); - let nats_cli = Arc::clone(&nats_cli); - let event_manager = event_manager.clone(); +pub struct K8sStyleRoute { + pub method: Method, + pub with_group: bool, + pub with_namespace: bool, + pub with_name: bool, +} - async move { - match db_pool.get_connection() { - Ok(mut conn) => { - handler - .default() - .create_api_with_namespace(path, data, &mut conn, nats_cli, event_manager) - .await - }, - Err(_) => Err(actix_web::error::ErrorInternalServerError( - "Failed to get database connection", - )), - } - } - })); - } else if route == APIS_WITHOUT_NAMESPACE { - let handler = Arc::clone(&handler); - let db_pool = Arc::clone(&db_pool); - let nats_cli = Arc::clone(&nats_cli); - let event_manager = event_manager.clone(); +impl APIServerRoute for K8sStyleRoute { + fn get_method(&self) -> Method { + self.method.clone() + } - app = app.route(key, web::post().to( - move |path, data| { - let handler = Arc::clone(&handler); - let db_pool = Arc::clone(&db_pool); - let nats_cli = Arc::clone(&nats_cli); - let event_manager = event_manager.clone(); + // apis/{group}/{version}/[namespaces/{namespace}: optional]/{plural}/[{name}: optional] + // api/{version}/[namespaces/{namespace}: optional]/{plural}/[{name}: optional] + fn
get_path(&self) -> String { + let prefix = if self.with_group { "apis/{group}/" } else { "api/" }; + let version = "{version}/"; + let namespace = if self.with_namespace { "namespaces/{namespace}/" } else { "" }; + let plural_and_name = if self.with_name { "{plural}/{name}" } else { "{plural}" }; + format!("{}{}{}{}", prefix, version, namespace, plural_and_name) + } - async move { - match db_pool.get_connection() { - Ok(mut conn) => { - handler - .default() - .create_apis_without_namespace(path, data, &mut conn, nats_cli, event_manager) - .await - }, - Err(_) => Err(actix_web::error::ErrorInternalServerError( - "Failed to get database connection", - )), - } - } - })); - } else if route == APIS_WITH_NAMESPACE { - let handler = Arc::clone(&handler); - let db_pool = Arc::clone(&db_pool); - let nats_cli = Arc::clone(&nats_cli); - let event_manager = event_manager.clone(); + fn get_web_route(&self) -> actix_web::Route { + route().method(self.method.clone()).to(Self::handler) + } +} - app = app.route(key, web::post().to( - move |path, data| { - let handler = Arc::clone(&handler); - let db_pool = Arc::clone(&db_pool); - let nats_cli = Arc::clone(&nats_cli); - let event_manager = event_manager.clone(); +impl K8sStyleRoute { + pub fn get_routes() -> Vec> { + itertools::iproduct!(vec![Method::GET, Method::POST, Method::PUT, Method::DELETE, Method::PATCH], vec![true, false], vec![true, false], vec![true, false]) + .map(|(method, with_group, with_namespace, with_name)| { + Box::new(Self { method, with_group, with_namespace, with_name }) as Box + }).collect() + } - async move { - match db_pool.get_connection() { - Ok(mut conn) => { - handler - .default() - .create_apis_with_namespace(path, data, &mut conn, nats_cli, event_manager) - .await - }, - Err(_) => Err(actix_web::error::ErrorInternalServerError( - "Failed to get database connection", - )), - } - } - })); - } + async fn parse_body(mut payload: web::Payload) -> APIServerResult { + // payload is a stream of Bytes objects + let mut body = web::BytesMut::new(); + let parse_chunk_error = APIServerError::bad_request("parse payload chunk error"); + while let Some(chunk) = payload.next().await { + let chunk = chunk; + if chunk.is_err() { + return Err(parse_chunk_error); } - - // URL是手动注册的,不会存在APIS_WITHOUT_NAMESPACE, APIS_WITH_NAMESPACE, - // API_WITHOUT_NAMESPACE, API_WITH_NAMESPACE之外情况 - for (key, route) in config.delete_routes() { - if route == API_WITHOUT_NAMESPACE { - let handler = Arc::clone(&handler); - let db_pool = Arc::clone(&db_pool); - let nats_cli = Arc::clone(&nats_cli); - let event_manager = event_manager.clone(); - - app = app.route(key, web::delete().to( - move |path| { - let handler = Arc::clone(&handler); - let db_pool = Arc::clone(&db_pool); - let nats_cli = Arc::clone(&nats_cli); - let event_manager = event_manager.clone(); - - async move { - match db_pool.get_connection() { - Ok(mut conn) => { - handler - .default() - .delete_api_without_namespace(path, &mut conn, nats_cli, event_manager) - .await - }, - Err(_) => Err(actix_web::error::ErrorInternalServerError( - "Failed to get database connection", - )), - } - } - })); - } else if route == API_WITH_NAMESPACE { - let handler = Arc::clone(&handler); - let db_pool = Arc::clone(&db_pool); - let nats_cli = Arc::clone(&nats_cli); - let event_manager = event_manager.clone(); - - app = app.route(key, web::delete().to( - move |path| { - let handler = Arc::clone(&handler); - let db_pool = Arc::clone(&db_pool); - let nats_cli = Arc::clone(&nats_cli); - let event_manager = 
event_manager.clone(); - - async move { - match db_pool.get_connection() { - Ok(mut conn) => { - handler - .default() - .delete_api_with_namespace(path, &mut conn, nats_cli, event_manager) - .await - }, - Err(_) => Err(actix_web::error::ErrorInternalServerError( - "Failed to get database connection", - )), - } - } - })); - } else if route == APIS_WITHOUT_NAMESPACE { - let handler = Arc::clone(&handler); - let db_pool = Arc::clone(&db_pool); - let nats_cli = Arc::clone(&nats_cli); - let event_manager = event_manager.clone(); - - app = app.route(key, web::delete().to( - move |path| { - let handler = Arc::clone(&handler); - let db_pool = Arc::clone(&db_pool); - let nats_cli = Arc::clone(&nats_cli); - let event_manager = event_manager.clone(); - - async move { - match db_pool.get_connection() { - Ok(mut conn) => { - handler - .default() - .delete_apis_without_namespace(path, &mut conn, nats_cli, event_manager) - .await - }, - Err(_) => Err(actix_web::error::ErrorInternalServerError( - "Failed to get database connection", - )), - } - } - })); - } else if route == APIS_WITH_NAMESPACE { - let handler = Arc::clone(&handler); - let db_pool = Arc::clone(&db_pool); - let nats_cli = Arc::clone(&nats_cli); - let event_manager = event_manager.clone(); - - app = app.route(key, web::delete().to( - move |path| { - let handler = Arc::clone(&handler); - let db_pool = Arc::clone(&db_pool); - let nats_cli = Arc::clone(&nats_cli); - let event_manager = event_manager.clone(); - - async move { - match db_pool.get_connection() { - Ok(mut conn) => { - handler - .default() - .delete_apis_with_namespace(path, &mut conn, nats_cli, event_manager) - .await - }, - Err(_) => Err(actix_web::error::ErrorInternalServerError( - "Failed to get database connection", - )), - } - } - })); - } + let chunk = chunk.unwrap(); + // limit max size of in-memory payload + if (body.len() + chunk.len()) > 262_144 { + return Err(parse_chunk_error); } + body.extend_from_slice(&chunk); + } - // URL是手动注册的,不会存在APIS_WITHOUT_NAMESPACE, APIS_WITH_NAMESPACE, - // API_WITHOUT_NAMESPACE, API_WITH_NAMESPACE之外情况 - for (key, route) in config.update_routes() { - if route == API_WITHOUT_NAMESPACE { - let handler = Arc::clone(&handler); - let db_pool = Arc::clone(&db_pool); - let nats_cli = Arc::clone(&nats_cli); - let event_manager = event_manager.clone(); - - app = app.route(key, web::put().to( - move |path, data| { - let handler = Arc::clone(&handler); - let db_pool = Arc::clone(&db_pool); - let nats_cli = Arc::clone(&nats_cli); - let event_manager = event_manager.clone(); - - async move { - match db_pool.get_connection() { - Ok(mut conn) => { - handler - .default() - .update_api_without_namespace(path, data, &mut conn, nats_cli, event_manager) - .await - }, - Err(_) => Err(actix_web::error::ErrorInternalServerError( - "Failed to get database connection", - )), - } - } - })); - } else if route == API_WITH_NAMESPACE { - let handler = Arc::clone(&handler); - let db_pool = Arc::clone(&db_pool); - let nats_cli = Arc::clone(&nats_cli); - let event_manager = event_manager.clone(); - - app = app.route(key, web::put().to( - move |path, data| { - let handler = Arc::clone(&handler); - let db_pool = Arc::clone(&db_pool); - let nats_cli = Arc::clone(&nats_cli); - let event_manager = event_manager.clone(); - - async move { - match db_pool.get_connection() { - Ok(mut conn) => { - handler - .default() - .update_api_with_namespace(path, data, &mut conn, nats_cli,event_manager) - .await - }, - Err(_) => Err(actix_web::error::ErrorInternalServerError( - "Failed to get 
database connection", - )), - } - } - })); - } else if route == APIS_WITHOUT_NAMESPACE { - let handler = Arc::clone(&handler); - let db_pool = Arc::clone(&db_pool); - let nats_cli = Arc::clone(&nats_cli); - let event_manager = event_manager.clone(); - - app = app.route(key, web::put().to( - move |path, data| { - let handler = Arc::clone(&handler); - let db_pool = Arc::clone(&db_pool); - let nats_cli = Arc::clone(&nats_cli); - let event_manager = event_manager.clone(); - - async move { - match db_pool.get_connection() { - Ok(mut conn) => { - handler - .default() - .update_apis_without_namespace(path, data, &mut conn, nats_cli, event_manager) - .await - }, - Err(_) => Err(actix_web::error::ErrorInternalServerError( - "Failed to get database connection", - )), - } - } - })); - } else if route == APIS_WITH_NAMESPACE { - let handler = Arc::clone(&handler); - let db_pool = Arc::clone(&db_pool); - let nats_cli = Arc::clone(&nats_cli); - let event_manager = event_manager.clone(); - - app = app.route(key, web::put().to( - move |path, data| { - let handler = Arc::clone(&handler); - let db_pool = Arc::clone(&db_pool); - let nats_cli = Arc::clone(&nats_cli); - let event_manager = event_manager.clone(); + let body = serde_json::from_slice::(&body); + if body.is_err() { + return Err(APIServerError::bad_request("payload is not a valid json")); + } + Ok(body.unwrap()) + } - async move { - match db_pool.get_connection() { - Ok(mut conn) => { - handler - .default() - .update_apis_with_namespace(path, data, &mut conn, nats_cli, event_manager) - .await - }, - Err(_) => Err(actix_web::error::ErrorInternalServerError( - "Failed to get database connection", - )), - } - } - })); + async fn handler(method: Method, query: web::Query, path_params: web::Path, state: web::Data, payload: web::Payload) + -> HttpResponse { + let body = Self::parse_body(payload).await; + log::info!("ApiServerHandler: method: {:?}, path_params: {:?}", method, path_params); + let handler = state.handler.clone(); + match method { + Method::POST | Method::PUT | Method::PATCH => { + if body.is_err() { + return HttpResponse::from(APIServerResponse::from(body)); } } - - // URL是手动注册的,不会存在APIS_WITHOUT_NAMESPACE, APIS_WITH_NAMESPACE, - // API_WITHOUT_NAMESPACE, API_WITH_NAMESPACE之外情况 - for (key, route) in config.getone_routes() { - if route == API_WITHOUT_NAMESPACE { - let handler = Arc::clone(&handler); - let db_pool = Arc::clone(&db_pool); - let nats_cli = Arc::clone(&nats_cli); - - app = app.route(key, web::get().to( - move |path, data| { - let handler = Arc::clone(&handler); - let db_pool = Arc::clone(&db_pool); - let nats_cli = Arc::clone(&nats_cli); - - async move { - match db_pool.get_connection() { - Ok(mut conn) => { - handler - .default() - .getone_api_without_namespace(path, data, &mut conn, nats_cli) - .await - }, - Err(_) => Err(actix_web::error::ErrorInternalServerError( - "Failed to get database connection", - )), - } - } - })); - } else if route == API_WITH_NAMESPACE { - let handler = Arc::clone(&handler); - let db_pool = Arc::clone(&db_pool); - let nats_cli = Arc::clone(&nats_cli); - - app = app.route(key, web::get().to( - move |path, data| { - let handler = Arc::clone(&handler); - let db_pool = Arc::clone(&db_pool); - let nats_cli = Arc::clone(&nats_cli); - - async move { - match db_pool.get_connection() { - Ok(mut conn) => { - handler - .default() - .getone_api_with_namespace(path, data, &mut conn, nats_cli) - .await - }, - Err(_) => Err(actix_web::error::ErrorInternalServerError( - "Failed to get database connection", - )), - } 
- } - })); - } else if route == APIS_WITHOUT_NAMESPACE { - let handler = Arc::clone(&handler); - let db_pool = Arc::clone(&db_pool); - let nats_cli = Arc::clone(&nats_cli); - - app = app.route(key, web::get().to( - move |path, data| { - let handler = Arc::clone(&handler); - let db_pool = Arc::clone(&db_pool); - let nats_cli = Arc::clone(&nats_cli); - - async move { - match db_pool.get_connection() { - Ok(mut conn) => { - handler - .default() - .getone_apis_without_namespace(path, data, &mut conn, nats_cli) - .await - }, - Err(_) => Err(actix_web::error::ErrorInternalServerError( - "Failed to get database connection", - )), - } - } - })); - } else if route == APIS_WITH_NAMESPACE { - let handler = Arc::clone(&handler); - let db_pool = Arc::clone(&db_pool); - let nats_cli = Arc::clone(&nats_cli); - - app = app.route(key, web::get().to( - move |path, data| { - let handler = Arc::clone(&handler); - let db_pool = Arc::clone(&db_pool); - let nats_cli = Arc::clone(&nats_cli); - - async move { - match db_pool.get_connection() { - Ok(mut conn) => { - handler - .default() - .getone_apis_with_namespace(path, data, &mut conn, nats_cli) - .await - }, - Err(_) => Err(actix_web::error::ErrorInternalServerError( - "Failed to get database connection", - )), - } - } - })); - } + _ => {} + } + match method { + Method::GET => { + handler.get_resource(path_params.into_inner(), query.into_inner(), state.into_inner()).await } - - - // URL是手动注册的,不会存在APIS_WITHOUT_NAMESPACE, APIS_WITH_NAMESPACE, - // API_WITHOUT_NAMESPACE, API_WITH_NAMESPACE之外情况 - for (key, route) in config.listall_routes() { - if route == API_WITHOUT_NAMESPACE { - let handler = Arc::clone(&handler); - let db_pool = Arc::clone(&db_pool); - let nats_cli = Arc::clone(&nats_cli); - - app = app.route(key, web::get().to( - move |path, data| { - let handler = Arc::clone(&handler); - let db_pool = Arc::clone(&db_pool); - let nats_cli = Arc::clone(&nats_cli); - - async move { - match db_pool.get_connection() { - Ok(mut conn) => { - handler - .default() - .listall_api_without_namespace(path, data, &mut conn, nats_cli) - .await - }, - Err(_) => Err(actix_web::error::ErrorInternalServerError( - "Failed to get database connection", - )), - } - } - })); - } else if route == API_WITH_NAMESPACE { - let handler = Arc::clone(&handler); - let db_pool = Arc::clone(&db_pool); - let nats_cli = Arc::clone(&nats_cli); - - app = app.route(key, web::get().to( - move |path, data| { - let handler = Arc::clone(&handler); - let db_pool = Arc::clone(&db_pool); - let nats_cli = Arc::clone(&nats_cli); - - async move { - match db_pool.get_connection() { - Ok(mut conn) => { - handler - .default() - .listall_api_with_namespace(path, data, &mut conn, nats_cli) - .await - }, - Err(_) => Err(actix_web::error::ErrorInternalServerError( - "Failed to get database connection", - )), - } - } - })); - } else if route == APIS_WITHOUT_NAMESPACE { - let handler = Arc::clone(&handler); - let db_pool = Arc::clone(&db_pool); - let nats_cli = Arc::clone(&nats_cli); - - app = app.route(key, web::get().to( - move |path, data| { - let handler = Arc::clone(&handler); - let db_pool = Arc::clone(&db_pool); - let nats_cli = Arc::clone(&nats_cli); - - async move { - match db_pool.get_connection() { - Ok(mut conn) => { - handler - .default() - .listall_apis_without_namespace(path, data, &mut conn, nats_cli) - .await - }, - Err(_) => Err(actix_web::error::ErrorInternalServerError( - "Failed to get database connection", - )), - } - } - })); - } else if route == APIS_WITH_NAMESPACE { - let handler = 
Arc::clone(&handler); - let db_pool = Arc::clone(&db_pool); - let nats_cli = Arc::clone(&nats_cli); - - app = app.route(key, web::get().to( - move |path, data| { - let handler = Arc::clone(&handler); - let db_pool = Arc::clone(&db_pool); - let nats_cli = Arc::clone(&nats_cli); - - async move { - match db_pool.get_connection() { - Ok(mut conn) => { - handler - .default() - .listall_apis_with_namespace(path, data, &mut conn, nats_cli) - .await - }, - Err(_) => Err(actix_web::error::ErrorInternalServerError( - "Failed to get database connection", - )), - } - } - })); - } + Method::POST => { + handler.create_resource(path_params.into_inner(), body.unwrap(), state.into_inner()).await } - - for (key, route) in config.watchall_routes() { - if route == API_WITHOUT_NAMESPACE { - let handler = Arc::clone(&handler); - let event_manager = event_manager.clone(); - - app = app.route(key, web::get().to( - move |path, data| { - let handler = Arc::clone(&handler); - let event_manager = event_manager.clone(); - - async move { - handler - .default() - .watchall_api_without_namespace(path, data, event_manager) - .await - } - })); - } else if route == API_WITH_NAMESPACE { - let handler = Arc::clone(&handler); - let event_manager = event_manager.clone(); - - app = app.route(key, web::get().to( - move |path, data| { - let handler = Arc::clone(&handler); - let event_manager = event_manager.clone(); - - async move { - handler - .default() - .watchall_api_with_namespace(path, data, event_manager) - .await - } - })); - } else if route == APIS_WITHOUT_NAMESPACE { - let handler = Arc::clone(&handler); - let event_manager = event_manager.clone(); - - app = app.route(key, web::get().to( - move |path, data| { - let handler = Arc::clone(&handler); - let event_manager = event_manager.clone(); - - async move { - handler - .default() - .watchall_apis_without_namespace(path, data, event_manager) - .await - } - })); - } else if route == APIS_WITH_NAMESPACE { - let handler = Arc::clone(&handler); - let event_manager = event_manager.clone(); - - app = app.route(key, web::get().to( - move |path, data| { - let handler = Arc::clone(&handler); - let event_manager = event_manager.clone(); - - async move { - handler - .default() - .watchall_apis_with_namespace(path, data, event_manager) - .await - } - })); - } + Method::PUT => { + handler.update_resource(path_params.into_inner(), body.unwrap(), state.into_inner()).await } - - for (key, route) in config.watchone_routes() { - if route == API_WITHOUT_NAMESPACE { - let handler = Arc::clone(&handler); - let event_manager = event_manager.clone(); - - app = app.route(key, web::get().to( - move |path, data| { - let handler = Arc::clone(&handler); - let event_manager = event_manager.clone(); - - async move { - handler - .default() - .watchone_api_without_namespace(path, data, event_manager) - .await - } - })); - } else if route == API_WITH_NAMESPACE { - let handler = Arc::clone(&handler); - let event_manager = event_manager.clone(); - - app = app.route(key, web::get().to( - move |path, data| { - let handler = Arc::clone(&handler); - let event_manager = event_manager.clone(); - - async move { - handler - .default() - .watchone_api_with_namespace(path, data, event_manager) - .await - } - })); - } else if route == APIS_WITHOUT_NAMESPACE { - let handler = Arc::clone(&handler); - let event_manager = event_manager.clone(); - - app = app.route(key, web::get().to( - move |path, data| { - let handler = Arc::clone(&handler); - let event_manager = event_manager.clone(); - - async move { - handler - 
.default() - .watchone_apis_without_namespace(path, data, event_manager) - .await - } - })); - } else if route == APIS_WITH_NAMESPACE { - let handler = Arc::clone(&handler); - let event_manager = event_manager.clone(); - - app = app.route(key, web::get().to( - move |path, data| { - let handler = Arc::clone(&handler); - let event_manager = event_manager.clone(); - - async move { - handler - .default() - .watchone_apis_with_namespace(path, data, event_manager) - .await - } - })); - } + Method::DELETE => { + handler.delete_resource(path_params.into_inner(), state.into_inner()).await } - - // TODO 与Kubernetes的状态一致 - app.default_service(web::to(move || async { - Ok::( - HttpResponse::NotFound().body("invalid url, see https://gitee.com/iscas-system/apiserver/wikis/pages?sort_id=12661944&doc_id=6135774")) - })) - - }); - - // 考虑到资源约束,目前只启动一个worker - // TODO 未来用户可以自定义该配置worker数 - server.workers(1).bind(addr).expect("reason").run().await + Method::PATCH => { + handler.patch_resource(path_params.into_inner(), body.unwrap(), state.into_inner()).await + } + _ => { + HttpResponse::from(APIServerResponse::from(Err::(APIServerError::bad_request("unsupported method")))) + } + } } - -} \ No newline at end of file +} diff --git a/src/cores/checker.rs b/src/cores/checker.rs index ab13881beb46ea7871a81d52ffe50387151c2ad4..a176dc3918660dc65b8e0ab243f8881157f50a76 100644 --- a/src/cores/checker.rs +++ b/src/cores/checker.rs @@ -1,21 +1,21 @@ /** * Copyright (2024, ) Institute of Software, Chinese Academy of Sciences - * author: wuheng@iscas.ac.cn - * since: 0.1.3 + * author: chenhongyu23@otcaix.iscas.ac.cn, wuheng@iscas.ac.cn + * since: 0.1.0 * **/ use jsonschema::Validator; + +use actix_web::Result; use serde_json::Value; -use actix_web::{Result}; use std::error::Error; use std::fs::File; use std::io::Read; // TODO,未来能自动从openapi中获取Schema pub fn validate_pod_json(json_str: &str) -> Result> { - let mut file = File::open("schemas/pod-schema.json")?; let mut contents = String::new(); file.read_to_string(&mut contents)?; diff --git a/src/cores/config.rs b/src/cores/config.rs deleted file mode 100644 index f741fcaad6e9c7380b723e998dbb51ff7aab1db5..0000000000000000000000000000000000000000 --- a/src/cores/config.rs +++ /dev/null @@ -1,221 +0,0 @@ -/** - * Copyright (2024, ) Institute of Software, Chinese Academy of Sciences - * author: wuheng@iscas.ac.cn - * since: 0.1.0 - * -**/ - -use std::collections::HashMap; - -// 执行kubectl api-resources,有以下输出,其中APIVERSION为v1的都在URL都是/api/开头,其它的都是/apis/开头,是否支持namespace -// 由NAMESPACED取值确定。因此,定于API_WITH_NAMESPACE、API_WITHOUT_NAMESPACE、APIS_WITH_NAMESPACE、APIS_WITHOUT_NAMESPACE -// 四种情况 -// -// NAME SHORTNAMES APIVERSION NAMESPACED KIND -// bindings v1 true Binding -// componentstatuses cs v1 false ComponentStatus -// configmaps cm v1 true ConfigMap -// endpoints ep v1 true Endpoints -// events ev v1 true Event -// limitranges limits v1 true LimitRange -// namespaces ns v1 false Namespace -// nodes no v1 false Node -// persistentvolumeclaims pvc v1 true PersistentVolumeClaim -// persistentvolumes pv v1 false PersistentVolume -// pods po v1 true Pod -// podtemplates v1 true PodTemplate -// replicationcontrollers rc v1 true ReplicationController -// resourcequotas quota v1 true ResourceQuota -// secrets v1 true Secret -// serviceaccounts sa v1 true ServiceAccount -// services svc v1 true Service -// mutatingwebhookconfigurations admissionregistration.k8s.io/v1 false MutatingWebhookConfiguration -// validatingwebhookconfigurations admissionregistration.k8s.io/v1 false 
ValidatingWebhookConfiguration -// customresourcedefinitions crd,crds apiextensions.k8s.io/v1 false CustomResourceDefinition -// apiservices apiregistration.k8s.io/v1 false APIService -// controllerrevisions apps/v1 true ControllerRevision -// daemonsets ds apps/v1 true DaemonSet -// deployments deploy apps/v1 true Deployment -// replicasets rs apps/v1 true ReplicaSet -// statefulsets sts apps/v1 true StatefulSet -// selfsubjectreviews authentication.k8s.io/v1 false SelfSubjectReview -// tokenreviews authentication.k8s.io/v1 false TokenReview -// localsubjectaccessreviews authorization.k8s.io/v1 true LocalSubjectAccessReview -// selfsubjectaccessreviews authorization.k8s.io/v1 false SelfSubjectAccessReview -// selfsubjectrulesreviews authorization.k8s.io/v1 false SelfSubjectRulesReview -// subjectaccessreviews authorization.k8s.io/v1 false SubjectAccessReview -// horizontalpodautoscalers hpa autoscaling/v2 true HorizontalPodAutoscaler -// cronjobs cj batch/v1 true CronJob -// jobs batch/v1 true Job -// certificatesigningrequests csr certificates.k8s.io/v1 false CertificateSigningRequest -// leases coordination.k8s.io/v1 true Lease -// endpointslices discovery.k8s.io/v1 true EndpointSlice -// events ev events.k8s.io/v1 true Event -// flowschemas flowcontrol.apiserver.k8s.io/v1beta3 false FlowSchema -// prioritylevelconfigurations flowcontrol.apiserver.k8s.io/v1beta3 false PriorityLevelConfiguration -// ingressclasses networking.k8s.io/v1 false IngressClass -// ingresses ing networking.k8s.io/v1 true Ingress -// networkpolicies netpol networking.k8s.io/v1 true NetworkPolicy -// runtimeclasses node.k8s.io/v1 false RuntimeClass -// poddisruptionbudgets pdb policy/v1 true PodDisruptionBudget -// clusterrolebindings rbac.authorization.k8s.io/v1 false ClusterRoleBinding -// clusterroles rbac.authorization.k8s.io/v1 false ClusterRole -// rolebindings rbac.authorization.k8s.io/v1 true RoleBinding -// roles rbac.authorization.k8s.io/v1 true Role -// priorityclasses pc scheduling.k8s.io/v1 false PriorityClass -// csidrivers storage.k8s.io/v1 false CSIDriver -// csinodes storage.k8s.io/v1 false CSINode -// csistoragecapacities storage.k8s.io/v1 true CSIStorageCapacity -// storageclasses sc storage.k8s.io/v1 false StorageClass -// volumeattachments storage.k8s.io/v1 false VolumeAttachment -pub const API_WITH_NAMESPACE: &str = "api_with_namespace"; -pub const API_WITHOUT_NAMESPACE: &str = "api_without_namespace"; -pub const APIS_WITH_NAMESPACE: &str = "apis_with_namespace"; -pub const APIS_WITHOUT_NAMESPACE: &str = "apis_without_namespace"; - - -// 定义URL配置的接口,用户可以自定义自己需要的URL规则 -// 具体规则参见https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/ -// TODO 现在方法名定义容易导致误解,后续修改 -// TODO 新增WatchOne和WatchAll两个方法 -pub trait Config: Sync + Send { - - // 创建一个对象(如Pod、Job的具体实例)的URL和Web服务器路由的映射关系 - fn create_routes(&self) -> &HashMap; - // 删除指定对象(如Pod、Job的具体实例)的URL列表和Web服务器路由的映射关系 - fn delete_routes(&self) -> &HashMap; - // 更新指定对象(如Pod、Job的具体实例)的URL列表和Web服务器路由的映射关系 - fn update_routes(&self) -> &HashMap; - // 获取指定对象(如Pod、Job的具体实例)的URL列表和Web服务器路由的映射关系 - fn getone_routes(&self) -> &HashMap; - // 获取指定类型(如Pod、Job的所有实例)的URL列表和Web服务器路由的映射关系 - fn listall_routes(&self) -> &HashMap; - - fn watchall_routes(&self) -> &HashMap; - - fn watchone_routes(&self) -> &HashMap; -} - -// Config接口的默认实现,用户可以自定义自己需要的URL规则 -// DefaultConfig实现了Kubernetes风格的URL -#[derive(Clone)] -pub struct DefaultConfig { - create_routes: HashMap, - delete_routes: HashMap, - update_routes: HashMap, - getone_routes: HashMap, - listall_routes: HashMap, 
- watchall_routes: HashMap, - watchone_routes: HashMap, -} - -impl DefaultConfig { - pub fn new() -> Self { - DefaultConfig { - create_routes: { - let mut map: HashMap = HashMap::new(); - // create a specific resource: POST - // - with namespace: /api(s)/{group}/{version}/namespaces/{namespace}/{plural} - // - without namespace/api(s)/{group}/{version}/namespaces/{plural} - map.insert("/api/{version}/namespaces/{namespace}/{plural}".to_string(), API_WITH_NAMESPACE.to_string()); - map.insert("/api/{version}/{plural}".to_string(), API_WITHOUT_NAMESPACE.to_string()); - map.insert("/apis/{group}/{version}/namespaces/{namespace}/{plural}".to_string(), APIS_WITH_NAMESPACE.to_string()); - map.insert("/apis/{group}/{version}/{plural}".to_string(), APIS_WITHOUT_NAMESPACE.to_string()); - map - }, - delete_routes: { - let mut map: HashMap = HashMap::new(); - // delete a specific resource: DELETE - // - with namespace: /api(s)/{group}/{version}/namespaces/{namespace}/{plural}/{name} - // - without namespace/api(s)/{group}/{version}/namespaces/{plural}/{name} - map.insert("/api/{version}/namespaces/{namespace}/{plural}/{name}".to_string(), API_WITH_NAMESPACE.to_string()); - map.insert("/api/{version}/{plural}/{name}".to_string(), API_WITHOUT_NAMESPACE.to_string()); - map.insert("/apis/{group}/{version}/namespaces/{namespace}/{plural}/{name}".to_string(), APIS_WITH_NAMESPACE.to_string()); - map.insert("/apis/{group}/{version}/{plural}/{name}".to_string(), APIS_WITHOUT_NAMESPACE.to_string()); - map - }, - update_routes: { - let mut map: HashMap = HashMap::new(); - // update a specific resource:PUT - // - with namespace: /api(s)/{group}/{version}/namespaces/{namespace}/{plural}/{name} - // - without namespace/api(s)/{group}/{version}/namespaces/{plural}/{name} - map.insert("/api/{version}/namespaces/{namespace}/{plural}/{name}".to_string(), API_WITH_NAMESPACE.to_string()); - map.insert("/api/{version}/{plural}/{name}".to_string(), API_WITHOUT_NAMESPACE.to_string()); - map.insert("/apis/{group}/{version}/namespaces/{namespace}/{plural}/{name}".to_string(), APIS_WITH_NAMESPACE.to_string()); - map.insert("/apis/{group}/{version}/{plural}/{name}".to_string(), APIS_WITHOUT_NAMESPACE.to_string()); - map - }, - getone_routes: { - let mut map: HashMap = HashMap::new(); - // get a specific resource: GET - // - with namespace: /api(s)/{group}/{version}/namespaces/{namespace}/{plural}/{name} - // - without namespace/api(s)/{group}/{version}/namespaces/{plural}/{name} - map.insert("/api/{version}/namespaces/{namespace}/{plural}/{name}".to_string(), API_WITH_NAMESPACE.to_string()); - map.insert("/api/{version}/{plural}/{name}".to_string(), API_WITHOUT_NAMESPACE.to_string()); - map.insert("/apis/{group}/{version}/namespaces/{namespace}/{plural}/{name}".to_string(), APIS_WITH_NAMESPACE.to_string()); - map.insert("/apis/{group}/{version}/{plural}/{name}".to_string(), APIS_WITHOUT_NAMESPACE.to_string()); - map - }, - listall_routes: { - let mut map: HashMap = HashMap::new(); - // get some resources: GET - // - with namespace: /api(s)/{group}/{version}/namespaces/{namespace}/{plural} - // - without namespace/api(s)/{group}/{version}/namespaces/{plural} - map.insert("/api/{version}/namespaces/{namespace}/{plural}".to_string(), API_WITH_NAMESPACE.to_string()); - map.insert("/api/{version}/{plural}".to_string(), API_WITHOUT_NAMESPACE.to_string()); - map.insert("/apis/{group}/{version}/namespaces/{namespace}/{plural}".to_string(), APIS_WITH_NAMESPACE.to_string()); - map.insert("/apis/{group}/{version}/{plural}".to_string(), 
APIS_WITHOUT_NAMESPACE.to_string()); - map - }, - watchall_routes: { - let mut map: HashMap = HashMap::new(); - // watch some resources: GET - // - with namespace: /api(s)/{group}/{version}/watch/namespaces/{namespace}/{plural} - // - without namespace/api(s)/{group}/{version}/watch/namespaces/{plural} - map.insert("/api/{version}/watch/namespaces/{namespace}/{plural}".to_string(), API_WITH_NAMESPACE.to_string()); - map.insert("/api/{version}/watch/{plural}".to_string(), API_WITHOUT_NAMESPACE.to_string()); - map.insert("/apis/{group}/{version}/watch/namespaces/{namespace}/{plural}".to_string(), APIS_WITH_NAMESPACE.to_string()); - map.insert("/apis/{group}/{version}/watch/{plural}".to_string(), APIS_WITHOUT_NAMESPACE.to_string()); - map - }, - watchone_routes: { - let mut map: HashMap = HashMap::new(); - // watch one resource: GET - // - with namespace: /api(s)/{group}/{version}/watch/namespaces/{namespace}/{plural}/{name} - // - without namespace/api(s)/{group}/{version}/watch/namespaces/{plural}/{name} - map.insert("/api/{version}/watch/namespaces/{namespace}/{plural}/{name}".to_string(), API_WITH_NAMESPACE.to_string()); - map.insert("/api/{version}/watch/{plural}/{name}".to_string(), API_WITHOUT_NAMESPACE.to_string()); - map.insert("/apis/{group}/{version}/watch/namespaces/{namespace}/{plural}/{name}".to_string(), APIS_WITH_NAMESPACE.to_string()); - map.insert("/apis/{group}/{version}/watch/{plural}/{name}".to_string(), APIS_WITHOUT_NAMESPACE.to_string()); - map - } - } - } -} - - -// 返回所有的注册URL和路由关系集合 -impl Config for DefaultConfig { - - fn create_routes(&self) -> &HashMap { - &self.create_routes - } - - fn delete_routes(&self) -> &HashMap { - &self.delete_routes - } - - fn update_routes(&self) -> &HashMap { - &self.update_routes - } - - fn getone_routes(&self) -> &HashMap { - &self.getone_routes - } - - fn listall_routes(&self) -> &HashMap { &self.listall_routes } - - fn watchall_routes(&self) -> &HashMap { &self.watchall_routes } - - fn watchone_routes(&self) -> &HashMap { &self.watchone_routes } -} diff --git a/src/cores/events.rs b/src/cores/events.rs new file mode 100644 index 0000000000000000000000000000000000000000..5b06c08558a4854f5e1b2673a27283fe578fe86e --- /dev/null +++ b/src/cores/events.rs @@ -0,0 +1,537 @@ +use crate::cores::apiserver::AppState; +use crate::cores::services::{APIServerError, APIServerResult, APIServerService, APIServerServiceParams}; +use anyhow::Result; +use feventbus::err::Error as FEventBusError; +use feventbus::impls::nats::nats::NatsCli; +use feventbus::message::{Message, NativeEventAction}; +use feventbus::traits::consumer::{Consumer, MessageHandler}; +use feventbus::traits::producer::Producer; +use fleetmod::utils::{APIVersion, ResourceCommon}; +use serde::de::DeserializeOwned; +use serde::{Deserialize, Serialize}; +use serde_json::error::Error as SerdeError; +use serde_json::Value; +use std::fmt::{Debug, Display, Formatter}; +use std::future::Future; +use std::sync::Arc; +use std::time; +use strum::{EnumIter, IntoEnumIterator}; +use tokio::sync::{mpsc, Mutex}; +use tokio::time::sleep; +use fleetmod::FleetResource; + +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)] +pub enum EventTopic { + // P2P events表示其他组件向api server发送的点对点事件 + P2P(P2PEventTopic), + // PubSub events表示api server向其他组件发送的广播事件 + PubSub(PubSubEventTopic), +} + +impl Display for EventTopic { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + match self { + EventTopic::P2P(topic) => write!(f, "P2P({:?})", topic), + EventTopic::PubSub(topic) => write!(f, 
"PubSub({:?})", topic), + } + } +} + +#[derive(Serialize, Deserialize, Debug, Clone, EnumIter, PartialEq)] +pub enum P2PEventTopic { + Create, + Update, + Patch, + Delete, + List, + StartsWatch, + EndsWatch, +} + +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)] +pub enum PubSubEventTopic { + Watch(ResourceCollectionIdentifier), +} + +#[derive(Serialize, Deserialize, Debug, Clone, Default, PartialEq)] +pub struct ResourceCollectionIdentifier { + pub api_version: APIVersion, + pub kind: String, + pub namespace: Option, + // todo! 未来支持更多字段的过滤,如label,annotation等 +} + +impl ResourceCollectionIdentifier { + // 如果value是一个合法的FleetResource,并且符合当前identifier的筛选规则,则返回true + pub fn fits(&self, value: Value) -> bool { + // ResourceCommon包含api_version, kind, metadata等所有资源的共有字段 + let resource_common: ResourceCommon = match serde_json::from_value(value) { + Ok(v) => v, + Err(_) => return false, + }; + if resource_common.api_version != self.api_version { + return false; + } + if resource_common.kind != self.kind { + return false; + } + if resource_common.metadata.namespace != self.namespace { + return false; + } + true + } +} + +// Create, Update, Patch使用WriteEventRequest +#[derive(Debug, Serialize, Deserialize, Clone)] +pub struct WriteEventRequest { + pub params: APIServerServiceParams, + pub data: Value, +} + +#[derive(Debug, Serialize, Deserialize, Clone)] +pub struct WriteEventResponse { + pub res: APIServerResult, +} + +#[derive(Debug, Serialize, Deserialize, Clone)] +pub struct DeleteEventRequest { + pub params: APIServerServiceParams, +} + +#[derive(Debug, Serialize, Deserialize, Clone)] +pub struct DeleteEventResponse { + pub res: APIServerResult, +} + +#[derive(Debug, Serialize, Deserialize, Clone)] +pub struct ReadEventRequest { + pub params: APIServerServiceParams, + pub query: Value, +} + +#[derive(Debug, Serialize, Deserialize, Clone)] +pub struct ReadEventResponse { + pub res: APIServerResult, +} + +#[derive(Debug, Serialize, Deserialize, Clone)] +pub struct StartsWatchRequest { + pub ri: ResourceCollectionIdentifier, +} + +#[derive(Debug, Serialize, Deserialize, Clone)] +pub struct StartsWatchResponse {} + +#[derive(Debug, Serialize, Deserialize, Clone)] +pub struct EndsWatchRequest { + pub ri: ResourceCollectionIdentifier, +} + +#[derive(Debug, Serialize, Deserialize, Clone)] +pub struct EndsWatchResponse {} + +#[derive(Debug, Serialize, Deserialize, Clone)] +pub struct WatchEventMessage { + pub values: Vec, +} + +#[derive(Debug, Serialize, Deserialize, Clone)] +pub enum WatchEventMessageValue { + Created(Value), + Updated(Value), + Deleted(Value), +} + +impl WatchEventMessageValue { + pub fn get_value(&self) -> &Value { + match self { + WatchEventMessageValue::Created(v) => v, + WatchEventMessageValue::Updated(v) => v, + WatchEventMessageValue::Deleted(v) => v, + } + } +} + +pub struct P2PEventServer { + app_state: Arc, + service: Arc, +} + +impl P2PEventServer { + pub fn new(app_state: Arc) -> Self { + Self { + app_state, + service: Arc::new(APIServerService::new()), + } + } + + pub fn start(&self) { + let nats_cli = self.app_state.nats_cli.clone(); + self.start_reply(nats_cli.clone()); + } + + pub async fn reply_list(service: Arc, app_state: Arc, req: ReadEventRequest) -> ReadEventResponse { + let res = service.get_resource(req.params, req.query, app_state.clone()).await; + ReadEventResponse { res } + } + + pub async fn reply_create(service: Arc, app_state: Arc, req: WriteEventRequest) -> WriteEventResponse { + let res = service.create_resource(req.params, req.data, 
app_state.clone()).await; + WriteEventResponse { res } + } + + pub async fn reply_update(service: Arc, app_state: Arc, req: WriteEventRequest) -> WriteEventResponse { + let res = service.update_resource(req.params, req.data, app_state.clone()).await; + WriteEventResponse { res } + } + + pub async fn reply_patch(service: Arc, app_state: Arc, req: WriteEventRequest) -> WriteEventResponse { + let res = service.patch_resource(req.params, req.data, app_state).await; + WriteEventResponse { res } + } + + pub async fn reply_delete(service: Arc, app_state: Arc, req: DeleteEventRequest) -> DeleteEventResponse { + let res = service.delete_resource(req.params, app_state).await; + DeleteEventResponse { res } + } + + pub async fn reply_starts_watch(_: Arc, app_state: Arc, req: StartsWatchRequest) -> StartsWatchResponse { + let watch_event = PubSubEventTopic::Watch(req.ri); + app_state.watch_event_publisher.add_pub_sub_event_topic(watch_event).await; + StartsWatchResponse {} + } + + pub async fn reply_ends_watch(_service: Arc, _app_state: Arc, _req: EndsWatchRequest) -> EndsWatchResponse { + // 暂时先不实现取消watch:需要维持观察者的状态,以便在取消watch时取消订阅,可以通过心跳来实现 + todo!() + } + + pub async fn do_reply(service: Arc, app_state: Arc, body: Value, reply: F) -> Result + where + I: DeserializeOwned, + O: Serialize, + F: Fn(Arc, Arc, I) -> Fut, + Fut: Future, + { + let req: I = serde_json::from_value(body).map_err(|e: SerdeError| FEventBusError::MessageHandling(e.to_string()))?; + let res = reply(service, app_state, req).await; + Ok(serde_json::to_string(&res).unwrap()) + } + + pub fn get_reply_handler(&self, event_topic: P2PEventTopic) -> MessageHandler + { + let service = self.service.clone(); + let app_state = self.app_state.clone(); + let reply_handler: MessageHandler = Arc::new(move |msg: Message| { + let event_topic = event_topic.clone(); + let service = service.clone(); + let app_state = app_state.clone(); + Box::pin(async move { + if msg.body.is_none() { + return Err(FEventBusError::MessageHandling( + "Message body is null".to_string(), + )); + } + let body = msg.body.unwrap(); + match event_topic { + P2PEventTopic::List => Self::do_reply(service, app_state, body, P2PEventServer::reply_list).await, + P2PEventTopic::Create => Self::do_reply(service, app_state, body, P2PEventServer::reply_create).await, + P2PEventTopic::Update => Self::do_reply(service, app_state, body, P2PEventServer::reply_update).await, + P2PEventTopic::Patch => Self::do_reply(service, app_state, body, P2PEventServer::reply_patch).await, + P2PEventTopic::Delete => Self::do_reply(service, app_state, body, P2PEventServer::reply_delete).await, + P2PEventTopic::StartsWatch => Self::do_reply(service, app_state, body, P2PEventServer::reply_starts_watch).await, + P2PEventTopic::EndsWatch => Self::do_reply(service, app_state, body, P2PEventServer::reply_ends_watch).await, + } + }) + }); + reply_handler + } + + pub fn start_reply(&self, nats_cli: Arc) { + let mut topics = Vec::new(); + for p2p in P2PEventTopic::iter() { + topics.push(EventTopic::P2P(p2p.clone())); + } + for t in topics { + let topic_str = t.to_string(); + let p2p_topic = match t { + EventTopic::P2P(topic) => topic, + _ => unreachable!(), + }; + let reply_handler = self.get_reply_handler(p2p_topic); + let nats_cli = nats_cli.clone(); + tokio::spawn(async move { + log::info!("Registering reply handler for topic {}", topic_str); + if let Err(e) = nats_cli + .reply(topic_str.as_str(), reply_handler) + .await { + log::error!("Failed to register reply handler for topic {}: {}", topic_str, e); + } + 
}); + } + } +} + +pub struct WatchEventPublisher { + nats_cli: Arc, + pub_sub_event_topics: Arc>>, + sender: mpsc::Sender, + receiver: Arc>>, +} + +impl WatchEventPublisher { + pub fn new(nats_cli: Arc) -> Self { + let (sx, rx) = mpsc::channel(100); + Self { + nats_cli, + // todo! pub_sub_event_topics在重启后会清空,需要持久化 + pub_sub_event_topics: Arc::new(Mutex::new(Vec::new())), + sender: sx, + receiver: Arc::new(Mutex::new(rx)), + } + } + + pub async fn add_pub_sub_event_topic(&self, topic: PubSubEventTopic) { + match topic { + PubSubEventTopic::Watch(_) => {} + // _ => return anyhow::bail!("WatchEventPublisher不支持Watch以外的事件类型"), + }; + let mut topics = self.pub_sub_event_topics.lock().await; + if topics.contains(&topic) { + return; + } + topics.push(topic); + // todo! 持久化 + } + + pub async fn remove_pub_sub_event_topic(&self, topic: PubSubEventTopic) { + let mut topics = self.pub_sub_event_topics.lock().await; + if let Some(index) = topics.iter().position(|x| *x == topic) { + topics.remove(index); + } + } + + pub fn start(&self) { + let rx = self.receiver.clone(); + let nats_cli = self.nats_cli.clone(); + let pub_sub_event_topics = self.pub_sub_event_topics.clone(); + tokio::spawn(async move { + log::info!("WatchEventPublisher started receiving messages"); + let mut rx = rx.lock().await; // 锁定接收器 + let buffer_limit = 32; + let mut buffer = Vec::with_capacity(buffer_limit); + loop { + sleep(time::Duration::from_millis(500)).await; + let received_cnt = rx.recv_many(&mut buffer, buffer_limit).await; + if received_cnt == 0 { + continue; + } + log::info!("WatchEventPublisher Received {} messages", received_cnt); + Self::publish_events_to_topics(nats_cli.clone(), pub_sub_event_topics.clone(), &buffer).await; + } + }); + } + + pub async fn publish_events_to_topics(nats_cli: Arc, pub_sub_event_topics: Arc>>, msg_values: &Vec) { + for topic in pub_sub_event_topics.lock().await.iter() { + let mut identified_values = Vec::new(); + for msg_value in msg_values.iter() { + let identifier = match topic { + PubSubEventTopic::Watch(identifier) => { + identifier.clone() + } + }; + let v = msg_value.get_value(); + if identifier.fits(v.clone()) { + identified_values.push(msg_value.clone()); + } + } + if identified_values.len() > 0 { + let to_publish = Message::new( + EventTopic::PubSub(topic.clone()).to_string(), + NativeEventAction::Other, // 该字段暂时无用 + None, // metadata + Some(identified_values), // body + None, // created_at + ); + if let Err(e) = nats_cli.publish(to_publish).await { + log::error!("WatchEventPublisher Failed to publish event: {}", e); + } + } + } + } + + pub async fn publish_create_event(&self, data: Value) { + if let Err(e) = self.sender.send(WatchEventMessageValue::Created(data)).await { + log::error!("Failed to publish create event: {}", e); + } + } + + pub async fn publish_update_event(&self, data: Value) { + if let Err(e) = self.sender.send(WatchEventMessageValue::Updated(data)).await { + log::error!("Failed to publish update event: {}", e); + } + } + + pub async fn publish_delete_event(&self, data: Value) { + if let Err(e) = self.sender.send(WatchEventMessageValue::Deleted(data)).await { + log::error!("Failed to publish delete event: {}", e); + } + } +} + +pub struct APIServerEventClient { + nats_cli: Arc, + timeout: time::Duration, +} + +impl APIServerEventClient { + pub fn new(nats_cli: Arc, timeout: Option) -> Self { + Self { + nats_cli, + timeout: timeout.unwrap_or(time::Duration::from_secs(60)), + } + } + + fn new_message(topic: P2PEventTopic, req: R) -> Message + where + R: Clone + 
Serialize + DeserializeOwned, + { + let msg = Message::new( + EventTopic::P2P(topic).to_string(), + NativeEventAction::Other, + None, + Some(req), + None, + ); + msg + } + + fn map_serde_err(e: SerdeError) -> APIServerError { + APIServerError::internal_error(format!("err serde processing event msg: {}", e).as_str()) + } + + fn data_to_value(data: T) -> APIServerResult + where + T: Serialize, + { + Ok(serde_json::to_value(data).map_err(Self::map_serde_err)?) + } + + fn value_to_data(value: Value) -> APIServerResult + where + T: DeserializeOwned, + { + Ok(serde_json::from_value(value).map_err(Self::map_serde_err)?) + } + + fn parse_msg_raw_string(res: String) -> APIServerResult + where + T: DeserializeOwned, + { + let res: T = serde_json::from_str(&res).map_err(Self::map_serde_err)?; + Ok(res) + } + + async fn write_event(&self, topic: P2PEventTopic, params: APIServerServiceParams, data: Value) -> APIServerResult + where + T: Serialize + DeserializeOwned + Clone, + { + let msg = Self::new_message(topic, WriteEventRequest { params, data }); + let resp_str = self.nats_cli.request(msg, self.timeout).await?; + let resp: WriteEventResponse = Self::parse_msg_raw_string(resp_str)?; + Self::value_to_data(resp.res?) + } + + pub async fn create(&self, params: APIServerServiceParams, data: T) -> APIServerResult + where + T: Serialize + DeserializeOwned + Clone, + { + self.write_event(P2PEventTopic::Create, params, Self::data_to_value(data)?).await + } + + pub async fn create_by_resource(&self, data: T) -> APIServerResult + where + T: Serialize + DeserializeOwned + Clone + FleetResource, + { + let mut params = APIServerServiceParams::from_resource(&data); + params.name = None; // 创建资源时在路径参数中不要包含name + self.create(params, data).await + } + + pub async fn delete(&self, params: APIServerServiceParams) -> APIServerResult + where + T: Serialize + DeserializeOwned + Clone, + { + let msg = Self::new_message(P2PEventTopic::Delete, DeleteEventRequest { params }); + let resp_str = self.nats_cli.request(msg, self.timeout).await?; + let resp: WriteEventResponse = Self::parse_msg_raw_string(resp_str)?; + Self::value_to_data(resp.res?) 
+ } + + pub async fn delete_by_resource(&self, data: T) -> APIServerResult + where + T: Serialize + DeserializeOwned + Clone + FleetResource, + { + let params = APIServerServiceParams::from_resource(&data); + self.delete(params).await + } + + pub async fn update(&self, params: APIServerServiceParams, data: T) -> APIServerResult + where + T: Serialize + DeserializeOwned + Clone + FleetResource, + { + self.write_event(P2PEventTopic::Update, params, Self::data_to_value(data)?).await + } + + pub async fn update_by_resource(&self, data: T) -> APIServerResult + where + T: Serialize + DeserializeOwned + Clone + FleetResource, + { + let params = APIServerServiceParams::from_resource(&data); + self.write_event(P2PEventTopic::Update, params, Self::data_to_value(data)?).await + } + + pub async fn patch(&self, params: APIServerServiceParams, data: Value) -> APIServerResult + where + T: Serialize + DeserializeOwned + Clone, + { + self.write_event(P2PEventTopic::Patch, params, data).await + } + + async fn read_event(&self, topic: P2PEventTopic, params: APIServerServiceParams, query: Value) -> APIServerResult + where + T: Serialize + DeserializeOwned + Clone, + { + let msg = Self::new_message(topic, ReadEventRequest { params, query }); + let resp_str = self.nats_cli.request(msg, self.timeout).await?; + let resp: ReadEventResponse = Self::parse_msg_raw_string(resp_str)?; + Self::value_to_data(resp.res?) + } + + pub async fn get(&self, params: APIServerServiceParams, query: Value) -> APIServerResult + where + T: Serialize + DeserializeOwned + Clone, + { + self.read_event(P2PEventTopic::List, params, query).await + } + + pub async fn list(&self, params: APIServerServiceParams, query: Value) -> APIServerResult> + where + T: Serialize + DeserializeOwned + Clone, + { + self.read_event(P2PEventTopic::List, params, query).await + } + + pub async fn starts_watch(&self, ri: ResourceCollectionIdentifier) -> APIServerResult<()> { + let msg = Self::new_message(P2PEventTopic::StartsWatch, StartsWatchRequest { ri }); + let resp_str = self.nats_cli.request(msg, self.timeout).await?; + let _resp: StartsWatchResponse = Self::parse_msg_raw_string(resp_str)?; + Ok(()) + } + + pub async fn ends_watch(&self, _ri: ResourceCollectionIdentifier) -> APIServerResult<()> { + todo!() + } +} \ No newline at end of file diff --git a/src/cores/handlers.rs b/src/cores/handlers.rs index 4aca85807939fca3dedb41034df4ba2a33930c9b..41357b8f384d4ff92886fb4147c93b1f7e589606 100644 --- a/src/cores/handlers.rs +++ b/src/cores/handlers.rs @@ -1,291 +1,47 @@ -use std::collections::HashMap; -use std::fmt::Debug; -use std::sync::{Arc, LazyLock, Mutex}; -use std::{time}; +use crate::cores::apiserver::{AppState, K8sStylePathParams}; +use crate::cores::services::{APIServerResult, APIServerService, APIServerStatusCode}; +use actix_web::HttpResponse; use async_trait::async_trait; -use actix_web::{HttpResponse, Result, web, Error}; -use chrono::Utc; -use diesel::{QueryResult, RunQueryDsl, QueryDsl, ExpressionMethods, OptionalExtension, QueryableByName, PgConnection, sql_query, SqliteConnection, Connection}; -use serde_json::{json}; -use serde_json::Value; -use crate::cores::db::DbConnection; -use diesel::sql_types::{Text}; -use actix_web::error::ErrorInternalServerError; -use actix_web::web::{Path, Query}; -use feventbus::impls::nats::nats::NatsCli; -use feventbus::message::Message; -use feventbus::message; -use feventbus::traits::producer::Producer; +use serde::de::DeserializeOwned; use serde::{Deserialize, Serialize}; -use tokio::sync::broadcast; -use 
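Illustrative only, not part of the patch: a minimal sketch of how another component might call the new APIServerEventClient introduced in src/cores/events.rs above. The function name watch_pods_example and the "Pod"/"default" values are invented for the example; NatsCli and APIVersion come from the feventbus/fleetmod imports shown in the new file, and APIVersion is assumed to implement Default (suggested by the #[derive(Default)] on ResourceCollectionIdentifier).

use std::sync::Arc;
use std::time::Duration;
use feventbus::impls::nats::nats::NatsCli;
use fleetmod::utils::APIVersion;
use crate::cores::events::{APIServerEventClient, ResourceCollectionIdentifier};
use crate::cores::services::APIServerResult;

async fn watch_pods_example(nats_cli: Arc<NatsCli>) -> APIServerResult<()> {
    // The timeout is optional; the client falls back to 60 seconds when None is passed.
    let client = APIServerEventClient::new(nats_cli, Some(Duration::from_secs(10)));

    // Ask the api server to start publishing watch events for Pods in "default".
    // Field names follow ResourceCollectionIdentifier as defined in this diff.
    let ri = ResourceCollectionIdentifier {
        api_version: APIVersion::default(),
        kind: "Pod".to_string(),
        namespace: Some("default".to_string()),
    };
    client.starts_watch(ri).await
}

Once this request is answered, P2PEventServer registers the matching Watch topic with WatchEventPublisher, and subsequent create/update/delete events whose api_version, kind and namespace fit the identifier are broadcast on the corresponding PubSub topic.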
actix_web::web::Bytes; - -// 定义全局哈希表来获取model名 -static GLOBAL_HASHMAP: LazyLock>> = LazyLock::new(|| { - let mut map = HashMap::new(); - map.insert("cargos".to_string(), "CARGO".to_string()); - map.insert("nodes".to_string(), "NODE".to_string()); - map.insert("jobs".to_string(), "JOB".to_string()); - Mutex::new(map) -}); - -// 添加键值对 -fn insert_key_value(key: &str, value: &str) { - let mut map = GLOBAL_HASHMAP.lock().unwrap(); - map.insert(key.to_string(), value.to_string()); -} - -// 获取键对应的值 -fn get_value(key: &str) -> Option { - let map = GLOBAL_HASHMAP.lock().unwrap(); - map.get(key).cloned() -} +use serde_json::Value; +use std::sync::Arc; #[async_trait] pub trait Handler { - // /api/{version}/{plural} - async fn create_api_without_namespace( - &self, - info: web::Path<(String, String)>, - data: web::Json, - db_connection: &mut DbConnection, - nats_cli: Arc, - event_manager: EventManager - ) -> Result; - - // /api/{version}/namespaces/{namespace}/{plural} - async fn create_api_with_namespace( - &self, - info: web::Path<(String, String, String)>, - data: web::Json, - db_connection: &mut DbConnection, - nats_cli: Arc, - event_manager: EventManager - ) -> Result; - - // /apis/{group}/{version}/{plural} - async fn create_apis_without_namespace( - &self, - info: web::Path<(String, String, String)>, - data: web::Json, - db_connection: &mut DbConnection, - nats_cli: Arc, - event_manager: EventManager - ) -> Result; - - // /apis/{group}/{version}/namespaces/{namespace}/{plural} - async fn create_apis_with_namespace( - &self, - info: web::Path<(String, String, String, String)>, - data: web::Json, - db_connection: &mut DbConnection, - nats_cli: Arc, - event_manager: EventManager - ) -> Result; - - // /api/{version}/{plural}/{name} - async fn delete_api_without_namespace( - &self, - info: web::Path<(String, String, String)>, - db_connection: &mut DbConnection, - nats_cli: Arc, - event_manager: EventManager - ) -> Result; - - // /api/{version}/namespaces/{namespace}/{plural}/{name} - async fn delete_api_with_namespace( - &self, - info: web::Path<(String, String, String, String)>, - db_connection: &mut DbConnection, - nats_cli: Arc, - event_manager: EventManager - ) -> Result; - - // /apis/{group}/{version}/{plural}/{name} - async fn delete_apis_without_namespace( - &self, - info: web::Path<(String, String, String, String)>, - db_connection: &mut DbConnection, - nats_cli: Arc, - event_manager: EventManager - ) -> Result; - - // /apis/{group}/{version}/namespaces/{namespace}/{plural}/{name} - async fn delete_apis_with_namespace( - &self, - info: web::Path<(String, String, String, String, String)>, - db_connection: &mut DbConnection, - nats_cli: Arc, - event_manager: EventManager - ) -> Result; - - // /api/{version}/{plural}/{name} - async fn update_api_without_namespace( - &self, - info: web::Path<(String, String, String)>, - data: web::Json, - db_connection: &mut DbConnection, - nats_cli: Arc, - event_manager: EventManager - ) -> Result; - - // /api/{version}/namespaces/{namespace}/{plural}/{name} - async fn update_api_with_namespace( - &self, - info: web::Path<(String, String, String, String)>, - data: web::Json, - db_connection: &mut DbConnection, - nats_cli: Arc, - event_manager: EventManager - ) -> Result; - - // /apis/{group}/{version}/{plural}/{name} - async fn update_apis_without_namespace( - &self, - info: web::Path<(String, String, String, String)>, - data: web::Json, - db_connection: &mut DbConnection, - nats_cli: Arc, - event_manager: EventManager - ) -> Result; - - // 
/apis/{group}/{version}/namespaces/{namespace}/{plural}/{name} - async fn update_apis_with_namespace( - &self, - info: web::Path<(String, String, String, String, String)>, - data: web::Json, - db_connection: &mut DbConnection, - nats_cli: Arc, - event_manager: EventManager - ) -> Result; - - // /api/{version}/{plural}/{name} - async fn getone_api_without_namespace( - &self, - info: web::Path<(String, String, String)>, - data: web::Query, - db_connection: &mut DbConnection, - nats_cli: Arc, - ) -> Result; - - // /api/{version}/namespaces/{namespace}/{plural}/{name} - async fn getone_api_with_namespace( - &self, - info: web::Path<(String, String, String, String)>, - data: web::Query, - db_connection: &mut DbConnection, - nats_cli: Arc, - ) -> Result; - - // /apis/{group}/{version}/{plural}/{name} - async fn getone_apis_without_namespace( - &self, - info: web::Path<(String, String, String, String)>, - data: web::Query, - db_connection: &mut DbConnection, - nats_cli: Arc, - ) -> Result; - - // /apis/{group}/{version}/namespaces/{namespace}/{plural}/{name} - async fn getone_apis_with_namespace( - &self, - info: web::Path<(String, String, String, String, String)>, - data: web::Query, - db_connection: &mut DbConnection, - nats_cli: Arc, - ) -> Result; - - // /api/{version}/{plural} - async fn listall_api_without_namespace( - &self, - info: web::Path<(String, String)>, - data: web::Query, - db_connection: &mut DbConnection, - nats_cli: Arc, - ) -> Result; - - // /api/{version}/namespaces/{namespace}/{plural} - async fn listall_api_with_namespace( - &self, - info: web::Path<(String, String, String)>, - data: web::Query, - db_connection: &mut DbConnection, - nats_cli: Arc, - ) -> Result; - - // /apis/{group}/{version}/{plural} - async fn listall_apis_without_namespace( - &self, - info: web::Path<(String, String, String)>, - data: web::Query, - db_connection: &mut DbConnection, - nats_cli: Arc, - ) -> Result; - - // /apis/{group}/{version}/namespaces/{namespace}/{plural} - async fn listall_apis_with_namespace( - &self, - info: web::Path<(String, String, String, String)>, - data: web::Query, - db_connection: &mut DbConnection, - nats_cli: Arc, - ) -> Result; - - async fn watchall_api_without_namespace( - &self, - info: web::Path<(String, String)>, - data: web::Query, - event_manager: EventManager - ) -> Result; - - async fn watchall_api_with_namespace( - &self, - info: web::Path<(String, String, String)>, - data: web::Query, - event_manager: EventManager - ) -> Result; - - async fn watchall_apis_without_namespace( - &self, - info: web::Path<(String, String, String)>, - data: web::Query, - event_manager: EventManager - ) -> Result; - - async fn watchall_apis_with_namespace( + async fn create_resource( &self, - info: web::Path<(String, String, String, String)>, - data: web::Query, - event_manager: EventManager - ) -> Result; + params: K8sStylePathParams, + data: Value, + app_state: Arc, + ) -> HttpResponse; - async fn watchone_api_without_namespace( + async fn delete_resource( &self, - info: web::Path<(String, String, String)>, - data: web::Query, - event_manager: EventManager - ) -> Result; + params: K8sStylePathParams, + app_state: Arc, + ) -> HttpResponse; - async fn watchone_api_with_namespace( + async fn update_resource( &self, - info: web::Path<(String, String, String, String)>, - data: web::Query, - event_manager: EventManager - ) -> Result; + params: K8sStylePathParams, + data: Value, + app_state: Arc, + ) -> HttpResponse; - async fn watchone_apis_without_namespace( + async fn get_resource( 
&self, - info: web::Path<(String, String, String, String)>, - data: web::Query, - event_manager: EventManager - ) -> Result; + params: K8sStylePathParams, + _query: Value, + app_state: Arc, + ) -> HttpResponse; - async fn watchone_apis_with_namespace( + async fn patch_resource( &self, - info: web::Path<(String, String, String, String, String)>, - data: web::Query, - event_manager: EventManager - ) -> Result; + params: K8sStylePathParams, + data: Value, + app_state: Arc, + ) -> HttpResponse; // 不满足以上请求路径的处理 fn default(&self) -> DefaultHandler; @@ -293,3689 +49,80 @@ pub trait Handler { #[derive(Clone)] pub struct DefaultHandler { - + service: APIServerService, } impl DefaultHandler { pub fn new() -> Self { - DefaultHandler {} - } -} - - -// 查询 metadata 表中的 plural 是否存在,并检查 namespace 要求是否满足 -async fn check_metadata( - conn: &mut DbConnection, - plural: &str, - version: &str, - requires_namespace: bool, -) -> QueryResult { - use diesel::dsl::count_star; - use crate::schema::metadata::dsl as metadata_dsl; - use crate::schema::metadata_replica1::dsl as replica1_dsl; - use crate::schema::metadata_replica2::dsl as replica2_dsl; - - let count; - let count_replica1; - let count_replica2; - match conn { - DbConnection::Pg(pg_conn) => { - count = metadata_dsl::metadata - .filter(metadata_dsl::name.eq(plural)) - .filter(metadata_dsl::apigroup.eq(version)) - .filter(metadata_dsl::namespace.eq(requires_namespace)) - .select(count_star()) - .first::(pg_conn)?; - - count_replica1 = replica1_dsl::metadata_replica1 - .filter(replica1_dsl::name.eq(plural)) - .filter(replica1_dsl::apigroup.eq(version)) - .filter(replica1_dsl::namespace.eq(requires_namespace)) - .select(count_star()) - .first::(pg_conn)?; - - count_replica2 = replica2_dsl::metadata_replica2 - .filter(replica2_dsl::name.eq(plural)) - .filter(replica2_dsl::apigroup.eq(version)) - .filter(replica2_dsl::namespace.eq(requires_namespace)) - .select(count_star()) - .first::(pg_conn)?; - }, - DbConnection::Sqlite(sqlite_conn) => { - count = metadata_dsl::metadata - .filter(metadata_dsl::name.eq(plural)) - .filter(metadata_dsl::apigroup.eq(version)) - .filter(metadata_dsl::namespace.eq(requires_namespace)) - .select(count_star()) - .first::(sqlite_conn)?; - - count_replica1 = replica1_dsl::metadata_replica1 - .filter(replica1_dsl::name.eq(plural)) - .filter(replica1_dsl::apigroup.eq(version)) - .filter(replica1_dsl::namespace.eq(requires_namespace)) - .select(count_star()) - .first::(sqlite_conn)?; - - count_replica2 = replica2_dsl::metadata_replica2 - .filter(replica2_dsl::name.eq(plural)) - .filter(replica2_dsl::apigroup.eq(version)) - .filter(replica2_dsl::namespace.eq(requires_namespace)) - .select(count_star()) - .first::(sqlite_conn)?; - } - } - let positive_count = [count, count_replica1, count_replica2].iter().filter(|&&x| x > 0).count(); - Ok(positive_count >= 2) -} - - - -// 查询 kine 表中指定的数据是否存在 -async fn check_kine( - conn: &mut DbConnection, - item_kind: &str, - item_name: &str, - item_version: &str, - item_namespace: Option<&str>, -) -> QueryResult { - use diesel::dsl::count_star; - use crate::schema::kine::dsl as kine_dsl; - use crate::schema::kine_replica1::dsl as replica1_dsl; - use crate::schema::kine_replica2::dsl as replica2_dsl; - - let count; - let count_replica1; - let count_replica2; - match conn { - DbConnection::Pg(pg_conn) => { - if let Some(_) = item_namespace { - count = kine_dsl::kine - .filter(kine_dsl::kind.eq(item_kind)) - .filter(kine_dsl::name.eq(item_name)) - .filter(kine_dsl::apigroup.eq(item_version)) - 
.filter(kine_dsl::namespace.eq(item_namespace)) - .select(count_star()) - .first::(pg_conn)?; - - count_replica1 = replica1_dsl::kine_replica1 - .filter(replica1_dsl::kind.eq(item_kind)) - .filter(replica1_dsl::name.eq(item_name)) - .filter(replica1_dsl::apigroup.eq(item_version)) - .filter(replica1_dsl::namespace.eq(item_namespace)) - .select(count_star()) - .first::(pg_conn)?; - - count_replica2 = replica2_dsl::kine_replica2 - .filter(replica2_dsl::kind.eq(item_kind)) - .filter(replica2_dsl::name.eq(item_name)) - .filter(replica2_dsl::apigroup.eq(item_version)) - .filter(replica2_dsl::namespace.eq(item_namespace)) - .select(count_star()) - .first::(pg_conn)?; - } else { - count = kine_dsl::kine - .filter(kine_dsl::kind.eq(item_kind)) - .filter(kine_dsl::name.eq(item_name)) - .filter(kine_dsl::apigroup.eq(item_version)) - .select(count_star()) - .first::(pg_conn)?; - - count_replica1 = replica1_dsl::kine_replica1 - .filter(replica1_dsl::kind.eq(item_kind)) - .filter(replica1_dsl::name.eq(item_name)) - .filter(replica1_dsl::apigroup.eq(item_version)) - .select(count_star()) - .first::(pg_conn)?; - - count_replica2 = replica2_dsl::kine_replica2 - .filter(replica2_dsl::kind.eq(item_kind)) - .filter(replica2_dsl::name.eq(item_name)) - .filter(replica2_dsl::apigroup.eq(item_version)) - .select(count_star()) - .first::(pg_conn)?; - } - }, - DbConnection::Sqlite(sqlite_conn) => { - if let Some(_) = item_namespace { - count = kine_dsl::kine - .filter(kine_dsl::kind.eq(item_kind)) - .filter(kine_dsl::name.eq(item_name)) - .filter(kine_dsl::apigroup.eq(item_version)) - .filter(kine_dsl::namespace.eq(item_namespace)) - .select(count_star()) - .first::(sqlite_conn)?; - - count_replica1 = replica1_dsl::kine_replica1 - .filter(replica1_dsl::kind.eq(item_kind)) - .filter(replica1_dsl::name.eq(item_name)) - .filter(replica1_dsl::apigroup.eq(item_version)) - .filter(replica1_dsl::namespace.eq(item_namespace)) - .select(count_star()) - .first::(sqlite_conn)?; - - count_replica2 = replica2_dsl::kine_replica2 - .filter(replica2_dsl::kind.eq(item_kind)) - .filter(replica2_dsl::name.eq(item_name)) - .filter(replica2_dsl::apigroup.eq(item_version)) - .filter(replica2_dsl::namespace.eq(item_namespace)) - .select(count_star()) - .first::(sqlite_conn)?; - } else { - count = kine_dsl::kine - .filter(kine_dsl::kind.eq(item_kind)) - .filter(kine_dsl::name.eq(item_name)) - .filter(kine_dsl::apigroup.eq(item_version)) - .select(count_star()) - .first::(sqlite_conn)?; - - count_replica1 = replica1_dsl::kine_replica1 - .filter(replica1_dsl::kind.eq(item_kind)) - .filter(replica1_dsl::name.eq(item_name)) - .filter(replica1_dsl::apigroup.eq(item_version)) - .select(count_star()) - .first::(sqlite_conn)?; - - count_replica2 = replica2_dsl::kine_replica2 - .filter(replica2_dsl::kind.eq(item_kind)) - .filter(replica2_dsl::name.eq(item_name)) - .filter(replica2_dsl::apigroup.eq(item_version)) - .select(count_star()) - .first::(sqlite_conn)?; - } - } - } - let positive_count = [count, count_replica1, count_replica2].iter().filter(|&&x| x > 0).count(); - Ok(positive_count >= 2) -} - -fn insert_metadata_in_transaction_pg( - transaction: &mut PgConnection, - plural: &str, - version: &str, - namespace_required: bool, - json_data: &Value, -) -> QueryResult<()> { - use diesel::sql_types::{Bool}; - - // 表名列表 - let table_array: [&str; 3] = ["metadata", "metadata_replica1", "metadata_replica2"]; - - for table_name in table_array { - // 使用参数绑定构建插入查询 - let insert_metadata_query = format!( - "INSERT INTO {} (name, namespace, apigroup, 
data, created_time, updated_time) - VALUES ($1, $2, $3, $4, $5, $6) - ON CONFLICT DO NOTHING;", - table_name - ); - - // 执行插入操作 - sql_query(insert_metadata_query) - .bind::(plural) // 名称 - .bind::(namespace_required) // 是否需要命名空间 - .bind::(version) // 版本 - .bind::(json_data.to_string()) // JSON 数据 - .bind::(Utc::now().naive_utc().to_string()) // 创建时间 - .bind::(Utc::now().naive_utc().to_string()) // 更新时间 - .execute(transaction)?; - } - - Ok(()) -} - - -fn insert_metadata_in_transaction_sqlite( - transaction: &mut SqliteConnection, - plural: &str, - version: &str, - namespace_required: bool, - json_data: &Value, -) -> QueryResult<()> { - use diesel::sql_types::{Bool}; - - // 表名列表 - let table_array: [&str; 3] = ["metadata", "metadata_replica1", "metadata_replica2"]; - - for table_name in table_array { - // 使用参数绑定构建插入查询 - let insert_metadata_query = format!( - "INSERT OR IGNORE INTO {} (name, namespace, apigroup, data, created_time, updated_time) - VALUES (?, ?, ?, ?, ?, ?);", - table_name - ); - - // 执行插入操作 - sql_query(insert_metadata_query) - .bind::(plural) // 名称 - .bind::(namespace_required) // 是否需要命名空间 - .bind::(version) // 版本 - .bind::(json_data.to_string()) // JSON 数据 - .bind::(Utc::now().naive_utc().to_string()) // 创建时间 - .bind::(Utc::now().naive_utc().to_string()) // 更新时间 - .execute(transaction)?; - } - - Ok(()) -} - - -async fn insert_metadata( - conn: &mut DbConnection, - plural: &str, - version: &str, - namespace_required: bool, - json_data: &Value -) -> QueryResult<()> { - match conn { - DbConnection::Pg(pg_conn) => { - pg_conn.transaction(|transaction| { - insert_metadata_in_transaction_pg(transaction, plural, version, namespace_required, json_data) - }) - } - DbConnection::Sqlite(sqlite_conn) => { - sqlite_conn.transaction(|transaction| { - insert_metadata_in_transaction_sqlite(transaction, plural, version, namespace_required, json_data) - }) - } - }.expect("unknow conn in insert_metadata"); - Ok(()) -} - -fn insert_kine_in_transaction_pg( - transaction: &mut PgConnection, - item_kind: &str, - item_name: &str, - json_data: &Value, - version: &str, - namespace: Option<&str>, -) -> QueryResult<()> { - - // 表列表 - let table_array: [&str; 3] = ["kine", "kine_replica1", "kine_replica2"]; - - for table_name in table_array { - // 使用参数绑定构建插入查询 - let insert_metadata_query = format!( - "INSERT INTO {} (kind, name, namespace, apigroup, data, created_time, updated_time) - VALUES ($1, $2, $3, $4, $5, $6, $7) - ON CONFLICT DO NOTHING;", - table_name - ); - - // 执行插入操作 - sql_query(insert_metadata_query) - .bind::(item_kind) - .bind::(item_name) - .bind::(namespace.unwrap_or("")) // 空字符串作为默认 namespace - .bind::(version) - .bind::(json_data.to_string()) // 将 JSON 数据转换为字符串 - .bind::(Utc::now().naive_utc().to_string()) // 创建时间 - .bind::(Utc::now().naive_utc().to_string()) // 更新时间 - .execute(transaction)?; - } - - Ok(()) -} - - -fn insert_kine_in_transaction_sqlite( - transaction: &mut SqliteConnection, - item_kind: &str, - item_name: &str, - json_data: &Value, - version: &str, - namespace: Option<&str>, -) -> QueryResult<()> { - - let table_array: [&str; 3] = ["kine", "kine_replica1", "kine_replica2"]; - - for table_name in table_array { - let insert_metadata_query = format!( - "INSERT OR IGNORE INTO {} (kind, name, namespace, apigroup, data, created_time, updated_time) - VALUES (?, ?, ?, ?, ?, ?, ?);", - table_name - ); - - sql_query(insert_metadata_query) - .bind::(item_kind) - .bind::(item_name) - .bind::(namespace.unwrap_or("")) // 使用 Nullable 处理空值 - .bind::(version) - 
.bind::(json_data.to_string()) - .bind::(Utc::now().naive_utc().to_string()) - .bind::(Utc::now().naive_utc().to_string()) - .execute(transaction)?; - } - Ok(()) -} - - -// 在 kine 表中插入新记录 -async fn insert_kine( - conn: &mut DbConnection, - item_kind: &str, - item_name: &str, - json_data: &Value, - version: &str, - namespace: Option<&str>, -) -> QueryResult<()> { - match conn { - DbConnection::Pg(pg_conn) => { - pg_conn.transaction(|transaction| { - insert_kine_in_transaction_pg(transaction, item_kind, item_name, json_data, version, namespace) - }) - } - DbConnection::Sqlite(sqlite_conn) => { - sqlite_conn.transaction(|transaction| { - insert_kine_in_transaction_sqlite(transaction, item_kind, item_name, json_data, version, namespace) - }) - } - }.expect("unknow conn in insert_kine"); - Ok(()) -} - -// 从 kine 表中删除特定 name 的记录 -async fn delete_from_kine( - conn: &mut DbConnection, - item_kind: &str, - item_name: &str, - item_version: &str, - item_namespace: Option<&str>, -) -> QueryResult { - use diesel::sql_types::Text; - - // 表名列表 - let tables = ["kine", "kine_replica1", "kine_replica2"]; - - // 遍历每个表,执行删除操作 - let mut total_rows_affected = 0; - - for &table in &tables { - let delete_query = if let Some(_) = item_namespace { - match conn { - DbConnection::Pg(_) => format!( - "DELETE FROM {} WHERE kind = $1 AND name = $2 AND namespace = $3 AND apigroup = $4", - table - ), - DbConnection::Sqlite(_) => format!( - "DELETE FROM {} WHERE kind = ? AND name = ? AND namespace = ? AND apigroup = ?", - table - ), - } - } else { - match conn { - DbConnection::Pg(_) => format!( - "DELETE FROM {} WHERE kind = $1 AND name = $2 AND apigroup = $3", - table - ), - DbConnection::Sqlite(_) => format!( - "DELETE FROM {} WHERE kind = ? AND name = ? AND apigroup = ?", - table - ), - } - }; - - // 执行删除 - let rows_affected = match conn { - DbConnection::Pg(pg_conn) => { - if let Some(namespace) = item_namespace { - diesel::sql_query(delete_query) - .bind::(item_kind) - .bind::(item_name) - .bind::(namespace) - .bind::(item_version) - .execute(pg_conn)? - } else { - diesel::sql_query(delete_query) - .bind::(item_kind) - .bind::(item_name) - .bind::(item_version) - .execute(pg_conn)? - } - } - DbConnection::Sqlite(sqlite_conn) => { - if let Some(namespace) = item_namespace { - diesel::sql_query(delete_query) - .bind::(item_kind) - .bind::(item_name) - .bind::(namespace) - .bind::(item_version) - .execute(sqlite_conn)? - } else { - diesel::sql_query(delete_query) - .bind::(item_kind) - .bind::(item_name) - .bind::(item_version) - .execute(sqlite_conn)? - } - } - }; - - total_rows_affected += rows_affected; - } - - // 如果至少有两个表进行了删除,则返回 true - Ok(total_rows_affected > 1) -} - - - -async fn update_data_in_kine( - conn: &mut DbConnection, - item_kind: &str, - item_name: &str, - item_version: &str, - item_namespace: Option<&str>, - json_data: &Value, -) -> QueryResult { - use diesel::sql_types::Text; - use chrono::Utc; - - // 需要更新的表列表 - let tables = ["kine", "kine_replica1", "kine_replica2"]; - let mut total_rows_affected = 0; - - for &table in &tables { - let update_query = if let Some(_) = item_namespace { - match conn { - DbConnection::Pg(_) => format!( - "UPDATE {} SET data = $1, updated_time = $2 WHERE kind = $3 AND name = $4 AND namespace = $5 AND apigroup = $6", - table - ), - DbConnection::Sqlite(_) => format!( - "UPDATE {} SET data = ?, updated_time = ? WHERE kind = ? AND name = ? AND namespace = ? 
AND apigroup = ?", - table - ), - } - } else { - match conn { - DbConnection::Pg(_) => format!( - "UPDATE {} SET data = $1, updated_time = $2 WHERE kind = $3 AND name = $4 AND apigroup = $5", - table - ), - DbConnection::Sqlite(_) => format!( - "UPDATE {} SET data = ?, updated_time = ? WHERE kind = ? AND name = ? AND apigroup = ?", - table - ), - } - }; - - // 执行更新操作 - let rows_affected = match conn { - DbConnection::Pg(pg_conn) => { - if let Some(namespace) = item_namespace { - diesel::sql_query(update_query) - .bind::(json_data.to_string()) - .bind::(Utc::now().naive_utc().to_string()) - .bind::(item_kind) - .bind::(item_name) - .bind::(namespace) - .bind::(item_version) - .execute(pg_conn)? - } else { - diesel::sql_query(update_query) - .bind::(json_data.to_string()) - .bind::(Utc::now().naive_utc().to_string()) - .bind::(item_kind) - .bind::(item_name) - .bind::(item_version) - .execute(pg_conn)? - } - } - DbConnection::Sqlite(sqlite_conn) => { - if let Some(namespace) = item_namespace { - diesel::sql_query(update_query) - .bind::(json_data.to_string()) - .bind::(Utc::now().naive_utc().to_string()) - .bind::(item_kind) - .bind::(item_name) - .bind::(namespace) - .bind::(item_version) - .execute(sqlite_conn)? - } else { - diesel::sql_query(update_query) - .bind::(json_data.to_string()) - .bind::(Utc::now().naive_utc().to_string()) - .bind::(item_kind) - .bind::(item_name) - .bind::(item_version) - .execute(sqlite_conn)? - } - } - }; - - total_rows_affected += rows_affected; - } - - // 如果至少有两个表更新成功,则返回 true - Ok(total_rows_affected > 1) -} - - -// 辅助查询函数,用于获取数据的 `data` 字段 -#[derive(QueryableByName)] -struct DataResult { - #[diesel(sql_type = Text)] - data: String, -} - - - -async fn get_data_from_kine( - conn: &mut DbConnection, - item_kind: &str, - item_name: &str, - item_version: &str, - item_namespace: Option<&str>, -) -> QueryResult> { - use diesel::sql_types::Text; - use std::collections::HashMap; - - // 表名列表 - let tables = ["kine", "kine_replica1", "kine_replica2"]; - - // 存储每个表的查询结果 - let mut results = HashMap::new(); - - for &table in &tables { - let select_query = if let Some(_) = item_namespace { - match conn { - DbConnection::Pg(_) => format!( - "SELECT data FROM {} WHERE kind = $1 AND name = $2 AND namespace = $3 AND apigroup = $4", - table - ), - DbConnection::Sqlite(_) => format!( - "SELECT data FROM {} WHERE kind = ? AND name = ? AND namespace = ? AND apigroup = ?", - table - ), - } - } else { - match conn { - DbConnection::Pg(_) => format!( - "SELECT data FROM {} WHERE kind = $1 AND name = $2 AND apigroup = $3", - table - ), - DbConnection::Sqlite(_) => format!( - "SELECT data FROM {} WHERE kind = ? AND name = ? AND apigroup = ?", - table - ), - } - }; - - let data_result = match conn { - DbConnection::Pg(pg_conn) => { - if let Some(namespace) = item_namespace { - diesel::sql_query(select_query) - .bind::(item_kind) - .bind::(item_name) - .bind::(namespace) - .bind::(item_version) - .get_result::(pg_conn) - .optional()? - .map(|res| res.data) - } else { - diesel::sql_query(select_query) - .bind::(item_kind) - .bind::(item_name) - .bind::(item_version) - .get_result::(pg_conn) - .optional()? - .map(|res| res.data) - } - } - DbConnection::Sqlite(sqlite_conn) => { - if let Some(namespace) = item_namespace { - diesel::sql_query(select_query) - .bind::(item_kind) - .bind::(item_name) - .bind::(namespace) - .bind::(item_version) - .get_result::(sqlite_conn) - .optional()? 
- .map(|res| res.data) - } else { - diesel::sql_query(select_query) - .bind::(item_kind) - .bind::(item_name) - .bind::(item_version) - .get_result::(sqlite_conn) - .optional()? - .map(|res| res.data) - } - } - }; - - if let Some(data) = data_result { - *results.entry(data).or_insert(0) += 1; + Self { + service: APIServerService::new() } } - - // 按少数服从多数规则返回数据 - if results.len() == 1 { - // 如果三个表的结果一致,直接返回任意结果 - Ok(results.into_iter().next().map(|(data, _)| data)) - } else if results.values().all(|&count| count == 1) { - // 如果所有表结果不同,直接回退到 kine 表的数据 - get_data_from_kine_primary(conn, item_kind, item_name, item_version, item_namespace) - } else if let Some((data, _)) = results.into_iter().max_by_key(|&(_, count)| count) { - // 如果有多数一致的数据,返回该数据 - Ok(Some(data)) - } else { - // 默认回退到 kine 表的数据 - get_data_from_kine_primary(conn, item_kind, item_name, item_version, item_namespace) - } } -/// 获取主表 `kine` 的数据 -fn get_data_from_kine_primary( - conn: &mut DbConnection, - item_kind: &str, - item_name: &str, - item_version: &str, - item_namespace: Option<&str>, -) -> QueryResult> { - let fallback_query = if let Some(_) = item_namespace { - match conn { - DbConnection::Pg(_) => "SELECT data FROM kine WHERE kind = $1 AND name = $2 AND namespace = $3 AND apigroup = $4".to_string(), - DbConnection::Sqlite(_) => "SELECT data FROM kine WHERE kind = ? AND name = ? AND namespace = ? AND apigroup = ?".to_string(), - } - } else { - match conn { - DbConnection::Pg(_) => "SELECT data FROM kine WHERE kind = $1 AND name = $2 AND apigroup = $3".to_string(), - DbConnection::Sqlite(_) => "SELECT data FROM kine WHERE kind = ? AND name = ? AND apigroup = ?".to_string(), - } - }; - - match conn { - DbConnection::Pg(pg_conn) => { - if let Some(namespace) = item_namespace { - diesel::sql_query(fallback_query) - .bind::(item_kind) - .bind::(item_name) - .bind::(namespace) - .bind::(item_version) - .get_result::(pg_conn) - .optional() - .map(|res| res.map(|data_result| data_result.data)) - } else { - diesel::sql_query(fallback_query) - .bind::(item_kind) - .bind::(item_name) - .bind::(item_version) - .get_result::(pg_conn) - .optional() - .map(|res| res.map(|data_result| data_result.data)) - } - } - DbConnection::Sqlite(sqlite_conn) => { - if let Some(namespace) = item_namespace { - diesel::sql_query(fallback_query) - .bind::(item_kind) - .bind::(item_name) - .bind::(namespace) - .bind::(item_version) - .get_result::(sqlite_conn) - .optional() - .map(|res| res.map(|data_result| data_result.data)) - } else { - diesel::sql_query(fallback_query) - .bind::(item_kind) - .bind::(item_name) - .bind::(item_version) - .get_result::(sqlite_conn) - .optional() - .map(|res| res.map(|data_result| data_result.data)) - } - } - } +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct APIServerResponse +{ + pub status_code: APIServerStatusCode, + pub message: String, + pub data: Option, } - - - -// 辅助函数:从指定表中获取所有符合条件的数据的 `data` 字段 -async fn get_all_data_from_kine( - conn: &mut DbConnection, - item_kind: &str, - item_version: &str, - item_namespace: Option<&str>, -) -> QueryResult> { - use diesel::sql_types::Text; - use std::collections::HashMap; - - // 定义需要查询的表 - let tables = ["kine", "kine_replica1", "kine_replica2"]; - - // 存储每个表的查询结果 - let mut table_results: HashMap<&str, Vec> = HashMap::new(); - - // 遍历每个表进行查询 - for &table in &tables { - let select_query = if let Some(_) = item_namespace { - match conn { - DbConnection::Pg(_) => format!( - "SELECT data FROM {} WHERE kind = $1 AND namespace = $2 AND apigroup = $3", - table - ), - 
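The removed `get_data_from_kine` then applies a simple quorum over the three per-table reads: a unanimous value is returned directly, a two-of-three majority wins, and anything else falls back to the primary `kine` table. A pure-std sketch of that resolution step:

```rust
// Sketch of the removed majority rule: count identical payloads across the
// replica reads and return a value only if at least two tables agree.
// A None result tells the caller to fall back to the primary `kine` table.
use std::collections::HashMap;

fn resolve_majority(reads: Vec<Option<String>>) -> Option<String> {
    let mut counts: HashMap<String, usize> = HashMap::new();
    for data in reads.into_iter().flatten() {
        *counts.entry(data).or_insert(0) += 1;
    }
    counts
        .into_iter()
        .filter(|(_, n)| *n >= 2)          // require a two-table quorum
        .max_by_key(|(_, n)| *n)
        .map(|(data, _)| data)
}

fn main() {
    // Two replicas agree, one diverges: the agreeing value wins.
    let reads = vec![Some("a".to_string()), Some("a".to_string()), Some("b".to_string())];
    assert_eq!(resolve_majority(reads), Some("a".to_string()));
}
```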
DbConnection::Sqlite(_) => format!( - "SELECT data FROM {} WHERE kind = ? AND namespace = ? AND apigroup = ?", - table - ), - } - } else { - match conn { - DbConnection::Pg(_) => format!( - "SELECT data FROM {} WHERE kind = $1 AND apigroup = $2", - table - ), - DbConnection::Sqlite(_) => format!( - "SELECT data FROM {} WHERE kind = ? AND apigroup = ?", - table - ), - } - }; - - // 执行查询 - let results: Vec = match conn { - DbConnection::Pg(pg_conn) => { - if let Some(namespace) = item_namespace { - diesel::sql_query(select_query) - .bind::(item_kind) - .bind::(namespace) - .bind::(item_version) - .load::(pg_conn)? - .into_iter() - .map(|res| res.data) - .collect() - } else { - diesel::sql_query(select_query) - .bind::(item_kind) - .bind::(item_version) - .load::(pg_conn)? - .into_iter() - .map(|res| res.data) - .collect() - } - } - DbConnection::Sqlite(sqlite_conn) => { - if let Some(namespace) = item_namespace { - diesel::sql_query(select_query) - .bind::(item_kind) - .bind::(namespace) - .bind::(item_version) - .load::(sqlite_conn)? - .into_iter() - .map(|res| res.data) - .collect() - } else { - diesel::sql_query(select_query) - .bind::(item_kind) - .bind::(item_version) - .load::(sqlite_conn)? - .into_iter() - .map(|res| res.data) - .collect() - } +impl From> for APIServerResponse +where + T: Serialize + DeserializeOwned + Clone, +{ + fn from(result: APIServerResult) -> Self { + match result { + Ok(data) => APIServerResponse { + status_code: APIServerStatusCode::OK, + message: "Success".to_string(), + data: Some(data), + }, + Err(err) => APIServerResponse { + status_code: err.status_code, + message: err.message, + data: None, } - }; - - table_results.insert(table, results); - } - - // 检查所有表的数据是否一致 - let mut unique_results: HashMap = HashMap::new(); - for results in table_results.values() { - for result in results { - *unique_results.entry(result.clone()).or_insert(0) += 1; } } - // 筛选出出现次数大于等于 2 的数据 - let filtered_results: Vec = unique_results - .into_iter() - .filter(|(_, count)| *count >= 2) // 过滤条件 - .map(|(data, _)| data) // 提取数据部分 - .collect(); - - Ok(filtered_results) } - - - -// 发送请求并等待响应 -async fn send_request( - message: Message, - nats_cli: Arc, -) -> std::result::Result<(), Box> +impl From> for HttpResponse where - T: Serialize + for<'de> Deserialize<'de> + Debug + Clone + Send + Sync + 'static, + T: Serialize + DeserializeOwned + Clone, { - match nats_cli - .request(message, time::Duration::from_secs(100)) - .await - { - Ok(response) => { - if response.contains("Error handling reply") { - Err(Box::new(std::io::Error::new(std::io::ErrorKind::Other, "Error found in the eventbus reply!")) as Box) - } else { - Ok(()) - } - } - Err(e) => { - Err(Box::new(e) as Box) - } + fn from(response: APIServerResponse) -> Self { + HttpResponse::Ok().json(response) } } -#[derive(Serialize, Deserialize, Debug, Clone)] -pub struct ApiServerMessage { - pub content: Value, -} - - -// watch的事件类型 -#[derive(Clone, Debug, Serialize)] -pub enum EventType { - Create, - Update, - Delete, -} - -// 事件结构,包含事件类型和相关信息 -#[derive(Clone, Debug, Serialize)] -pub struct Event { - pub event_type: EventType, - pub message: Value -} - -// 事件管理器,管理不同资源类型的事件频道 -#[derive(Clone)] -pub struct EventManager { - // 使用 HashMap 来管理不同资源类型的广播通道 - resource_channels: Arc>>>, -} - -impl EventManager { - pub fn new() -> Self { - EventManager { - resource_channels: Arc::new(Mutex::new(HashMap::new())), - } +#[async_trait] +impl Handler for DefaultHandler { + async fn create_resource(&self, params: K8sStylePathParams, data: Value, 
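On the added side, handler results are normalised through a response envelope before reaching actix: an `APIServerResult` collapses into an `APIServerResponse` (status code, message, optional payload), which in turn converts into an `HttpResponse`. The generic parameters were stripped in this rendering; the sketch below reconstructs the likely shape under that assumption, with `ApiError`, `ApiResult` and a `u16` status standing in for the real `APIServerError`, `APIServerResult` and `APIServerStatusCode` types, whose definitions are not shown in this hunk:

```rust
// Sketch of the added response-envelope conversions (names are stand-ins).
use actix_web::HttpResponse;
use serde::{de::DeserializeOwned, Deserialize, Serialize};

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ApiResponse<T> {
    pub status_code: u16,
    pub message: String,
    pub data: Option<T>,
}

pub struct ApiError {
    pub status_code: u16,
    pub message: String,
}

pub type ApiResult<T> = Result<T, ApiError>;

impl<T: Serialize + DeserializeOwned + Clone> From<ApiResult<T>> for ApiResponse<T> {
    fn from(result: ApiResult<T>) -> Self {
        match result {
            Ok(data) => ApiResponse { status_code: 200, message: "Success".into(), data: Some(data) },
            Err(err) => ApiResponse { status_code: err.status_code, message: err.message, data: None },
        }
    }
}

impl<T: Serialize + DeserializeOwned + Clone> From<ApiResponse<T>> for HttpResponse {
    fn from(response: ApiResponse<T>) -> Self {
        // Errors are carried inside the envelope, so the HTTP status is
        // always 200 in this scheme; only the body's status_code varies.
        HttpResponse::Ok().json(response)
    }
}
```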
app_state: Arc) -> HttpResponse { + HttpResponse::from(APIServerResponse::from(self.service.create_resource(params.into(), data, app_state).await)) } - // 获取资源类型对应的广播通道 - fn get_channel(&self, resource_type: &str) -> broadcast::Sender { - let mut channels = self.resource_channels.lock().unwrap(); - // 如果不存在该资源类型的通道,创建一个新的 - channels.entry(resource_type.to_string()).or_insert_with(|| { - let (tx, _) = broadcast::channel(100); // 创建一个容量为 100 的广播通道 - tx - }).clone() + async fn delete_resource(&self, params: K8sStylePathParams, app_state: Arc) -> HttpResponse { + HttpResponse::from(APIServerResponse::from(self.service.delete_resource(params.into(), app_state).await)) } - // 向指定资源类型广播事件 - pub async fn send_event(&self, resource_type: &str, event_type: EventType, message: Value) { - let channel = self.get_channel(resource_type); - - // 创建一个事件并发送到广播通道 - let event = Event { - event_type, - message, - }; - let _ = channel.send(event); // 发送事件 + async fn update_resource(&self, params: K8sStylePathParams, data: Value, app_state: Arc) -> HttpResponse { + HttpResponse::from(APIServerResponse::from(self.service.update_resource(params.into(), data, app_state).await)) } -} - -#[async_trait] -impl Handler for DefaultHandler { - - // /api/{version}/{plural} - async fn create_api_without_namespace( - &self, - info: web::Path<(String, String)>, - data: web::Json, - db_connection: &mut DbConnection, - nats_cli: Arc, - event_manager: EventManager, - ) -> Result { - let (version, plural) = info.into_inner(); - let data = data.into_inner(); - - //判断是正常的插入函数还是 crd 资源的注册函数 - if plural == "crds" { - let item_kind = data - .get("spec") - .and_then(|spec| spec.get("names")) - .and_then(|names| names.get("plural")) - .and_then(|plural| plural.as_str()) - .unwrap_or("error"); - - let kind_upper = data - .get("spec") - .and_then(|spec| spec.get("names")) - .and_then(|names| names.get("kind")) - .and_then(|kind| kind.as_str()) - .unwrap_or("error"); - - - // 调用check_metadata函数 - let metadata_exists = check_metadata(db_connection, item_kind, &version,false).await.map_err(ErrorInternalServerError)?; - - if metadata_exists { - return Ok(HttpResponse::NotFound().json(json!({ "error": "该 crd 资源已存在,无需重复注册" }))); - } - - insert_metadata(db_connection, item_kind, &version, false, &data) - .await - .map_err(ErrorInternalServerError)?; - - insert_key_value(item_kind, kind_upper); - } else { - // 调用check_metadata函数 - let metadata_exists = check_metadata(db_connection, &plural, &version,false).await.map_err(ErrorInternalServerError)?; - - if !metadata_exists { - return Ok(HttpResponse::NotFound().json(json!({ "error": "该 plural 不存在,请重新注册或检查 plural 版本以及是否需要 namespace" }))); - } - - // 从 json_data 中提取 metadata.name - let item_name = data - .get("metadata") - .and_then(|metadata| metadata.get("name")) - .and_then(|name| name.as_str()) - .unwrap_or("error"); - - let kine_exists = check_kine(db_connection, &plural, item_name, &version, None).await.map_err(ErrorInternalServerError)?; - - if kine_exists { - return Ok(HttpResponse::InternalServerError().json(json!({ "error": "该资源已存在,请勿重复创建" }))); - } - - let mut model_map = HashMap::new(); - let kind = get_value(&*plural.clone()).unwrap(); - - model_map.insert("MODEL".to_string(), kind); - - // request消息 - let request_message = Message::new( - "CREATE".to_string(), - message::NativeEventAction::Other, - Some(model_map), - Some(ApiServerMessage { - content: data.clone(), - }), - None, - ); - - send_request(request_message, nats_cli).await.map_err(ErrorInternalServerError)?; - - let 
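Note on the removed `EventManager` (its `get_channel` and `send_event` methods are deleted just above): it keeps one tokio broadcast channel per resource-type key and creates channels lazily on first use. A self-contained sketch of that pattern:

```rust
// Sketch of the removed EventManager: lazily created, bounded broadcast
// channels keyed by resource type.
use std::collections::HashMap;
use std::sync::{Arc, Mutex};
use tokio::sync::broadcast;

#[derive(Clone, Debug)]
pub enum EventType { Create, Update, Delete }

#[derive(Clone, Debug)]
pub struct Event {
    pub event_type: EventType,
    pub message: serde_json::Value,
}

#[derive(Clone, Default)]
pub struct EventManager {
    channels: Arc<Mutex<HashMap<String, broadcast::Sender<Event>>>>,
}

impl EventManager {
    pub fn channel(&self, resource_type: &str) -> broadcast::Sender<Event> {
        let mut channels = self.channels.lock().unwrap();
        channels
            .entry(resource_type.to_string())
            .or_insert_with(|| broadcast::channel(100).0) // capacity 100, as in the removed code
            .clone()
    }

    pub fn send_event(&self, resource_type: &str, event_type: EventType, message: serde_json::Value) {
        // send() only fails when there are no subscribers, which is fine here.
        let _ = self.channel(resource_type).send(Event { event_type, message });
    }
}
```
Watch handlers subscribe to the same sender, so one `send_event` fans out to every open watch stream for that key; the bounded capacity means slow subscribers observe `RecvError::Lagged` instead of blocking writers.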
resource_type = format!("{}/{}", version, plural); - let watchone_resource_type = format!("{}/{}/{}", version, plural, item_name); + async fn get_resource(&self, params: K8sStylePathParams, query: Value, app_state: Arc) -> HttpResponse { + HttpResponse::from(APIServerResponse::from(self.service.get_resource(params.into(), query, app_state).await)) + } - event_manager.send_event(&resource_type, EventType::Create, data.clone()).await; - event_manager.send_event(&watchone_resource_type, EventType::Create, data.clone()).await; + async fn patch_resource(&self, params: K8sStylePathParams, data: Value, app_state: Arc) -> HttpResponse { + HttpResponse::from(APIServerResponse::from(self.service.patch_resource(params.into(), data, app_state).await)) + } - insert_kine(db_connection, &plural, item_name, &data, &version, None) - .await - .map_err(ErrorInternalServerError)?; + fn default(&self) -> Self { + Self { + service: APIServerService::new() } - - Ok(HttpResponse::Ok().json(data)) } +} - // /api/{version}/namespaces/{namespace}/{plural} - async fn create_api_with_namespace( - &self, - info: web::Path<(String, String, String)>, - data: web::Json, - db_connection: &mut DbConnection, - nats_cli: Arc, - event_manager: EventManager, - ) -> Result { - let (version, namespace, plural) = info.into_inner(); - let data = data.into_inner(); - - //判断是正常的插入函数还是 crd 资源的注册函数 - if plural == "crds" { - let item_kind = data - .get("spec") - .and_then(|spec| spec.get("names")) - .and_then(|names| names.get("plural")) - .and_then(|plural| plural.as_str()) - .unwrap_or("error"); - - let kind_upper = data - .get("spec") - .and_then(|spec| spec.get("names")) - .and_then(|names| names.get("kind")) - .and_then(|kind| kind.as_str()) - .unwrap_or("error"); - - // 调用check_metadata函数 - let metadata_exists = check_metadata(db_connection, item_kind, &version, true).await.map_err(ErrorInternalServerError)?; - - if metadata_exists { - return Ok(HttpResponse::NotFound().json(json!({ "error": "该 crd 资源已存在,无需重复注册" }))); - } - - insert_metadata(db_connection, item_kind, &version, true, &data) - .await - .map_err(ErrorInternalServerError)?; - - insert_key_value(item_kind, kind_upper); - } else { - // 调用check_metadata函数 - let metadata_exists = check_metadata(db_connection, &plural, &version, true).await.map_err(ErrorInternalServerError)?; - - if !metadata_exists { - return Ok(HttpResponse::NotFound().json(json!({ "error": "该 plural 不存在,请重新注册或检查 plural 版本以及是否需要 namespace" }))); - } - - // 从 json_data 中提取 metadata.name - let item_name = data - .get("metadata") - .and_then(|metadata| metadata.get("name")) - .and_then(|name| name.as_str()) - .unwrap_or("error"); - - let kine_exists = check_kine(db_connection, &plural, item_name, &version, Some(&namespace)).await.map_err(ErrorInternalServerError)?; - - if kine_exists { - return Ok(HttpResponse::InternalServerError().json(json!({ "error": "该资源已存在,请勿重复创建" }))); - } - - let mut model_map = HashMap::new(); - let kind = get_value(&*plural.clone()).unwrap(); - - model_map.insert("MODEL".to_string(), kind); - - // request消息 - let request_message = Message::new( - "CREATE".to_string(), - message::NativeEventAction::Other, - Some(model_map), - Some(ApiServerMessage { - content: data.clone(), - }), - None, - ); - - send_request(request_message, nats_cli).await.map_err(ErrorInternalServerError)?; - - let resource_type = format!("{}/{}/{}", version, namespace, plural); - let watchone_resource_type = format!("{}/{}/{}/{}", version, namespace, plural, item_name); - - 
event_manager.send_event(&resource_type, EventType::Create, data.clone()).await; - event_manager.send_event(&watchone_resource_type, EventType::Create, data.clone()).await; - - insert_kine(db_connection, &plural, item_name, &data, &version, Some(&namespace)) - .await - .map_err(ErrorInternalServerError)?; - } - - Ok(HttpResponse::Ok().json(data)) - } - - // /apis/{group}/{version}/{plural} - async fn create_apis_without_namespace( - &self, - info: web::Path<(String, String, String)>, - data: web::Json, - db_connection: &mut DbConnection, - nats_cli: Arc, - event_manager: EventManager, - ) -> Result { - let (group, version, plural) = info.into_inner(); - let ver = group + "/" + &*version; - let data = data.into_inner(); - - //判断是正常的插入函数还是 crd 资源的注册函数 - if plural == "crds" { - let item_kind = data - .get("spec") - .and_then(|spec| spec.get("names")) - .and_then(|names| names.get("plural")) - .and_then(|plural| plural.as_str()) - .unwrap_or("error"); - - let kind_upper = data - .get("spec") - .and_then(|spec| spec.get("names")) - .and_then(|names| names.get("kind")) - .and_then(|kind| kind.as_str()) - .unwrap_or("error"); - - // 调用check_metadata函数 - let metadata_exists = check_metadata(db_connection, item_kind, &ver, false).await.map_err(ErrorInternalServerError)?; - - if metadata_exists { - return Ok(HttpResponse::NotFound().json(json!({ "error": "该 crd 资源已存在,无需重复注册" }))); - } - - insert_metadata(db_connection, item_kind, &ver, false, &data) - .await - .map_err(ErrorInternalServerError)?; - - insert_key_value(item_kind, kind_upper); - } else { - // 调用check_metadata函数 - let metadata_exists = check_metadata(db_connection, &plural, &ver, false).await.map_err(ErrorInternalServerError)?; - - if !metadata_exists { - return Ok(HttpResponse::NotFound().json(json!({ "error": "该 plural 不存在,请重新注册或检查 plural 版本以及是否需要 namespace" }))); - } - - // 从 json_data 中提取 metadata.name - let item_name = data - .get("metadata") - .and_then(|metadata| metadata.get("name")) - .and_then(|name| name.as_str()) - .unwrap_or("error"); - - let kine_exists = check_kine(db_connection, &plural, item_name, &ver, None).await.map_err(ErrorInternalServerError)?; - - if kine_exists { - return Ok(HttpResponse::InternalServerError().json(json!({ "error": "该资源已存在,请勿重复创建" }))); - } - - let mut model_map = HashMap::new(); - let kind = get_value(&*plural.clone()).unwrap(); - - model_map.insert("MODEL".to_string(), kind); - - // request消息 - let request_message = Message::new( - "CREATE".to_string(), - message::NativeEventAction::Other, - Some(model_map), - Some(ApiServerMessage { - content: data.clone(), - }), - None, - ); - - send_request(request_message, nats_cli).await.map_err(ErrorInternalServerError)?; - - let resource_type = format!("{}/{}", ver, plural); - let watchone_resource_type = format!("{}/{}/{}", ver, plural, item_name); - - event_manager.send_event(&resource_type, EventType::Create, data.clone()).await; - event_manager.send_event(&watchone_resource_type, EventType::Create, data.clone()).await; - - insert_kine(db_connection, &plural, item_name, &data, &ver, None) - .await - .map_err(ErrorInternalServerError)?; - } - - Ok(HttpResponse::Ok().json(data)) - } - - // /apis/{group}/{version}/namespaces/{namespace}/{plural} - async fn create_apis_with_namespace( - &self, - info: web::Path<(String, String, String, String)>, - data: web::Json, - db_connection: &mut DbConnection, - nats_cli: Arc, - event_manager: EventManager, - ) -> Result { - let (group, version, namespace, plural) = info.into_inner(); - - let ver = group + "/" + 
&*version; - let data = data.into_inner(); - - //判断是正常的插入函数还是 crd 资源的注册函数 - if plural == "crds" { - let item_kind = data - .get("spec") - .and_then(|spec| spec.get("names")) - .and_then(|names| names.get("plural")) - .and_then(|plural| plural.as_str()) - .unwrap_or("error"); - - let kind_upper = data - .get("spec") - .and_then(|spec| spec.get("names")) - .and_then(|names| names.get("kind")) - .and_then(|kind| kind.as_str()) - .unwrap_or("error"); - - // 调用 check_metadata 函数 - let metadata_exists = check_metadata(db_connection, item_kind, &ver, true).await.map_err(ErrorInternalServerError)?; - - if metadata_exists { - return Ok(HttpResponse::NotFound().json(json!({ "error": "该 crd 资源已存在,无需重复注册" }))); - } - - insert_metadata(db_connection, item_kind, &ver, true, &data) - .await - .map_err(ErrorInternalServerError)?; - - insert_key_value(item_kind, kind_upper); - } else { - // 调用 check_metadata 函数 - let metadata_exists = check_metadata(db_connection, &plural, &ver, true).await.map_err(ErrorInternalServerError)?; - - if !metadata_exists { - return Ok(HttpResponse::NotFound().json(json!({ "error": "该 plural 不存在,请重新注册或检查 plural 版本以及是否需要 namespace" }))); - } - - // 从 json_data 中提取 metadata.name - let item_name = data - .get("metadata") - .and_then(|metadata| metadata.get("name")) - .and_then(|name| name.as_str()) - .unwrap_or("error"); - - let kine_exists = check_kine(db_connection, &plural, item_name, &ver, Some(&namespace)).await.map_err(ErrorInternalServerError)?; - - if kine_exists { - return Ok(HttpResponse::InternalServerError().json(json!({ "error": "该资源已存在,请勿重复创建" }))); - } - - let mut model_map = HashMap::new(); - let kind = get_value(&*plural.clone()).unwrap(); - - model_map.insert("MODEL".to_string(), kind); - - // request消息 - let request_message = Message::new( - "CREATE".to_string(), - message::NativeEventAction::Other, - Some(model_map), - Some(ApiServerMessage { - content: data.clone(), - }), - None, - ); - - send_request(request_message, nats_cli).await.map_err(ErrorInternalServerError)?; - - let resource_type = format!("{}/{}/{}", ver, namespace, plural); - let watchone_resource_type = format!("{}/{}/{}/{}", ver, namespace, plural, item_name); - - event_manager.send_event(&resource_type, EventType::Create, data.clone()).await; - event_manager.send_event(&watchone_resource_type, EventType::Create, data.clone()).await; - - insert_kine(db_connection, &plural, item_name, &data, &ver, Some(&namespace)) - .await - .map_err(ErrorInternalServerError)?; - } - - Ok(HttpResponse::Ok().json(data)) - } - - // /api/{version}/{plural}/{name} - async fn delete_api_without_namespace( - &self, - info: web::Path<(String, String, String)>, - db_connection: &mut DbConnection, - nats_cli: Arc, - event_manager: EventManager, - ) -> Result { - let (version, plural, name) = info.into_inner(); - - // 检查 metadata 中是否存在该 plural 且不需要 namespace - let metadata_exists = check_metadata(db_connection, &plural, &version, false) - .await - .map_err(ErrorInternalServerError)?; - - if !metadata_exists { - return Ok(HttpResponse::NotFound().json(json!({ "error": "该 plural 不存在,请检查 plural 版本以及是否需要 namespace" }))); - } - - let kine_exists = check_kine(db_connection, &plural, &name, &version, None).await.map_err(ErrorInternalServerError)?; - - if !kine_exists { - return Ok(HttpResponse::InternalServerError().json(json!({ "error": "指定数据不存在" }))); - } - - let mut model_map = HashMap::new(); - let kind = get_value(&*plural.clone()).unwrap(); - - model_map.insert("MODEL".to_string(), kind); - - let request_data = json!({ - 
"apiVersion": version.clone(), - "Namespaces": "", - "Plural": plural.clone(), - "Name": name.clone(), - }); - - // request消息 - let request_message = Message::new( - "DELETE".to_string(), - message::NativeEventAction::Other, - Some(model_map), - Some(ApiServerMessage { - content: request_data.clone(), - }), - None, - ); - - send_request(request_message, nats_cli).await.map_err(ErrorInternalServerError)?; - - let resource_type = format!("{}/{}", version, plural); - let watchone_resource_type = format!("{}/{}/{}", version, plural, name); - - event_manager.send_event(&resource_type, EventType::Delete, request_data.clone()).await; - event_manager.send_event(&watchone_resource_type, EventType::Delete, request_data).await; - - // 从 plural 表中删除指定的 name - let deleted = delete_from_kine(db_connection, &plural, &name, &version, None) - .await - .map_err(ErrorInternalServerError)?; - - if !deleted { - return Ok(HttpResponse::NotFound().json(json!({ "error": "指定数据不存在" }))); - } - - Ok(HttpResponse::Ok().json(json!({ "status": "delete_api_without_namespace success" }))) - } - - // /api/{version}/namespaces/{namespace}/{plural}/{name} - async fn delete_api_with_namespace( - &self, - info: web::Path<(String, String, String, String)>, - db_connection: &mut DbConnection, - nats_cli: Arc, - event_manager: EventManager, - ) -> Result { - let (version, namespace, plural, name) = info.into_inner(); - // 检查 metadata 中是否存在该 plural 且需要 namespace - let metadata_exists = check_metadata(db_connection, &plural, &version, true) - .await - .map_err(ErrorInternalServerError)?; - - if !metadata_exists { - return Ok(HttpResponse::NotFound().json(json!({ "error": "该 plural 不存在,请检查 plural 版本以及是否需要 namespace" }))); - } - - let kine_exists = check_kine(db_connection, &plural, &name, &version, Some(&namespace)).await.map_err(ErrorInternalServerError)?; - - if !kine_exists { - return Ok(HttpResponse::InternalServerError().json(json!({ "error": "指定数据不存在" }))); - } - - let mut model_map = HashMap::new(); - let kind = get_value(&*plural.clone()).unwrap(); - - model_map.insert("MODEL".to_string(), kind); - - let request_data = json!({ - "apiVersion": version.clone(), - "Namespaces": namespace.clone(), - "Plural": plural.clone(), - "Name": name.clone(), - }); - - // request消息 - let request_message = Message::new( - "DELETE".to_string(), - message::NativeEventAction::Other, - Some(model_map), - Some(ApiServerMessage { - content: request_data.clone(), - }), - None, - ); - send_request(request_message, nats_cli).await.map_err(ErrorInternalServerError)?; - - let resource_type = format!("{}/{}/{}", version, namespace, plural); - let watchone_resource_type = format!("{}/{}/{}/{}", version, namespace, plural, name); - - event_manager.send_event(&resource_type, EventType::Delete, request_data.clone()).await; - event_manager.send_event(&watchone_resource_type, EventType::Delete, request_data).await; - - // 从 plural 表中删除指定的数据 - let deleted = delete_from_kine(db_connection, &plural, &name, &version, Some(&namespace)) - .await - .map_err(ErrorInternalServerError)?; - - if !deleted { - return Ok(HttpResponse::NotFound().json(json!({ "error": "指定数据不存在" }))); - } - - Ok(HttpResponse::Ok().json(json!({ "status": "delete_api_with_namespace success" }))) - } - - // /apis/{group}/{version}/{plural}/{name} - async fn delete_apis_without_namespace( - &self, - info: web::Path<(String, String, String, String)>, - db_connection: &mut DbConnection, - nats_cli: Arc, - event_manager: EventManager, - ) -> Result { - let (group, version, plural, name) = 
info.into_inner(); - let ver = format!("{}/{}", group, version); - - let metadata_exists = check_metadata(db_connection, &plural, &ver, false) - .await - .map_err(ErrorInternalServerError)?; - - if !metadata_exists { - return Ok(HttpResponse::NotFound().json(json!({ "error": "该 plural 不存在,请检查 plural 版本以及是否需要 namespace" }))); - } - - let kine_exists = check_kine(db_connection, &plural, &name, &ver, None).await.map_err(ErrorInternalServerError)?; - - if !kine_exists { - return Ok(HttpResponse::InternalServerError().json(json!({ "error": "指定数据不存在" }))); - } - - let mut model_map = HashMap::new(); - let kind = get_value(&*plural.clone()).unwrap(); - - model_map.insert("MODEL".to_string(), kind); - - let request_data = json!({ - "apiVersion": ver.clone(), - "Namespaces": "", - "Plural": plural.clone(), - "Name": name.clone(), - }); - - // request消息 - let request_message = Message::new( - "DELETE".to_string(), - message::NativeEventAction::Other, - Some(model_map), - Some(ApiServerMessage { - content: request_data.clone(), - }), - None, - ); - send_request(request_message, nats_cli).await.map_err(ErrorInternalServerError)?; - - let resource_type = format!("{}/{}", ver, plural); - let watchone_resource_type = format!("{}/{}/{}", ver, plural, name); - - event_manager.send_event(&resource_type, EventType::Delete, request_data.clone()).await; - event_manager.send_event(&watchone_resource_type, EventType::Delete, request_data).await; - - let deleted = delete_from_kine(db_connection, &plural, &name, &ver, None) - .await - .map_err(ErrorInternalServerError)?; - - if !deleted { - return Ok(HttpResponse::NotFound().json(json!({ "error": "指定数据不存在" }))); - } - - Ok(HttpResponse::Ok().json(json!({ "status": "delete_apis_without_namespace success" }))) - } - - // /apis/{group}/{version}/namespaces/{namespace}/{plural}/{name} - async fn delete_apis_with_namespace( - &self, - info: web::Path<(String, String, String, String, String)>, - db_connection: &mut DbConnection, - nats_cli: Arc, - event_manager: EventManager, - ) -> Result { - let (group, version, namespace, plural, name) = info.into_inner(); - let ver = format!("{}/{}", group, version); - - let metadata_exists = check_metadata(db_connection, &plural, &ver, true) - .await - .map_err(ErrorInternalServerError)?; - - if !metadata_exists { - return Ok(HttpResponse::NotFound().json(json!({ "error": "该 plural 不存在,请检查 plural 版本以及是否需要 namespace" }))); - } - - let kine_exists = check_kine(db_connection, &plural, &name, &ver, Some(&namespace)).await.map_err(ErrorInternalServerError)?; - - if !kine_exists { - return Ok(HttpResponse::InternalServerError().json(json!({ "error": "指定数据不存在" }))); - } - - let mut model_map = HashMap::new(); - let kind = get_value(&*plural.clone()).unwrap(); - - model_map.insert("MODEL".to_string(), kind); - - let request_data = json!({ - "apiVersion": ver.clone(), - "Namespaces": namespace.clone(), - "Plural": plural.clone(), - "Name": name.clone(), - }); - - // request消息 - let request_message = Message::new( - "DELETE".to_string(), - message::NativeEventAction::Other, - Some(model_map), - Some(ApiServerMessage { - content: request_data.clone(), - }), - None, - ); - send_request(request_message, nats_cli).await.map_err(ErrorInternalServerError)?; - - let resource_type = format!("{}/{}/{}", ver, namespace, plural); - let watchone_resource_type = format!("{}/{}/{}/{}", ver, namespace, plural, name); - - event_manager.send_event(&resource_type, EventType::Delete, request_data.clone()).await; - 
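All of the removed create/update/delete handlers go through the deleted `send_request` helper, which issues a request over the project's feventbus NATS client with a 100 second timeout and treats a reply containing "Error handling reply" as a failure. The real `NatsCli`/`Message` signatures are not shown in this hunk, so the sketch below is hypothetical: `EventBusClient` is a stand-in trait, and only the shape of the call is taken from the removed code.

```rust
// Hypothetical sketch of the removed send_request helper. EventBusClient
// stands in for the project's feventbus NatsCli; its method signature is
// an assumption, not the crate's real API.
use std::time::Duration;

#[async_trait::async_trait]
pub trait EventBusClient {
    async fn request(&self, payload: String, timeout: Duration) -> Result<String, String>;
}

pub async fn send_request<C: EventBusClient>(
    client: &C,
    payload: String,
) -> Result<(), Box<dyn std::error::Error>> {
    let reply = client
        .request(payload, Duration::from_secs(100))
        .await
        .map_err(|e| -> Box<dyn std::error::Error> { e.into() })?;

    // The removed code treated a reply that merely *contains* this marker
    // as a failure; matching on a structured status field would be sturdier.
    if reply.contains("Error handling reply") {
        return Err("error found in the eventbus reply".into());
    }
    Ok(())
}
```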
event_manager.send_event(&watchone_resource_type, EventType::Delete, request_data).await; - - let deleted = delete_from_kine(db_connection, &plural, &name, &ver, Some(&namespace)) - .await - .map_err(ErrorInternalServerError)?; - - if !deleted { - return Ok(HttpResponse::NotFound().json(json!({ "error": "指定数据不存在" }))); - } - - Ok(HttpResponse::Ok().json(json!({ "status": "delete_apis_with_namespace success" }))) - } - - // /api/{version}/{plural}/{name} - async fn update_api_without_namespace( - &self, - info: web::Path<(String, String, String)>, - data: web::Json, - db_connection: &mut DbConnection, - nats_cli: Arc, - event_manager: EventManager, - ) -> Result { - let (version, plural, name) = info.into_inner(); - let data = data.into_inner(); - - let metadata_exists = check_metadata(db_connection, &plural, &version, false) - .await - .map_err(ErrorInternalServerError)?; - - if !metadata_exists { - return Ok(HttpResponse::NotFound().json(json!({ "error": "该 plural 不存在,请检查 plural 版本以及是否需要 namespace" }))); - } - - let kine_exists = check_kine(db_connection, &plural, &name, &version, None).await.map_err(ErrorInternalServerError)?; - - if !kine_exists { - return Ok(HttpResponse::InternalServerError().json(json!({ "error": "指定数据不存在" }))); - } - - let mut model_map = HashMap::new(); - let kind = get_value(&*plural.clone()).unwrap(); - - model_map.insert("MODEL".to_string(), kind); - - let request_data = json!({ - "apiVersion": version.clone(), - "Namespaces": "", - "Plural": plural.clone(), - "Name": name.clone(), - "Data": data.clone() - }); - - // request消息 - let request_message = Message::new( - "UPDATE".to_string(), - message::NativeEventAction::Other, - Some(model_map), - Some(ApiServerMessage { - content: request_data.clone(), - }), - None, - ); - send_request(request_message, nats_cli).await.map_err(ErrorInternalServerError)?; - - let resource_type = format!("{}/{}", version, plural); - let watchone_resource_type = format!("{}/{}/{}", version, plural, name); - - event_manager.send_event(&resource_type, EventType::Update, request_data.clone()).await; - event_manager.send_event(&watchone_resource_type, EventType::Update, request_data).await; - - // 查询 plural 表中是否存在 name 匹配的数据 - let updated = update_data_in_kine(db_connection, &plural, &name, &version, None, &data) - .await - .map_err(ErrorInternalServerError)?; - - if !updated { - return Ok(HttpResponse::NotFound().json(json!({ "error": "指定数据不存在" }))); - } - - Ok(HttpResponse::Ok().json(data)) - } - - // /api/{version}/namespaces/{namespace}/{plural}/{name} - async fn update_api_with_namespace( - &self, - info: web::Path<(String, String, String, String)>, - data: web::Json, - db_connection: &mut DbConnection, - nats_cli: Arc, - event_manager: EventManager, - ) -> Result { - let (version, namespace, plural, name) = info.into_inner(); - let data = data.into_inner(); - - // 检查 metadata 中是否存在 plural 且要求 namespace - let metadata_exists = check_metadata(db_connection, &plural, &version, true) - .await - .map_err(ErrorInternalServerError)?; - - if !metadata_exists { - return Ok(HttpResponse::NotFound().json(json!({ "error": "该 plural 不存在,请检查 plural 版本以及是否需要 namespace" }))); - } - - let kine_exists = check_kine(db_connection, &plural, &name, &version, Some(&namespace)).await.map_err(ErrorInternalServerError)?; - - if !kine_exists { - return Ok(HttpResponse::InternalServerError().json(json!({ "error": "指定数据不存在" }))); - } - - let mut model_map = HashMap::new(); - let kind = get_value(&*plural.clone()).unwrap(); - - 
model_map.insert("MODEL".to_string(), kind); - - let request_data = json!({ - "apiVersion": version.clone(), - "Namespaces": namespace.clone(), - "Plural": plural.clone(), - "Name": name.clone(), - "Data": data.clone() - }); - - // request消息 - let request_message = Message::new( - "UPDATE".to_string(), - message::NativeEventAction::Other, - Some(model_map), - Some(ApiServerMessage { - content: request_data.clone(), - }), - None, - ); - send_request(request_message, nats_cli).await.map_err(ErrorInternalServerError)?; - - let resource_type = format!("{}/{}/{}", version, namespace, plural); - let watchone_resource_type = format!("{}/{}/{}/{}", version, namespace, plural, name); - - event_manager.send_event(&resource_type, EventType::Update, request_data.clone()).await; - event_manager.send_event(&watchone_resource_type, EventType::Update, request_data).await; - - let updated = update_data_in_kine(db_connection, &plural, &name, &version, Some(&namespace), &data) - .await - .map_err(ErrorInternalServerError)?; - - if !updated { - return Ok(HttpResponse::NotFound().json(json!({ "error": "指定数据不存在" }))); - } - - Ok(HttpResponse::Ok().json(data)) - } - - // /apis/{group}/{version}/{plural}/{name} - async fn update_apis_without_namespace( - &self, - info: web::Path<(String, String, String, String)>, - data: web::Json, - db_connection: &mut DbConnection, - nats_cli: Arc, - event_manager: EventManager, - ) -> Result { - let (group, version, plural, name) = info.into_inner(); - let ver = format!("{}/{}", group, version); - let data = data.into_inner(); - - let metadata_exists = check_metadata(db_connection, &plural, &ver, false) - .await - .map_err(ErrorInternalServerError)?; - - if !metadata_exists { - return Ok(HttpResponse::NotFound().json(json!({ "error": "该 plural 不存在,请检查 plural 版本以及是否需要 namespace" }))); - } - - let kine_exists = check_kine(db_connection, &plural, &name, &ver, None).await.map_err(ErrorInternalServerError)?; - - if !kine_exists { - return Ok(HttpResponse::InternalServerError().json(json!({ "error": "指定数据不存在" }))); - } - - let mut model_map = HashMap::new(); - let kind = get_value(&*plural.clone()).unwrap(); - - model_map.insert("MODEL".to_string(), kind); - - let request_data = json!({ - "apiVersion": ver, - "Namespaces": "", - "Plural": plural.clone(), - "Name": name.clone(), - "Data": data.clone() - }); - - // request消息 - let request_message = Message::new( - "UPDATE".to_string(), - message::NativeEventAction::Other, - Some(model_map), - Some(ApiServerMessage { - content: request_data.clone(), - }), - None, - ); - send_request(request_message, nats_cli).await.map_err(ErrorInternalServerError)?; - - let resource_type = format!("{}/{}", ver, plural); - let watchone_resource_type = format!("{}/{}/{}", ver, plural, name); - - event_manager.send_event(&resource_type, EventType::Update, request_data.clone()).await; - event_manager.send_event(&watchone_resource_type, EventType::Update, request_data).await; - - let updated = update_data_in_kine(db_connection, &plural, &name, &ver, None, &data) - .await - .map_err(ErrorInternalServerError)?; - - if !updated { - return Ok(HttpResponse::NotFound().json(json!({ "error": "指定数据不存在" }))); - } - - Ok(HttpResponse::Ok().json(data)) - } - - // /apis/{group}/{version}/namespaces/{namespace}/{plural}/{name} - async fn update_apis_with_namespace( - &self, - info: web::Path<(String, String, String, String, String)>, - data: web::Json, - db_connection: &mut DbConnection, - nats_cli: Arc, - event_manager: EventManager, - ) -> Result { - let (group, 
version, namespace, plural, name) = info.into_inner(); - let ver = format!("{}/{}", group, version); - let data = data.into_inner(); - - let metadata_exists = check_metadata(db_connection, &plural, &ver, true) - .await - .map_err(ErrorInternalServerError)?; - - if !metadata_exists { - return Ok(HttpResponse::NotFound().json(json!({ "error": "该 plural 不存在,请检查 plural 版本以及是否需要 namespace" }))); - } - - let kine_exists = check_kine(db_connection, &plural, &name, &ver, Some(&namespace)).await.map_err(ErrorInternalServerError)?; - - if !kine_exists { - return Ok(HttpResponse::InternalServerError().json(json!({ "error": "指定数据不存在" }))); - } - - let mut model_map = HashMap::new(); - let kind = get_value(&*plural.clone()).unwrap(); - - model_map.insert("MODEL".to_string(), kind); - - let request_data = json!({ - "apiVersion": ver, - "Namespaces": namespace.clone(), - "Plural": plural.clone(), - "Name": name.clone(), - "Data": data.clone() - }); - - // request消息 - let request_message = Message::new( - "UPDATE".to_string(), - message::NativeEventAction::Other, - Some(model_map), - Some(ApiServerMessage { - content: request_data.clone(), - }), - None, - ); - send_request(request_message, nats_cli).await.map_err(ErrorInternalServerError)?; - - let resource_type = format!("{}/{}/{}", ver, namespace, plural); - let watchone_resource_type = format!("{}/{}/{}/{}", ver, namespace, plural, name); - - event_manager.send_event(&resource_type, EventType::Update, request_data.clone()).await; - event_manager.send_event(&watchone_resource_type, EventType::Update, request_data).await; - - let updated = update_data_in_kine(db_connection, &plural, &name, &ver, Some(&namespace), &data) - .await - .map_err(ErrorInternalServerError)?; - - if !updated { - return Ok(HttpResponse::NotFound().json(json!({ "error": "指定数据不存在" }))); - } - - Ok(HttpResponse::Ok().json(data)) - } - - // /api/{version}/{plural}/{name} - async fn getone_api_without_namespace( - &self, - info: web::Path<(String, String, String)>, - _data: web::Query, - db_connection: &mut DbConnection, - _nats_cli: Arc, - ) -> Result { - let (version, plural, name) = info.into_inner(); - let metadata_exists = check_metadata(db_connection, &plural, &version, false) - .await - .map_err(ErrorInternalServerError)?; - - if !metadata_exists { - return Ok(HttpResponse::NotFound().json(json!({ "error": "该 plural 不存在,请检查 plural 版本以及是否需要 namespace" }))); - } - - if let Some(data) = get_data_from_kine(db_connection, &plural, &name, &version, None) - .await - .map_err(ErrorInternalServerError)? 
{ - // 将字符串转换为 JSON 格式 - match serde_json::from_str::(&data) { - Ok(json_data) => return Ok(HttpResponse::Ok().json(json_data)), // 成功解析为 JSON,返回 JSON 响应 - Err(_) => return Ok(HttpResponse::InternalServerError().json(json!({ "error": "数据格式错误,无法解析为 JSON" }))), - } - } - Ok(HttpResponse::NotFound().json(json!({ "error": "指定数据不存在" }))) - } - - // /api/{version}/namespaces/{namespace}/{plural}/{name} - async fn getone_api_with_namespace( - &self, - info: web::Path<(String, String, String, String)>, - _data: web::Query, - db_connection: &mut DbConnection, - _nats_cli: Arc, - ) -> Result { - let (version, namespace, plural, name) = info.into_inner(); - let metadata_exists = check_metadata(db_connection, &plural, &version, true) - .await - .map_err(ErrorInternalServerError)?; - - if !metadata_exists { - return Ok(HttpResponse::NotFound().json(json!({ "error": "该 plural 不存在,请检查 plural 版本以及是否需要 namespace" }))); - } - - if let Some(data) = get_data_from_kine(db_connection, &plural, &name, &version, Some(&namespace)) - .await - .map_err(ErrorInternalServerError)? { - // 将字符串转换为 JSON 格式 - match serde_json::from_str::(&data) { - Ok(json_data) => return Ok(HttpResponse::Ok().json(json_data)), // 成功解析为 JSON,返回 JSON 响应 - Err(_) => return Ok(HttpResponse::InternalServerError().json(json!({ "error": "数据格式错误,无法解析为 JSON" }))), - } - } - Ok(HttpResponse::NotFound().json(json!({ "error": "指定数据不存在" }))) - } - - // /apis/{group}/{version}/{plural}/{name} - async fn getone_apis_without_namespace( - &self, - info: web::Path<(String, String, String, String)>, - _data: web::Query, - db_connection: &mut DbConnection, - _nats_cli: Arc, - ) -> Result { - let (group, version, plural, name) = info.into_inner(); - let ver = format!("{}/{}", group, version); - - let metadata_exists = check_metadata(db_connection, &plural, &ver, false) - .await - .map_err(ErrorInternalServerError)?; - - if !metadata_exists { - return Ok(HttpResponse::NotFound().json(json!({ "error": "该 plural 不存在,请检查 plural 版本以及是否需要 namespace" }))); - } - - if let Some(data) = get_data_from_kine(db_connection, &plural, &name, &ver, None) - .await - .map_err(ErrorInternalServerError)? { - // 将字符串转换为 JSON 格式 - match serde_json::from_str::(&data) { - Ok(json_data) => return Ok(HttpResponse::Ok().json(json_data)), // 成功解析为 JSON,返回 JSON 响应 - Err(_) => return Ok(HttpResponse::InternalServerError().json(json!({ "error": "数据格式错误,无法解析为 JSON" }))), - } - } - Ok(HttpResponse::NotFound().json(json!({ "error": "指定数据不存在" }))) - } - - // /apis/{group}/{version}/namespaces/{namespace}/{plural}/{name} - async fn getone_apis_with_namespace( - &self, - info: web::Path<(String, String, String, String, String)>, - _data: web::Query, - db_connection: &mut DbConnection, - _nats_cli: Arc, - ) -> Result { - let (group, version, namespace, plural, name) = info.into_inner(); - let ver = format!("{}/{}", group, version); - - let metadata_exists = check_metadata(db_connection, &plural, &ver, true) - .await - .map_err(ErrorInternalServerError)?; - - if !metadata_exists { - return Ok(HttpResponse::NotFound().json(json!({ "error": "该 plural 不存在,请检查 plural 版本以及是否需要 namespace" }))); - } - - if let Some(data) = get_data_from_kine(db_connection, &plural, &name, &ver, Some(&namespace)) - .await - .map_err(ErrorInternalServerError)? 
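The removed get-one handlers store each resource as JSON text and re-parse it on read, returning a 500 when the stored string is not valid JSON. A condensed sketch of that step:

```rust
// Sketch of the parse-or-500 step used by the removed get-one handlers,
// assuming actix-web and serde_json as in the surrounding code.
use actix_web::HttpResponse;
use serde_json::{json, Value};

fn stored_string_to_response(stored: &str) -> HttpResponse {
    match serde_json::from_str::<Value>(stored) {
        Ok(json_data) => HttpResponse::Ok().json(json_data),
        Err(_) => HttpResponse::InternalServerError()
            .json(json!({ "error": "stored data is not valid JSON" })),
    }
}
```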
{ - // 将字符串转换为 JSON 格式 - match serde_json::from_str::(&data) { - Ok(json_data) => return Ok(HttpResponse::Ok().json(json_data)), // 成功解析为 JSON,返回 JSON 响应 - Err(_) => return Ok(HttpResponse::InternalServerError().json(json!({ "error": "数据格式错误,无法解析为 JSON" }))), - } - } - Ok(HttpResponse::NotFound().json(json!({ "error": "指定数据不存在" }))) - } - - // /api/{version}/{plural} - async fn listall_api_without_namespace( - &self, - info: web::Path<(String, String)>, - _data: web::Query, - db_connection: &mut DbConnection, - _nats_cli: Arc, - ) -> Result { - let (version, plural) = info.into_inner(); - let metadata_exists = check_metadata(db_connection, &plural, &version, false) - .await - .map_err(ErrorInternalServerError)?; - - if !metadata_exists { - return Ok(HttpResponse::NotFound().json(json!({ "error": "该 plural 不存在,请检查 plural 版本以及是否需要 namespace" }))); - } - - let data_list = get_all_data_from_kine(db_connection, &plural, &version, None) - .await - .map_err(ErrorInternalServerError)?; - - // 将所有字符串解析为 JSON 格式并收集到一个数组中 - let json_array: Vec = data_list - .into_iter() - .filter_map(|data_str| serde_json::from_str(&data_str).ok()) // 解析成功的数据 - .collect(); - - Ok(HttpResponse::Ok().json(json_array)) - } - - // /api/{version}/namespaces/{namespace}/{plural} - async fn listall_api_with_namespace( - &self, - info: web::Path<(String, String, String)>, - _data: web::Query, - db_connection: &mut DbConnection, - _nats_cli: Arc, - ) -> Result { - let (version, namespace, plural) = info.into_inner(); - let metadata_exists = check_metadata(db_connection, &plural, &version, true) - .await - .map_err(ErrorInternalServerError)?; - - if !metadata_exists { - return Ok(HttpResponse::NotFound().json(json!({ "error": "该 plural 不存在,请检查 plural 版本以及是否需要 namespace" }))); - } - - let data_list = get_all_data_from_kine(db_connection, &plural, &version, Some(&namespace)) - .await - .map_err(ErrorInternalServerError)?; - - // 将所有字符串解析为 JSON 格式并收集到一个数组中 - let json_array: Vec = data_list - .into_iter() - .filter_map(|data_str| serde_json::from_str(&data_str).ok()) // 解析成功的数据 - .collect(); - - Ok(HttpResponse::Ok().json(json_array)) - } - - // /apis/{group}/{version}/{plural} - async fn listall_apis_without_namespace( - &self, - info: web::Path<(String, String, String)>, - _data: web::Query, - db_connection: &mut DbConnection, - _nats_cli: Arc, - ) -> Result { - let (group, version, plural) = info.into_inner(); - let ver = format!("{}/{}", group, version); - - let metadata_exists = check_metadata(db_connection, &plural, &ver, false) - .await - .map_err(ErrorInternalServerError)?; - - if !metadata_exists { - return Ok(HttpResponse::NotFound().json(json!({ "error": "该 plural 不存在,请检查 plural 版本以及是否需要 namespace" }))); - } - - let data_list = get_all_data_from_kine(db_connection, &plural, &ver, None) - .await - .map_err(ErrorInternalServerError)?; - - // 将所有字符串解析为 JSON 格式并收集到一个数组中 - let json_array: Vec = data_list - .into_iter() - .filter_map(|data_str| serde_json::from_str(&data_str).ok()) // 解析成功的数据 - .collect(); - - Ok(HttpResponse::Ok().json(json_array)) - } - - // /apis/{group}/{version}/namespaces/{namespace}/{plural} - async fn listall_apis_with_namespace( - &self, - info: web::Path<(String, String, String, String)>, - _data: web::Query, - db_connection: &mut DbConnection, - _nats_cli: Arc, - ) -> Result { - let (group, version, namespace, plural) = info.into_inner(); - let ver = format!("{}/{}", group, version); - - let metadata_exists = check_metadata(db_connection, &plural, &ver, true) - .await - 
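The removed list handlers apply the same idea to many rows, silently dropping any row whose `data` column fails to parse:

```rust
// Sketch of the removed list path: parse every stored row, keep only the
// rows that are valid JSON, and return them as one array.
use serde_json::Value;

fn rows_to_json_array(rows: Vec<String>) -> Vec<Value> {
    rows.into_iter()
        .filter_map(|row| serde_json::from_str(&row).ok())
        .collect()
}
```
Dropping unparseable rows keeps the endpoint available at the cost of hiding corruption; logging the parse error before discarding a row would make that trade-off visible.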
.map_err(ErrorInternalServerError)?; - - if !metadata_exists { - return Ok(HttpResponse::NotFound().json(json!({ "error": "该 plural 不存在,请检查 plural 版本以及是否需要 namespace" }))); - } - - let data_list = get_all_data_from_kine(db_connection, &plural, &ver, Some(&namespace)) - .await - .map_err(ErrorInternalServerError)?; - - // 将所有字符串解析为 JSON 格式并收集到一个数组中 - let json_array: Vec = data_list - .into_iter() - .filter_map(|data_str| serde_json::from_str(&data_str).ok()) // 解析成功的数据 - .collect(); - - Ok(HttpResponse::Ok().json(json_array)) - } - - async fn watchall_api_without_namespace( - &self, - info: Path<(String, String)>, - _data: Query, - event_manager: EventManager - ) -> Result { - let (version, plural) = info.into_inner(); - let resource_type = format!("{}/{}", version, plural); - - let channel = event_manager.get_channel(&resource_type); - let mut rx = channel.subscribe(); - - let stream = async_stream::stream! { - loop { - match rx.recv().await { - Ok(event) => { - let json_event = serde_json::to_string(&event).unwrap(); - println!("{}", json_event); - yield Ok::(Bytes::from(json_event)); - } - Err(_) => { - yield Err(actix_web::error::ErrorInternalServerError("Error occurred").into()); - } - } - } - }; - - Ok(HttpResponse::Ok() - .content_type("text/event-stream") - .streaming(stream)) - } - - async fn watchall_api_with_namespace( - &self, - info: Path<(String, String, String)>, - _data: Query, - event_manager: EventManager - ) -> Result { - let (version, namespace, plural) = info.into_inner(); - let resource_type = format!("{}/{}/{}", version, namespace, plural); - - let channel = event_manager.get_channel(&resource_type); - let mut rx = channel.subscribe(); - - let stream = async_stream::stream! { - loop { - match rx.recv().await { - Ok(event) => { - let json_event = serde_json::to_string(&event).unwrap(); - yield Ok::(Bytes::from(json_event)); - } - Err(_) => { - yield Err(actix_web::error::ErrorInternalServerError("Error occurred").into()); - } - } - } - }; - - Ok(HttpResponse::Ok() - .content_type("text/event-stream") - .streaming(stream)) - } - - async fn watchall_apis_without_namespace( - &self, - info: Path<(String, String, String)>, - _data: Query, - event_manager: EventManager - ) -> Result { - let (group, version, plural) = info.into_inner(); - let ver = format!("{}/{}", group, version); - let resource_type = format!("{}/{}", ver, plural); - - let channel = event_manager.get_channel(&resource_type); - let mut rx = channel.subscribe(); - - let stream = async_stream::stream! { - loop { - match rx.recv().await { - Ok(event) => { - let json_event = serde_json::to_string(&event).unwrap(); - yield Ok::(Bytes::from(json_event)); - } - Err(_) => { - yield Err(actix_web::error::ErrorInternalServerError("Error occurred").into()); - } - } - } - }; - - Ok(HttpResponse::Ok() - .content_type("text/event-stream") - .streaming(stream)) - } - - async fn watchall_apis_with_namespace( - &self, - info: Path<(String, String, String, String)>, - _data: Query, - event_manager: EventManager - ) -> Result { - let (group, version, namespace, plural) = info.into_inner(); - let ver = format!("{}/{}", group, version); - - let resource_type = format!("{}/{}/{}", ver, namespace, plural); - - let channel = event_manager.get_channel(&resource_type); - let mut rx = channel.subscribe(); - - let stream = async_stream::stream! 
{ - loop { - match rx.recv().await { - Ok(event) => { - let json_event = serde_json::to_string(&event).unwrap(); - yield Ok::(Bytes::from(json_event)); - } - Err(_) => { - yield Err(actix_web::error::ErrorInternalServerError("Error occurred").into()); - } - } - } - }; - - Ok(HttpResponse::Ok() - .content_type("text/event-stream") - .streaming(stream)) - } - - async fn watchone_api_without_namespace(&self, info: Path<(String, String, String)>, _data: Query, event_manager: EventManager) -> Result { - let (version, plural, name) = info.into_inner(); - let resource_type = format!("{}/{}/{}", version, plural, name); - - let channel = event_manager.get_channel(&resource_type); - let mut rx = channel.subscribe(); - - let stream = async_stream::stream! { - loop { - match rx.recv().await { - Ok(event) => { - let json_event = serde_json::to_string(&event).unwrap(); - yield Ok::(Bytes::from(json_event)); - } - Err(_) => { - yield Err(actix_web::error::ErrorInternalServerError("Error occurred").into()); - } - } - } - }; - - Ok(HttpResponse::Ok() - .content_type("text/event-stream") - .streaming(stream)) - } - - async fn watchone_api_with_namespace(&self, info: Path<(String, String, String, String)>, _data: Query, event_manager: EventManager) -> Result { - let (version, namespace, plural, name) = info.into_inner(); - let resource_type = format!("{}/{}/{}/{}", version, namespace, plural, name); - - let channel = event_manager.get_channel(&resource_type); - let mut rx = channel.subscribe(); - - let stream = async_stream::stream! { - loop { - match rx.recv().await { - Ok(event) => { - let json_event = serde_json::to_string(&event).unwrap(); - yield Ok::(Bytes::from(json_event)); - } - Err(_) => { - yield Err(actix_web::error::ErrorInternalServerError("Error occurred").into()); - } - } - } - }; - - Ok(HttpResponse::Ok() - .content_type("text/event-stream") - .streaming(stream)) - } - - async fn watchone_apis_without_namespace(&self, info: Path<(String, String, String, String)>, _data: Query, event_manager: EventManager) -> Result { - let (group, version, plural, name) = info.into_inner(); - let ver = format!("{}/{}", group, version); - - let resource_type = format!("{}/{}/{}", ver, plural, name); - - let channel = event_manager.get_channel(&resource_type); - let mut rx = channel.subscribe(); - - let stream = async_stream::stream! { - loop { - match rx.recv().await { - Ok(event) => { - let json_event = serde_json::to_string(&event).unwrap(); - yield Ok::(Bytes::from(json_event)); - } - Err(_) => { - yield Err(actix_web::error::ErrorInternalServerError("Error occurred").into()); - } - } - } - }; - - Ok(HttpResponse::Ok() - .content_type("text/event-stream") - .streaming(stream)) - } - - async fn watchone_apis_with_namespace(&self, info: Path<(String, String, String, String, String)>, _data: Query, event_manager: EventManager) -> Result { - let (group, version, namespace, plural, name) = info.into_inner(); - let ver = format!("{}/{}", group, version); - - let resource_type = format!("{}/{}/{}/{}", ver, namespace, plural, name); - - let channel = event_manager.get_channel(&resource_type); - let mut rx = channel.subscribe(); - - let stream = async_stream::stream! 
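Every removed watch handler shares one shape: subscribe to the broadcast channel for the resource key, wrap the receiver in an `async_stream::stream!` loop, and return it as a `text/event-stream` body. A condensed sketch of that shape, assuming actix-web 4, tokio and the async-stream crate, with `Event` mirroring the EventManager sketch above:

```rust
// Sketch of the shared watch-handler shape from the removed watch endpoints.
use actix_web::{web::Bytes, HttpResponse};
use tokio::sync::broadcast;

#[derive(Clone, serde::Serialize)]
struct Event {
    event_type: String,
    message: serde_json::Value,
}

fn watch_stream(sender: broadcast::Sender<Event>) -> HttpResponse {
    let mut rx = sender.subscribe();

    let stream = async_stream::stream! {
        loop {
            match rx.recv().await {
                Ok(event) => {
                    // Serialising a plain struct cannot realistically fail,
                    // so the original's unwrap is kept here.
                    let json_event = serde_json::to_string(&event).unwrap();
                    yield Ok::<Bytes, Box<dyn std::error::Error>>(Bytes::from(json_event));
                }
                // Lagged (subscriber fell behind) and Closed (sender dropped)
                // both ended up as an error chunk in the removed code.
                Err(_) => yield Err("watch channel error".into()),
            }
        }
    };

    HttpResponse::Ok()
        .content_type("text/event-stream")
        .streaming(stream)
}
```
As in the removed code, the loop does not terminate after an error; a production variant would usually `break` on `RecvError::Closed`.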
{ - loop { - match rx.recv().await { - Ok(event) => { - let json_event = serde_json::to_string(&event).unwrap(); - yield Ok::(Bytes::from(json_event)); - } - Err(_) => { - yield Err(actix_web::error::ErrorInternalServerError("Error occurred").into()); - } - } - } - }; - - Ok(HttpResponse::Ok() - .content_type("text/event-stream") - .streaming(stream)) - } - - - fn default(&self) -> DefaultHandler { - DefaultHandler {} - } -} - -#[cfg(test)] -mod tests { - use super::*; - use actix_web::{test, web, App}; - use serde_json::json; - use crate::cores::db::DbPool; - use std::sync::Arc; - use feventbus::traits::controller::EventBus; - use once_cell::sync::Lazy; - - // 使用 Lazy 来初始化静态的共享资源 - static DB_POOL: Lazy> = Lazy::new(|| { - let pool = DbPool::new_in_memory().expect("Failed to create in-memory database pool"); - Arc::new(pool) - }); - - static HANDLER: Lazy> = Lazy::new(|| Arc::new(DefaultHandler::new())); - - - #[actix_web::test] - async fn test_api_with_namespace() { - let nats_cli = Arc::new(NatsCli::new().await.unwrap()); - - let app = test::init_service( - App::new() - .route("/api/{version}/namespaces/{namespace}/{plural}", web::post().to({ - let nats_cli = Arc::clone(&nats_cli); - move |path, data| { - let handler = HANDLER.clone(); - let db_pool = DB_POOL.clone(); - let nats_cli = Arc::clone(&nats_cli); - - async move { - match db_pool.get_connection() { - Ok(mut conn) => handler.create_api_with_namespace(path, data, &mut conn, nats_cli).await, - Err(_) => Err(ErrorInternalServerError("Failed to get database connection")), - } - } - } - })) - .route("/api/{version}/namespaces/{namespace}/{plural}/{name}", web::put().to({ - let nats_cli = Arc::clone(&nats_cli); - move |path, data| { - let handler = HANDLER.clone(); - let db_pool = DB_POOL.clone(); - let nats_cli = Arc::clone(&nats_cli); - - async move { - match db_pool.get_connection() { - Ok(mut conn) => handler.update_api_with_namespace(path, data, &mut conn, nats_cli).await, - Err(_) => Err(ErrorInternalServerError("Failed to get database connection")), - } - } - } - })) - .route("/api/{version}/namespaces/{namespace}/{plural}/{name}", web::delete().to({ - let nats_cli = Arc::clone(&nats_cli); - move |path| { - let handler = HANDLER.clone(); - let db_pool = DB_POOL.clone(); - let nats_cli = Arc::clone(&nats_cli); - - async move { - match db_pool.get_connection() { - Ok(mut conn) => handler.delete_api_with_namespace(path, &mut conn, nats_cli).await, - Err(_) => Err(ErrorInternalServerError("Failed to get database connection")), - } - } - } - })) - .route("/api/{version}/namespaces/{namespace}/{plural}/{name}", web::get().to({ - let nats_cli = Arc::clone(&nats_cli); - move |path, data| { - let handler = HANDLER.clone(); - let db_pool = DB_POOL.clone(); - let nats_cli = Arc::clone(&nats_cli); - - async move { - match db_pool.get_connection() { - Ok(mut conn) => handler.getone_api_with_namespace(path, data, &mut conn, nats_cli).await, - Err(_) => Err(ErrorInternalServerError("Failed to get database connection")), - } - } - } - })) - .route("/api/{version}/namespaces/{namespace}/{plural}", web::get().to({ - let nats_cli = Arc::clone(&nats_cli); - move |path, data| { - let handler = HANDLER.clone(); - let db_pool = DB_POOL.clone(); - let nats_cli = Arc::clone(&nats_cli); - - async move { - match db_pool.get_connection() { - Ok(mut conn) => handler.listall_api_with_namespace(path, data, &mut conn, nats_cli).await, - Err(_) => Err(ErrorInternalServerError("Failed to get database connection")), - } - } - } - })) - ).await; - - - - - let 
create_req_data = json!({ - "apiVersion": "v1", - "kind": "Cargo", - "metadata": { - "name": "cargo-case-01", - "annotations": { - "k8s.v1.cni.cncf.io/networks": "testns1/macvlan-conf-1" - } - }, - "spec": { - "containers": [ - { - "name": "cargo-case-01", - "image": "g-ubjg5602-docker.pkg.coding.net/iscas-system/containers/busybox:latest", - "command": [ - "sleep", - "3600" - ] - } - ] - } - }); - - let create_req = test::TestRequest::post() - .uri("/api/v1/namespaces/global/cargos") - .set_json(&create_req_data) - .to_request(); - - let create_resp = test::call_service(&app, create_req).await; - - assert!(create_resp.status().is_success()); - - let create_resp_body = test::read_body_json::(create_resp).await; - assert_eq!(create_resp_body, create_req_data); - - - - - - - - let update_req_data = json!({ - "apiVersion": "v1", - "kind": "Cargo", - "metadata": { - "name": "cargo-case-01", - "annotations": { - "k8s.v1.cni.cncf.io/networks": "testns1/macvlan-conf-1" - } - }, - "spec": { - "containers": [ - { - "name": "cargo-case-01", - "image": "g-ubjg5602-docker.pkg.coding.net/iscas-system/containers/busybox:123", - "command": [ - "sleep", - "3600" - ] - } - ] - } - }); - - let update_req = test::TestRequest::put() - .uri("/api/v1/namespaces/global/cargos/cargo-case-01") - .set_json(&update_req_data) - .to_request(); - - let update_resp = test::call_service(&app, update_req).await; - - assert!(update_resp.status().is_success()); - - let update_resp_body = test::read_body_json::(update_resp).await; - assert_eq!(update_resp_body, update_req_data); - - - - - let get_req_data = json!({ - "apiVersion": "v1", - "kind": "Cargo", - "metadata": { - "name": "cargo-case-01", - "annotations": { - "k8s.v1.cni.cncf.io/networks": "testns1/macvlan-conf-1" - } - }, - "spec": { - "containers": [ - { - "name": "cargo-case-01", - "image": "g-ubjg5602-docker.pkg.coding.net/iscas-system/containers/busybox:123", - "command": [ - "sleep", - "3600" - ] - } - ] - } - }); - - let get_req = test::TestRequest::get() - .uri("/api/v1/namespaces/global/cargos/cargo-case-01") - .to_request(); - - let get_resp = test::call_service(&app, get_req).await; - - assert!(get_resp.status().is_success()); - - let get_resp_body = test::read_body_json::(get_resp).await; - assert_eq!(get_resp_body, get_req_data); - - - - - let create_req_data2 = json!({ - "apiVersion": "v1", - "kind": "Cargo", - "metadata": { - "name": "cargo-case-02", - "annotations": { - "k8s.v1.cni.cncf.io/networks": "testns1/macvlan-conf-1" - } - }, - "spec": { - "containers": [ - { - "name": "cargo-case-02", - "image": "g-ubjg5602-docker.pkg.coding.net/iscas-system/containers/busybox:latest", - "command": [ - "sleep", - "3600" - ] - } - ] - } - }); - - let create_req2 = test::TestRequest::post() - .uri("/api/v1/namespaces/global/cargos") - .set_json(&create_req_data2) - .to_request(); - - let create_resp2 = test::call_service(&app, create_req2).await; - - assert!(create_resp2.status().is_success()); - - let create_resp_body2 = test::read_body_json::(create_resp2).await; - assert_eq!(create_resp_body2, create_req_data2); - - - - let list_req_data = json!([{ - "apiVersion": "v1", - "kind": "Cargo", - "metadata": { - "name": "cargo-case-01", - "annotations": { - "k8s.v1.cni.cncf.io/networks": "testns1/macvlan-conf-1" - } - }, - "spec": { - "containers": [ - { - "name": "cargo-case-01", - "image": "g-ubjg5602-docker.pkg.coding.net/iscas-system/containers/busybox:123", - "command": [ - "sleep", - "3600" - ] - } - ] - } - }, - { - "apiVersion": "v1", - "kind": "Cargo", - 
"metadata": { - "name": "cargo-case-02", - "annotations": { - "k8s.v1.cni.cncf.io/networks": "testns1/macvlan-conf-1" - } - }, - "spec": { - "containers": [ - { - "name": "cargo-case-02", - "image": "g-ubjg5602-docker.pkg.coding.net/iscas-system/containers/busybox:latest", - "command": [ - "sleep", - "3600" - ] - } - ] - } - }]); - - let list_req = test::TestRequest::get() - .uri("/api/v1/namespaces/global/cargos") - .to_request(); - - let list_resp = test::call_service(&app, list_req).await; - - assert!(list_resp.status().is_success()); - - let list_resp_body = test::read_body_json::(list_resp).await; - assert_eq!(list_resp_body, list_req_data); - - - - let delete_req = test::TestRequest::delete() - .uri("/api/v1/namespaces/global/cargos/cargo-case-01") - .to_request(); - - let delete_resp = test::call_service(&app, delete_req).await; - - assert!(delete_resp.status().is_success()); - - let list_req_data2 = json!([ - { - "apiVersion": "v1", - "kind": "Cargo", - "metadata": { - "name": "cargo-case-02", - "annotations": { - "k8s.v1.cni.cncf.io/networks": "testns1/macvlan-conf-1" - } - }, - "spec": { - "containers": [ - { - "name": "cargo-case-02", - "image": "g-ubjg5602-docker.pkg.coding.net/iscas-system/containers/busybox:latest", - "command": [ - "sleep", - "3600" - ] - } - ] - } - }]); - - let list_req2 = test::TestRequest::get() - .uri("/api/v1/namespaces/global/cargos") - .to_request(); - - let list_resp2 = test::call_service(&app, list_req2).await; - - assert!(list_resp2.status().is_success()); - - let list_resp_body2 = test::read_body_json::(list_resp2).await; - assert_eq!(list_resp_body2, list_req_data2); - - } - - - - #[actix_web::test] - async fn test_api_without_namespace() { - let nats_cli = Arc::new(NatsCli::new().await.unwrap()); - - let app = test::init_service( - App::new() - .route("/api/{version}/{plural}", web::post().to({ - let nats_cli = Arc::clone(&nats_cli); - - move |path, data| { - let handler = HANDLER.clone(); - let db_pool = DB_POOL.clone(); - let nats_cli = Arc::clone(&nats_cli); - - async move { - match db_pool.get_connection() { - Ok(mut conn) => handler.create_api_without_namespace(path, data, &mut conn, nats_cli).await, - Err(_) => Err(ErrorInternalServerError("Failed to get database connection")), - } - } - } - })) - .route("/api/{version}/{plural}/{name}", web::put().to({ - let nats_cli = Arc::clone(&nats_cli); - - move |path, data| { - let handler = HANDLER.clone(); - let db_pool = DB_POOL.clone(); - let nats_cli = Arc::clone(&nats_cli); - - async move { - match db_pool.get_connection() { - Ok(mut conn) => handler.update_api_without_namespace(path, data, &mut conn, nats_cli).await, - Err(_) => Err(ErrorInternalServerError("Failed to get database connection")), - } - } - } - })) - .route("/api/{version}/{plural}/{name}", web::delete().to({ - let nats_cli = Arc::clone(&nats_cli); - - move |path| { - let handler = HANDLER.clone(); - let db_pool = DB_POOL.clone(); - let nats_cli = Arc::clone(&nats_cli); - - async move { - match db_pool.get_connection() { - Ok(mut conn) => handler.delete_api_without_namespace(path, &mut conn, nats_cli).await, - Err(_) => Err(ErrorInternalServerError("Failed to get database connection")), - } - } - } - })) - .route("/api/{version}/{plural}/{name}", web::get().to({ - let nats_cli = Arc::clone(&nats_cli); - - move |path, data| { - let handler = HANDLER.clone(); - let db_pool = DB_POOL.clone(); - let nats_cli = Arc::clone(&nats_cli); - - async move { - match db_pool.get_connection() { - Ok(mut conn) => 
handler.getone_api_without_namespace(path, data, &mut conn, nats_cli).await, - Err(_) => Err(ErrorInternalServerError("Failed to get database connection")), - } - } - } - })) - .route("/api/{version}/{plural}", web::get().to({ - let nats_cli = Arc::clone(&nats_cli); - - move |path, data| { - let handler = HANDLER.clone(); - let db_pool = DB_POOL.clone(); - let nats_cli = Arc::clone(&nats_cli); - - async move { - match db_pool.get_connection() { - Ok(mut conn) => handler.listall_api_without_namespace(path, data, &mut conn, nats_cli).await, - Err(_) => Err(ErrorInternalServerError("Failed to get database connection")), - } - } - } - })) - ).await; - - - let create_req_data = json!({ - "apiVersion": "v1", - "kind": "Node", - "metadata": { - "name": "node-case-01", - }, - "spec": { - "containers": [ - { - "name": "node-case-01", - "command": [ - "sleep", - "3600" - ] - } - ] - } - }); - - let create_req = test::TestRequest::post() - .uri("/api/v1/nodes") - .set_json(&create_req_data) - .to_request(); - - let create_resp = test::call_service(&app, create_req).await; - - assert!(create_resp.status().is_success()); - - let create_resp_body = test::read_body_json::(create_resp).await; - assert_eq!(create_resp_body, create_req_data); - - - - - - - - let update_req_data = json!({ - "apiVersion": "v1", - "kind": "Node", - "metadata": { - "name": "node-case-01", - }, - "spec": { - "containers": [ - { - "name": "node-case-01", - "command": [ - "sleep", - "1" - ] - } - ] - } - }); - - let update_req = test::TestRequest::put() - .uri("/api/v1/nodes/node-case-01") - .set_json(&update_req_data) - .to_request(); - - let update_resp = test::call_service(&app, update_req).await; - - assert!(update_resp.status().is_success()); - - let update_resp_body = test::read_body_json::(update_resp).await; - assert_eq!(update_resp_body, update_req_data); - - - - - let get_req_data = json!({ - "apiVersion": "v1", - "kind": "Node", - "metadata": { - "name": "node-case-01", - }, - "spec": { - "containers": [ - { - "name": "node-case-01", - "command": [ - "sleep", - "1" - ] - } - ] - } - }); - - let get_req = test::TestRequest::get() - .uri("/api/v1/nodes/node-case-01") - .to_request(); - - let get_resp = test::call_service(&app, get_req).await; - - assert!(get_resp.status().is_success()); - - let get_resp_body = test::read_body_json::(get_resp).await; - assert_eq!(get_resp_body, get_req_data); - - - - - let create_req_data2 = json!({ - "apiVersion": "v1", - "kind": "Node", - "metadata": { - "name": "node-case-02", - }, - "spec": { - "containers": [ - { - "name": "node-case-02", - "command": [ - "sleep", - "3600" - ] - } - ] - } - }); - - let create_req2 = test::TestRequest::post() - .uri("/api/v1/nodes") - .set_json(&create_req_data2) - .to_request(); - - let create_resp2 = test::call_service(&app, create_req2).await; - - assert!(create_resp2.status().is_success()); - - let create_resp_body2 = test::read_body_json::(create_resp2).await; - assert_eq!(create_resp_body2, create_req_data2); - - - - let list_req_data = json!([{ - "apiVersion": "v1", - "kind": "Node", - "metadata": { - "name": "node-case-01", - }, - "spec": { - "containers": [ - { - "name": "node-case-01", - "command": [ - "sleep", - "1" - ] - } - ] - } - }, - { - "apiVersion": "v1", - "kind": "Node", - "metadata": { - "name": "node-case-02", - }, - "spec": { - "containers": [ - { - "name": "node-case-02", - "command": [ - "sleep", - "3600" - ] - } - ] - } - }]); - - let list_req = test::TestRequest::get() - .uri("/api/v1/nodes") - .to_request(); - - let 
list_resp = test::call_service(&app, list_req).await; - - assert!(list_resp.status().is_success()); - - let list_resp_body = test::read_body_json::(list_resp).await; - assert_eq!(list_resp_body, list_req_data); - - - - let delete_req = test::TestRequest::delete() - .uri("/api/v1/nodes/node-case-01") - .to_request(); - - let delete_resp = test::call_service(&app, delete_req).await; - - assert!(delete_resp.status().is_success()); - - let list_req_data2 = json!([ - { - "apiVersion": "v1", - "kind": "Node", - "metadata": { - "name": "node-case-02", - }, - "spec": { - "containers": [ - { - "name": "node-case-02", - "command": [ - "sleep", - "3600" - ] - } - ] - } - }]); - let list_req2 = test::TestRequest::get() - .uri("/api/v1/nodes") - .to_request(); - - let list_resp2 = test::call_service(&app, list_req2).await; - - assert!(list_resp2.status().is_success()); - - let list_resp_body2 = test::read_body_json::(list_resp2).await; - assert_eq!(list_resp_body2, list_req_data2); - - } - - - - - #[actix_web::test] - async fn test_apis_with_namespace() { - let nats_cli = Arc::new(NatsCli::new().await.unwrap()); - - let app = test::init_service( - App::new() - .route("/apis/{group}/{version}/namespaces/{namespace}/{plural}", web::post().to({ - let nats_cli = Arc::clone(&nats_cli); - - move |path, data| { - let handler = HANDLER.clone(); - let db_pool = DB_POOL.clone(); - let nats_cli = Arc::clone(&nats_cli); - - async move { - match db_pool.get_connection() { - Ok(mut conn) => handler.create_apis_with_namespace(path, data, &mut conn, nats_cli).await, - Err(_) => Err(ErrorInternalServerError("Failed to get database connection")), - } - } - } - })) - .route("/apis/{group}/{version}/namespaces/{namespace}/{plural}/{name}", web::put().to({ - let nats_cli = Arc::clone(&nats_cli); - - move |path, data| { - let handler = HANDLER.clone(); - let db_pool = DB_POOL.clone(); - let nats_cli = Arc::clone(&nats_cli); - - async move { - match db_pool.get_connection() { - Ok(mut conn) => handler.update_apis_with_namespace(path, data, &mut conn, nats_cli).await, - Err(_) => Err(ErrorInternalServerError("Failed to get database connection")), - } - } - } - })) - .route("/apis/{group}/{version}/namespaces/{namespace}/{plural}/{name}", web::delete().to({ - let nats_cli = Arc::clone(&nats_cli); - - move |path| { - let handler = HANDLER.clone(); - let db_pool = DB_POOL.clone(); - let nats_cli = Arc::clone(&nats_cli); - - async move { - match db_pool.get_connection() { - Ok(mut conn) => handler.delete_apis_with_namespace(path, &mut conn, nats_cli).await, - Err(_) => Err(ErrorInternalServerError("Failed to get database connection")), - } - } - } - })) - .route("/apis/{group}/{version}/namespaces/{namespace}/{plural}/{name}", web::get().to({ - let nats_cli = Arc::clone(&nats_cli); - - move |path, data| { - let handler = HANDLER.clone(); - let db_pool = DB_POOL.clone(); - let nats_cli = Arc::clone(&nats_cli); - - async move { - match db_pool.get_connection() { - Ok(mut conn) => handler.getone_apis_with_namespace(path, data, &mut conn, nats_cli).await, - Err(_) => Err(ErrorInternalServerError("Failed to get database connection")), - } - } - } - })) - .route("/apis/{group}/{version}/namespaces/{namespace}/{plural}", web::get().to({ - let nats_cli = Arc::clone(&nats_cli); - - move |path, data| { - let handler = HANDLER.clone(); - let db_pool = DB_POOL.clone(); - let nats_cli = Arc::clone(&nats_cli); - - async move { - match db_pool.get_connection() { - Ok(mut conn) => handler.listall_apis_with_namespace(path, data, &mut conn, 
nats_cli).await, - Err(_) => Err(ErrorInternalServerError("Failed to get database connection")), - } - } - } - })) - ).await; - - - - let create_req_data = json!({ - "apiVersion": "batch/v1", - "kind": "Job", - "metadata": { - "name": "job-case-01", - }, - "spec": { - "containers": [ - { - "name": "job-case-01", - "command": [ - "sleep", - "3600" - ] - } - ] - } - }); - - let create_req = test::TestRequest::post() - .uri("/apis/batch/v1/namespaces/global/jobs") - .set_json(&create_req_data) - .to_request(); - - let create_resp = test::call_service(&app, create_req).await; - - assert!(create_resp.status().is_success()); - - let create_resp_body = test::read_body_json::(create_resp).await; - assert_eq!(create_resp_body, create_req_data); - - - - - - - - let update_req_data = json!({ - "apiVersion": "batch/v1", - "kind": "Job", - "metadata": { - "name": "job-case-01", - }, - "spec": { - "containers": [ - { - "name": "job-case-01", - "command": [ - "sleep", - "1" - ] - } - ] - } - }); - - let update_req = test::TestRequest::put() - .uri("/apis/batch/v1/namespaces/global/jobs/job-case-01") - .set_json(&update_req_data) - .to_request(); - - let update_resp = test::call_service(&app, update_req).await; - - assert!(update_resp.status().is_success()); - - let update_resp_body = test::read_body_json::(update_resp).await; - assert_eq!(update_resp_body, update_req_data); - - - - - let get_req_data = json!({ - "apiVersion": "batch/v1", - "kind": "Job", - "metadata": { - "name": "job-case-01", - }, - "spec": { - "containers": [ - { - "name": "job-case-01", - "command": [ - "sleep", - "1" - ] - } - ] - } - }); - - let get_req = test::TestRequest::get() - .uri("/apis/batch/v1/namespaces/global/jobs/job-case-01") - .to_request(); - - let get_resp = test::call_service(&app, get_req).await; - - assert!(get_resp.status().is_success()); - - let get_resp_body = test::read_body_json::(get_resp).await; - assert_eq!(get_resp_body, get_req_data); - - - - - let create_req_data2 = json!({ - "apiVersion": "batch/v1", - "kind": "Job", - "metadata": { - "name": "job-case-02", - }, - "spec": { - "containers": [ - { - "name": "job-case-02", - "command": [ - "sleep", - "3600" - ] - } - ] - } - }); - - let create_req2 = test::TestRequest::post() - .uri("/apis/batch/v1/namespaces/global/jobs") - .set_json(&create_req_data2) - .to_request(); - - let create_resp2 = test::call_service(&app, create_req2).await; - - assert!(create_resp2.status().is_success()); - - let create_resp_body2 = test::read_body_json::(create_resp2).await; - assert_eq!(create_resp_body2, create_req_data2); - - - - let list_req_data = json!([{ - "apiVersion": "batch/v1", - "kind": "Job", - "metadata": { - "name": "job-case-01", - }, - "spec": { - "containers": [ - { - "name": "job-case-01", - "command": [ - "sleep", - "1" - ] - } - ] - } - }, - { - "apiVersion": "batch/v1", - "kind": "Job", - "metadata": { - "name": "job-case-02", - }, - "spec": { - "containers": [ - { - "name": "job-case-02", - "command": [ - "sleep", - "3600" - ] - } - ] - } - }]); - - let list_req = test::TestRequest::get() - .uri("/apis/batch/v1/namespaces/global/jobs") - .to_request(); - - let list_resp = test::call_service(&app, list_req).await; - - assert!(list_resp.status().is_success()); - - let list_resp_body = test::read_body_json::(list_resp).await; - assert_eq!(list_resp_body, list_req_data); - - - - let delete_req = test::TestRequest::delete() - .uri("/apis/batch/v1/namespaces/global/jobs/job-case-01") - .to_request(); - - let delete_resp = test::call_service(&app, 
delete_req).await; - - assert!(delete_resp.status().is_success()); - - let list_req_data2 = json!([ - { - "apiVersion": "batch/v1", - "kind": "Job", - "metadata": { - "name": "job-case-02", - }, - "spec": { - "containers": [ - { - "name": "job-case-02", - "command": [ - "sleep", - "3600" - ] - } - ] - } - }]); - let list_req2 = test::TestRequest::get() - .uri("/apis/batch/v1/namespaces/global/jobs") - .to_request(); - - let list_resp2 = test::call_service(&app, list_req2).await; - - assert!(list_resp2.status().is_success()); - - let list_resp_body2 = test::read_body_json::(list_resp2).await; - assert_eq!(list_resp_body2, list_req_data2); - - } - - - #[actix_web::test] - async fn test_crd_and_apis_without_namespace() { - let nats_cli = Arc::new(NatsCli::new().await.unwarp()); - - let app = test::init_service( - App::new() - .route("/apis/{group}/{version}/{plural}", web::post().to({ - let nats_cli = Arc::clone(&nats_cli); - - move |path, data| { - let handler = HANDLER.clone(); - let db_pool = DB_POOL.clone(); - let nats_cli = Arc::clone(&nats_cli); - - async move { - match db_pool.get_connection() { - Ok(mut conn) => handler.create_apis_without_namespace(path, data, &mut conn, nats_cli).await, - Err(_) => Err(ErrorInternalServerError("Failed to get database connection")), - } - } - } - })) - .route("/apis/{group}/{version}/{plural}/{name}", web::put().to({ - let nats_cli = Arc::clone(&nats_cli); - - move |path, data| { - let handler = HANDLER.clone(); - let db_pool = DB_POOL.clone(); - let nats_cli = Arc::clone(&nats_cli); - - async move { - match db_pool.get_connection() { - Ok(mut conn) => handler.update_apis_without_namespace(path, data, &mut conn, nats_cli).await, - Err(_) => Err(ErrorInternalServerError("Failed to get database connection")), - } - } - } - })) - .route("/apis/{group}/{version}/{plural}/{name}", web::delete().to({ - let nats_cli = Arc::clone(&nats_cli); - - move |path| { - let handler = HANDLER.clone(); - let db_pool = DB_POOL.clone(); - let nats_cli = Arc::clone(&nats_cli); - - async move { - match db_pool.get_connection() { - Ok(mut conn) => handler.delete_apis_without_namespace(path, &mut conn, nats_cli).await, - Err(_) => Err(ErrorInternalServerError("Failed to get database connection")), - } - } - } - })) - .route("/apis/{group}/{version}/{plural}/{name}", web::get().to({ - let nats_cli = Arc::clone(&nats_cli); - - move |path, data| { - let handler = HANDLER.clone(); - let db_pool = DB_POOL.clone(); - let nats_cli = Arc::clone(&nats_cli); - - async move { - match db_pool.get_connection() { - Ok(mut conn) => handler.getone_apis_without_namespace(path, data, &mut conn, nats_cli).await, - Err(_) => Err(ErrorInternalServerError("Failed to get database connection")), - } - } - } - })) - .route("/apis/{group}/{version}/{plural}", web::get().to({ - let nats_cli = Arc::clone(&nats_cli); - - move |path, data| { - let handler = HANDLER.clone(); - let db_pool = DB_POOL.clone(); - let nats_cli = Arc::clone(&nats_cli); - - async move { - match db_pool.get_connection() { - Ok(mut conn) => handler.listall_apis_without_namespace(path, data, &mut conn, nats_cli).await, - Err(_) => Err(ErrorInternalServerError("Failed to get database connection")), - } - } - } - })) - ).await; - - - let crd_req_data = json!({ - "apiVersion": "batch/v1", - "kind": "CustomResourceDefinition", - "metadata": { - "name": "myjob.iscas.cn" - }, - "spec": { - "names": - { - "kind": "Myjob", - "listKind": "MyjobList", - "plural": "myjobs", - "singular": "myjob" - } - } - }); - - let crd_req = 
test::TestRequest::post() - .uri("/apis/batch/v1/crds") - .set_json(&crd_req_data) - .to_request(); - - let crd_resp = test::call_service(&app, crd_req).await; - - assert!(crd_resp.status().is_success()); - - let crd_resp_body = test::read_body_json::(crd_resp).await; - assert_eq!(crd_resp_body, crd_req_data); - - - let create_req_data = json!({ - "apiVersion": "batch/v1", - "kind": "Myjob", - "metadata": { - "name": "myjob-case-01", - }, - "spec": { - "containers": [ - { - "name": "myjob-case-01", - "command": [ - "sleep", - "3600" - ] - } - ] - } - }); - - let create_req = test::TestRequest::post() - .uri("/apis/batch/v1/myjobs") - .set_json(&create_req_data) - .to_request(); - - let create_resp = test::call_service(&app, create_req).await; - - assert!(create_resp.status().is_success()); - - let create_resp_body = test::read_body_json::(create_resp).await; - assert_eq!(create_resp_body, create_req_data); - - - - - - - - let update_req_data = json!({ - "apiVersion": "batch/v1", - "kind": "Myjob", - "metadata": { - "name": "myjob-case-01", - }, - "spec": { - "containers": [ - { - "name": "myjob-case-01", - "command": [ - "sleep", - "1" - ] - } - ] - } - }); - - let update_req = test::TestRequest::put() - .uri("/apis/batch/v1/myjobs/myjob-case-01") - .set_json(&update_req_data) - .to_request(); - - let update_resp = test::call_service(&app, update_req).await; - - assert!(update_resp.status().is_success()); - - let update_resp_body = test::read_body_json::(update_resp).await; - assert_eq!(update_resp_body, update_req_data); - - - - - let get_req_data = json!({ - "apiVersion": "batch/v1", - "kind": "Myjob", - "metadata": { - "name": "myjob-case-01", - }, - "spec": { - "containers": [ - { - "name": "myjob-case-01", - "command": [ - "sleep", - "1" - ] - } - ] - } - }); - - let get_req = test::TestRequest::get() - .uri("/apis/batch/v1/myjobs/myjob-case-01") - .to_request(); - - let get_resp = test::call_service(&app, get_req).await; - - assert!(get_resp.status().is_success()); - - let get_resp_body = test::read_body_json::(get_resp).await; - assert_eq!(get_resp_body, get_req_data); - - - - - let create_req_data2 = json!({ - "apiVersion": "batch/v1", - "kind": "Myjob", - "metadata": { - "name": "myjob-case-02", - }, - "spec": { - "containers": [ - { - "name": "myjob-case-02", - "command": [ - "sleep", - "3600" - ] - } - ] - } - }); - - let create_req2 = test::TestRequest::post() - .uri("/apis/batch/v1/myjobs") - .set_json(&create_req_data2) - .to_request(); - - let create_resp2 = test::call_service(&app, create_req2).await; - - assert!(create_resp2.status().is_success()); - - let create_resp_body2 = test::read_body_json::(create_resp2).await; - assert_eq!(create_resp_body2, create_req_data2); - - - - let list_req_data = json!([{ - "apiVersion": "batch/v1", - "kind": "Myjob", - "metadata": { - "name": "myjob-case-01", - }, - "spec": { - "containers": [ - { - "name": "myjob-case-01", - "command": [ - "sleep", - "1" - ] - } - ] - } - }, - { - "apiVersion": "batch/v1", - "kind": "Myjob", - "metadata": { - "name": "myjob-case-02", - }, - "spec": { - "containers": [ - { - "name": "myjob-case-02", - "command": [ - "sleep", - "3600" - ] - } - ] - } - }]); - - let list_req = test::TestRequest::get() - .uri("/apis/batch/v1/myjobs") - .to_request(); - - let list_resp = test::call_service(&app, list_req).await; - - assert!(list_resp.status().is_success()); - - let list_resp_body = test::read_body_json::(list_resp).await; - assert_eq!(list_resp_body, list_req_data); - - - - let delete_req = 
test::TestRequest::delete() - .uri("/apis/batch/v1/myjobs/myjob-case-01") - .to_request(); - - let delete_resp = test::call_service(&app, delete_req).await; - - assert!(delete_resp.status().is_success()); - - let list_req_data2 = json!([ - { - "apiVersion": "batch/v1", - "kind": "Myjob", - "metadata": { - "name": "myjob-case-02", - }, - "spec": { - "containers": [ - { - "name": "myjob-case-02", - "command": [ - "sleep", - "3600" - ] - } - ] - } - }]); - let list_req2 = test::TestRequest::get() - .uri("/apis/batch/v1/myjobs") - .to_request(); - - let list_resp2 = test::call_service(&app, list_req2).await; - - assert!(list_resp2.status().is_success()); - - let list_resp_body2 = test::read_body_json::(list_resp2).await; - assert_eq!(list_resp_body2, list_req_data2); - - } -} diff --git a/src/cores/mod.rs b/src/cores/mod.rs index a34795b8c2f5dec2be64c9063eade14e5abae24e..ea4b623a375f67ee59455eb01436a18d70bb20c9 100644 --- a/src/cores/mod.rs +++ b/src/cores/mod.rs @@ -1,11 +1,61 @@ +use crate::cores::apiserver::{APIServer, AppState}; +use crate::cores::events::{P2PEventServer, WatchEventPublisher}; +use crate::cores::handlers::DefaultHandler; +use crate::db::db::DbPool; +use feventbus::impls::nats::nats::NatsCli; +use feventbus::traits::controller::EventBus; +use serde::{Deserialize, Serialize}; +use std::sync::Arc; + /** * Copyright (2024, ) Institute of Software, Chinese Academy of Sciences - * author: wuheng@iscas.ac.cn + * author: chenhongyu23@otcaix.iscas.ac.cn, wuheng@iscas.ac.cn * since: 0.1.0 - **/ + * +**/ pub mod apiserver; -pub mod config; pub mod handlers; pub mod checker; -pub mod db; \ No newline at end of file +pub mod events; +pub mod services; + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct ResourcesMessage { + group: Option, + version: String, + namespace: Option, + plural: String, +} + +pub async fn prepare_app_state(database_url: &str) -> anyhow::Result { + let db_pool = Arc::new(DbPool::new(database_url)?); + let handler: DefaultHandler = DefaultHandler::new(); + let nats_cli = Arc::new(NatsCli::new().await?); + let watch_event_publisher = Arc::new(WatchEventPublisher::new(nats_cli.clone())); + Ok(AppState { + db_pool, + handler: Arc::new(handler), + nats_cli, + watch_event_publisher, + }) +} + +/// 启动 Web 服务器 +/// +/// # 参数 +/// - `database_url`: 数据库连接字符串 +/// - `address`: 服务监听地址(如 "0.0.0.0:8080") +/// +/// # 返回值 +/// - 异步运行结果 +pub async fn start_server(database_url: &str, address: &str) -> anyhow::Result<()> { + let app_state = prepare_app_state(database_url).await?; + // 启动watch相关事件监听协程 + app_state.clone().watch_event_publisher.start(); + // 启动P2P事件监听协程 + P2PEventServer::new(Arc::from(app_state.clone())).start(); + APIServer::new().start(address, app_state).await?; + Ok(()) +} + diff --git a/src/cores/services.rs b/src/cores/services.rs new file mode 100644 index 0000000000000000000000000000000000000000..9e671c57356a5d40170cb53ecdec24c24ccd9a8a --- /dev/null +++ b/src/cores/services.rs @@ -0,0 +1,556 @@ +use crate::cores::apiserver::{AppState, K8sStylePathParams}; +use crate::cores::events::WatchEventPublisher; +use crate::db::check_exist::{check_kine, check_metadata}; +use crate::db::db::DbConnection; +use crate::db::delete::delete_from_kine; +use crate::db::get::{get_all_data_from_kine, get_data_from_kine}; +use crate::db::insert::{insert_kine, insert_metadata}; +use crate::db::update::update_data_in_kine; +use feventbus::err::Error as FEventBusError; +use fleetmod::utils::APIVersion; +use serde::{Deserialize, Serialize}; +use serde_json::{json, Value}; 
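For orientation, a minimal sketch of how a binary could drive the new start_server entry point added in src/cores/mod.rs above; the crate-internal path, the SQLite file name and the bind address are illustrative assumptions, not part of the patch.

#[actix_web::main]
async fn main() -> anyhow::Result<()> {
    // DbPool is an enum over Postgres and SQLite pools, so a postgres:// URL
    // should also be accepted here (assumption based on DbPool::new taking a URL).
    crate::cores::start_server("database.sqlite", "0.0.0.0:8080").await
}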
+use std::cell::RefCell; +use std::collections::HashMap; +use std::error::Error; +use std::fmt::{Display, Formatter}; +use std::sync::{Arc, LazyLock, Mutex}; +use fleetmod::FleetResource; + +#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)] +pub enum APIServerStatusCode { + // 正常流程 + OK = 20000, + + // 内部错误流程 + InternalError = 50000, + + // 请求方错误流程 + NotFound = 40004, + BadRequest = 40000, + Duplicated = 40001, + Timeout = 40002, +} + +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct APIServerError { + pub status_code: APIServerStatusCode, + pub message: String, +} + +impl From for APIServerError { + fn from(err: FEventBusError) -> Self { + match err { + FEventBusError::Timeout(msg) => APIServerError::timeout(msg.as_str()), + _ => APIServerError::internal_error(err.to_string().as_str()), + } + } +} + +// todo! 使用macro实现 +impl APIServerError { + pub fn new(t: APIServerStatusCode, msg: &str) -> Self { + Self { + status_code: t, + message: msg.to_string(), + } + } + + pub fn internal_error(message: &str) -> Self { + Self::new(APIServerStatusCode::InternalError, message) + } + + pub fn not_found(message: &str) -> APIServerError { + Self::new(APIServerStatusCode::NotFound, message) + } + + pub fn bad_request(message: &str) -> APIServerError { + Self::new(APIServerStatusCode::BadRequest, message) + } + + pub fn duplicated(message: &str) -> APIServerError { + Self::new(APIServerStatusCode::Duplicated, message) + } + + pub fn timeout(message: &str) -> APIServerError { + Self::new(APIServerStatusCode::Timeout, message) + } +} + +impl Display for APIServerError { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + std::fmt::Debug::fmt(&self, f) + } +} + +impl Error for APIServerError {} + +impl From for APIServerError { + fn from(error: diesel::result::Error) -> Self { + Self::internal_error(error.to_string().as_str()) + } +} + +pub type APIServerResult = Result; + +// todo! 
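The error type above funnels diesel and feventbus failures into a single APIServerResult; a hedged illustration of how the helper constructors and the From impls compose (the lookup function is hypothetical):

fn lookup(found: bool) -> APIServerResult<serde_json::Value> {
    if !found {
        // maps to APIServerStatusCode::NotFound (40004)
        return Err(APIServerError::not_found("resource not found"));
    }
    Ok(serde_json::json!({ "ok": true }))
}

fn caller() -> APIServerResult<()> {
    let _value = lookup(true)?; // diesel/feventbus errors convert automatically via the From impls
    Ok(())
}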
之后修改成缓存模块 +// 定义全局哈希表来获取model名 +static GLOBAL_HASHMAP: LazyLock>> = LazyLock::new(|| { + let mut map = HashMap::new(); + map.insert("pods".to_string(), "Pod".to_string()); + map.insert("nodes".to_string(), "Node".to_string()); + map.insert("jobs".to_string(), "Job".to_string()); + Mutex::new(map) +}); + +struct MetadataCache {} + +impl MetadataCache { + fn insert_key_value(key: &str, value: &str) { + let mut map = GLOBAL_HASHMAP.lock().unwrap(); + map.insert(key.to_string(), value.to_string()); + } + + fn get_map() -> HashMap { + let map = GLOBAL_HASHMAP.lock().unwrap(); + map.clone() + } +} + + +#[derive(Clone)] +pub struct APIServerService {} + +#[derive(Clone, Debug, Serialize, Deserialize)] +pub enum PluralOrKind { + Plural(String), + Kind(String), +} + +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct APIServerServiceParams { + pub group: Option, + pub plural_or_kind: PluralOrKind, + pub version: String, + pub namespace: Option, + pub name: Option, +} + +impl APIServerServiceParams { + pub fn from_resource(resource: &T) -> APIServerServiceParams + where + T: FleetResource, + { + let kind = resource.get_kind(); + let metadata = resource.get_metadata(); + let api_version = resource.get_api_version(); + let mut builder = APIServerServiceParamsBuilder::new() + .kind(kind.to_string()) + .version(api_version.version.clone()) + .name(metadata.name.clone()); + if metadata.namespace.is_some() { + builder = builder.namespace(metadata.namespace.clone().unwrap()); + } + if api_version.group.is_some() { + builder = builder.group(api_version.group.clone().unwrap()); + } + builder.build().unwrap() + } +} + +impl From for APIServerServiceParams { + fn from(params: K8sStylePathParams) -> Self { + APIServerServiceParams { + group: params.group, + plural_or_kind: PluralOrKind::Plural(params.plural), + version: params.version, + namespace: params.namespace, + name: params.name, + } + } +} + +pub struct APIServerServiceParamsBuilder { + pub group: Option, + pub plural_or_kind: Option, + pub version: Option, + pub namespace: Option, + pub name: Option, +} + +impl APIServerServiceParamsBuilder { + pub fn new() -> Self { + Self { + group: None, + plural_or_kind: None, + version: None, + namespace: None, + name: None, + } + } + + pub fn group(mut self, group: String) -> Self { + self.group = Some(group); + self + } + + pub fn plural(mut self, plural: String) -> Self { + self.plural_or_kind = Some(PluralOrKind::Plural(plural)); + self + } + + pub fn kind(mut self, kind: String) -> Self { + self.plural_or_kind = Some(PluralOrKind::Kind(kind)); + self + } + + pub fn version(mut self, version: String) -> Self { + self.version = Some(version); + self + } + + pub fn namespace(mut self, namespace: String) -> Self { + self.namespace = Some(namespace); + self + } + + pub fn name(mut self, name: String) -> Self { + self.name = Some(name); + self + } + + pub fn build(self) -> anyhow::Result { + if self.plural_or_kind.is_none() { + return Err(anyhow::anyhow!("plural_or_kind is required")); + } + if self.version.is_none() { + return Err(anyhow::anyhow!("version is required")); + } + Ok(APIServerServiceParams { + group: self.group, + plural_or_kind: self.plural_or_kind.unwrap(), + version: self.version.unwrap(), + namespace: self.namespace, + name: self.name, + }) + } +} + +impl APIServerService { + pub fn new() -> Self { + Self {} + } + + pub async fn create_resource( + &self, + params: APIServerServiceParams, + data: Value, + app_state: Arc, + ) -> APIServerResult { + let ServiceCtx { mut db_conn, 
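The plural-to-kind cache starts with three built-ins and grows when CRDs are registered; a small illustrative lookup helper (hypothetical, and only valid inside services.rs because MetadataCache is private):

fn kind_for(plural: &str) -> Option<String> {
    // e.g. "jobs" -> "Job"; CRD creation adds entries via MetadataCache::insert_key_value
    MetadataCache::get_map().get(plural).cloned()
}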
watch_event_publisher, api_version_str, plural, namespace, .. } = Self::prepare_ctx(params, app_state, Some(false))?; + + if plural == "crds" { + // 处理 CRD 资源注册 + let item_kind = data + .get("spec") + .and_then(|spec| spec.get("names")) + .and_then(|names| names.get("plural")) + .and_then(|plural| plural.as_str()) + .unwrap_or("error"); + + let kind_upper = data + .get("spec") + .and_then(|spec| spec.get("names")) + .and_then(|names| names.get("kind")) + .and_then(|kind| kind.as_str()) + .unwrap_or("error"); + + let metadata_exists = check_metadata(db_conn.get_mut(), item_kind, api_version_str.as_str(), namespace.is_some()) + .await?; + + if metadata_exists { + return Err(APIServerError::duplicated("该CRD资源已存在,无需重复注册")); + } + + insert_metadata(db_conn.get_mut(), item_kind, api_version_str.as_str(), namespace.is_some(), &data) + .await?; + + MetadataCache::insert_key_value(item_kind, kind_upper); + } else { + // 检查 metadata 是否存在 + Self::check_metadata_exists(db_conn.get_mut(), plural.as_str(), api_version_str.as_str(), namespace.is_some()) + .await?; + + // 检查资源是否已存在 + let item_name = data + .get("metadata") + .and_then(|metadata| metadata.get("name")) + .and_then(|name| name.as_str()) + .unwrap_or("error"); + + let kine_exists = check_kine( + db_conn.get_mut(), + plural.as_str(), + item_name, + api_version_str.as_str(), + namespace.as_deref(), + ).await?; + + if kine_exists { + return Err(APIServerError::duplicated("该资源已存在,请勿重复创建")); + } + + insert_kine( + db_conn.get_mut(), + plural.as_str(), + item_name, + &data, + api_version_str.as_str(), + namespace.as_deref(), + ).await?; + } + watch_event_publisher.publish_create_event(data.clone()).await; + Ok(data) + } + + pub async fn delete_resource( + &self, + params: APIServerServiceParams, + app_state: Arc, + ) -> APIServerResult { + let ServiceCtx { mut db_conn, watch_event_publisher, api_version_str, plural, namespace, name, .. } = Self::prepare_ctx(params, app_state, Some(true))?; + let name = name.unwrap(); + + // 检查 metadata + Self::check_metadata_exists(db_conn.get_mut(), plural.as_str(), api_version_str.as_str(), namespace.is_some()) + .await?; + + // 检查资源是否存在 + let resource_json_string = get_data_from_kine( + db_conn.get_mut(), + plural.as_str(), + name.as_str(), + api_version_str.as_str(), + namespace.as_deref(), + ).await?; + if resource_json_string.is_none() { + return Err(APIServerError::not_found("指定资源不存在")); + } + let resource = match serde_json::from_str::(resource_json_string.unwrap().as_str()) { + Ok(json_data) => Ok(json_data), + Err(_) => Err(APIServerError::internal_error("资源格式错误,无法解析为 JSON")), + }?; + + // 删除资源 + let deleted = delete_from_kine( + db_conn.get_mut(), + plural.as_str(), + name.as_str(), + api_version_str.as_str(), + namespace.as_deref(), + ).await?; + + if !deleted { + return Err(APIServerError::internal_error("指定资源不存在")); + } + + watch_event_publisher.publish_delete_event(resource.clone()).await; + Ok(resource) + } + + pub async fn update_resource( + &self, + params: APIServerServiceParams, + data: Value, + app_state: Arc, + ) -> APIServerResult { + let ServiceCtx { mut db_conn, watch_event_publisher, api_version_str, plural, namespace, name, .. 
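A hedged sketch of a caller of create_resource, roughly equivalent to the removed POST /apis/batch/v1/namespaces/global/jobs test above; the body is trimmed and Arc<AppState> is assumed to be the elided pointer type:

async fn create_example(app_state: std::sync::Arc<AppState>) -> APIServerResult<serde_json::Value> {
    let params = APIServerServiceParamsBuilder::new()
        .group("batch".into())
        .plural("jobs".into())
        .version("v1".into())
        .namespace("global".into())
        .build()
        .map_err(|e| APIServerError::bad_request(&e.to_string()))?; // no .name(): create rejects one
    let body = serde_json::json!({
        "apiVersion": "batch/v1",
        "kind": "Job",
        "metadata": { "name": "job-case-01" },
        "spec": {}
    });
    // Success echoes the body back and publishes a create watch event; an existing
    // resource comes back as APIServerStatusCode::Duplicated.
    APIServerService::new().create_resource(params, body, app_state).await
}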
} = Self::prepare_ctx(params, app_state, Some(true))?; + let name = name.unwrap(); + + // 检查 metadata 是否存在 + Self::check_metadata_exists(db_conn.get_mut(), plural.as_str(), api_version_str.as_str(), namespace.is_some()) + .await?; + + // 检查资源是否存在 + let kine_exists = check_kine( + db_conn.get_mut(), + plural.as_str(), + name.as_str(), + api_version_str.as_str(), + namespace.as_deref(), + ).await?; + + if !kine_exists { + return Err(APIServerError::internal_error("指定资源不存在")); + } + + // 更新资源 + let updated = update_data_in_kine( + db_conn.get_mut(), + plural.as_str(), + name.as_str(), + api_version_str.as_str(), + namespace.as_deref(), + &data, + ).await?; + + if !updated { + return Err(APIServerError::internal_error("更新资源失败")); + } + + watch_event_publisher.publish_update_event(data.clone()).await; + Ok(data) + } + + pub async fn get_resource( + &self, + params: APIServerServiceParams, + _query: Value, + app_state: Arc, + ) -> APIServerResult { + let ServiceCtx { mut db_conn, api_version_str, plural, namespace, name, .. } = Self::prepare_ctx(params, app_state, None)?; + // 检查 metadata 是否存在 + Self::check_metadata_exists(db_conn.get_mut(), plural.as_str(), api_version_str.as_str(), namespace.is_some()) + .await?; + + if name.is_some() { + // 如果name不为空,查询单个resource数据 + if let Some(data) = get_data_from_kine( + db_conn.get_mut(), + plural.as_str(), + name.unwrap().as_str(), + api_version_str.as_str(), + namespace.as_deref(), + ).await? { + // 将字符串解析为 JSON 格式 + return match serde_json::from_str::(&data) { + Ok(json_data) => Ok(json_data), + Err(_) => Err(APIServerError::internal_error("资源格式错误,无法解析为 JSON")), + }; + } + // 如果资源不存在 + Err(APIServerError::not_found("指定资源不存在")) + } else { + // 如果name为空,获取资源列表 + let data_list = get_all_data_from_kine(db_conn.get_mut(), plural.as_str(), api_version_str.as_str(), namespace.as_deref()) + .await?; + + // 将所有字符串解析为 JSON 格式并收集到数组中 + let json_array: Vec = data_list + .into_iter() + .filter_map(|data_str| serde_json::from_str(&data_str).ok()) // 解析成功的数据 + .collect(); + // 返回结果 + Ok(json!(json_array)) + } + } + + pub async fn patch_resource( + &self, + params: APIServerServiceParams, + data: Value, + app_state: Arc, + ) -> APIServerResult { + let ServiceCtx { mut db_conn, watch_event_publisher, api_version_str, plural, namespace, name, .. 
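get_resource doubles as "get one" and "list": with name set it returns the stored object (or NotFound), without it a JSON array. An illustrative call under the same assumptions as the sketch above:

async fn list_nodes(app_state: std::sync::Arc<AppState>) -> APIServerResult<serde_json::Value> {
    let params = APIServerServiceParamsBuilder::new()
        .plural("nodes".into())
        .version("v1".into()) // no namespace, no name -> list every node
        .build()
        .map_err(|e| APIServerError::bad_request(&e.to_string()))?;
    // the second argument is the (currently unused) query document
    APIServerService::new().get_resource(params, serde_json::json!({}), app_state).await
}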
} = Self::prepare_ctx(params, app_state, Some(true))?; + let name = name.unwrap(); + Self::check_metadata_exists(db_conn.get_mut(), plural.as_str(), api_version_str.as_str(), namespace.is_some()) + .await?; + + let mut curr_resource = { + let curr_resource = get_data_from_kine( + db_conn.get_mut(), + plural.as_str(), + name.as_str(), + api_version_str.as_str(), + namespace.as_deref(), + ).await?; + if curr_resource.is_none() { + return Err(APIServerError::not_found("指定资源不存在")); + } + // 将字符串解析为 JSON 格式 + match serde_json::from_str::(&curr_resource.unwrap()) { + Ok(json_data) => json_data, + Err(_) => return Err(APIServerError::internal_error("现有资源数据格式错误,无法解析为JSON")), + } + }; + let patch_json = data; + json_patch::merge(&mut curr_resource, &patch_json); + let updated = update_data_in_kine( + db_conn.get_mut(), + plural.as_str(), + name.as_str(), + api_version_str.as_str(), + namespace.as_deref(), + &curr_resource, + ).await?; + if !updated { + return Err(APIServerError::internal_error("在Patch时更新资源失败")); + } + watch_event_publisher.publish_update_event(curr_resource.clone()).await; + Ok(curr_resource) + } +} + +struct ServiceCtx { + db_conn: RefCell, + watch_event_publisher: Arc, + api_version_str: String, + plural: String, + namespace: Option, + name: Option, +} + +impl APIServerService { + fn prepare_ctx(params: APIServerServiceParams, app_state: Arc, name_required: Option) -> APIServerResult { + // 获取 path 参数 + let APIServerServiceParams { group, version, plural_or_kind, namespace, name } = params; + if name_required.is_some_and(|r| r) && name.is_none() { + return Err(APIServerError::bad_request("name参数未指定")); + } + if name_required.is_some_and(|r| !r) && name.is_some() { + return Err(APIServerError::bad_request("name参数不应指定")); + } + let res = app_state.db_pool.get_connection(); + if let Err(e) = res { + log::error!("error getting db conn: {}", e); + return Err(APIServerError::internal_error("DB pool error")); + } + let conn = res.unwrap(); + let watch_event_publisher = app_state.watch_event_publisher.clone(); + let plural_kind_map = MetadataCache::get_map(); + let kind_plural_map = plural_kind_map.iter().map(|(k, v)| (v.clone(), k.clone())).collect::>(); + let plural = match plural_or_kind { + PluralOrKind::Plural(p) => { + p.to_string() + } + PluralOrKind::Kind(k) => { + let plural = kind_plural_map.get(k.as_str()); + if plural.is_none() { + return Err(APIServerError::bad_request("未知的资源类型")); + } + plural.unwrap().clone() + } + }; + Ok(ServiceCtx { + db_conn: RefCell::new(conn), + watch_event_publisher, + api_version_str: APIVersion { + group, + version, + }.to_string(), + plural, + namespace, + name, + }) + } + + async fn check_metadata_exists(db_conn: &mut DbConnection, plural: &str, api_version_str: &str, namespaced: bool) -> APIServerResult<()> { + let metadata_exists = check_metadata(db_conn, plural, api_version_str, namespaced) + .await?; + + if !metadata_exists { + return Err(APIServerError::not_found("该plural不存在,请检查plural版本以及是否需要namespace")); + } + Ok(()) + } +} diff --git a/src/db/check_exist.rs b/src/db/check_exist.rs new file mode 100644 index 0000000000000000000000000000000000000000..c04cf76795074161eeff4cebc2ef46f21d145626 --- /dev/null +++ b/src/db/check_exist.rs @@ -0,0 +1,193 @@ +/** + * Copyright (2024, ) Institute of Software, Chinese Academy of Sciences + * author: chenhongyu23@otcaix.iscas.ac.cn, wuheng@iscas.ac.cn + * since: 0.1.0 + * +**/ +use diesel::{QueryDsl, QueryResult, RunQueryDsl, ExpressionMethods}; +use crate::db::db::DbConnection; + +// 查询 metadata 表中的 
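patch_resource applies the request body with json_patch::merge, i.e. RFC 7386 merge-patch semantics: nested objects are merged, an explicit null deletes a key, anything else replaces. A standalone demonstration:

#[test]
fn merge_patch_semantics() {
    use serde_json::json;
    let mut doc = json!({"spec": {"replicas": 1, "paused": false}});
    json_patch::merge(&mut doc, &json!({"spec": {"replicas": 3, "paused": null}}));
    // replicas is replaced, paused is removed by the explicit null
    assert_eq!(doc, json!({"spec": {"replicas": 3}}));
}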
plural 是否存在,并检查 namespace 要求是否满足 +pub async fn check_metadata( + conn: &mut DbConnection, + plural: &str, + version: &str, + requires_namespace: bool, +) -> QueryResult { + use diesel::dsl::count_star; + use crate::schema::metadata::dsl as metadata_dsl; + use crate::schema::metadata_replica1::dsl as replica1_dsl; + use crate::schema::metadata_replica2::dsl as replica2_dsl; + + let count; + let count_replica1; + let count_replica2; + match conn { + DbConnection::Pg(pg_conn) => { + count = metadata_dsl::metadata + .filter(metadata_dsl::name.eq(plural)) + .filter(metadata_dsl::apigroup.eq(version)) + .filter(metadata_dsl::namespace.eq(requires_namespace)) + .select(count_star()) + .first::(pg_conn)?; + + count_replica1 = replica1_dsl::metadata_replica1 + .filter(replica1_dsl::name.eq(plural)) + .filter(replica1_dsl::apigroup.eq(version)) + .filter(replica1_dsl::namespace.eq(requires_namespace)) + .select(count_star()) + .first::(pg_conn)?; + + count_replica2 = replica2_dsl::metadata_replica2 + .filter(replica2_dsl::name.eq(plural)) + .filter(replica2_dsl::apigroup.eq(version)) + .filter(replica2_dsl::namespace.eq(requires_namespace)) + .select(count_star()) + .first::(pg_conn)?; + }, + DbConnection::Sqlite(sqlite_conn) => { + count = metadata_dsl::metadata + .filter(metadata_dsl::name.eq(plural)) + .filter(metadata_dsl::apigroup.eq(version)) + .filter(metadata_dsl::namespace.eq(requires_namespace)) + .select(count_star()) + .first::(sqlite_conn)?; + + count_replica1 = replica1_dsl::metadata_replica1 + .filter(replica1_dsl::name.eq(plural)) + .filter(replica1_dsl::apigroup.eq(version)) + .filter(replica1_dsl::namespace.eq(requires_namespace)) + .select(count_star()) + .first::(sqlite_conn)?; + + count_replica2 = replica2_dsl::metadata_replica2 + .filter(replica2_dsl::name.eq(plural)) + .filter(replica2_dsl::apigroup.eq(version)) + .filter(replica2_dsl::namespace.eq(requires_namespace)) + .select(count_star()) + .first::(sqlite_conn)?; + } + } + let positive_count = [count, count_replica1, count_replica2].iter().filter(|&&x| x > 0).count(); + Ok(positive_count >= 2) +} + + + +// 查询 kine 表中指定的数据是否存在 +pub async fn check_kine( + conn: &mut DbConnection, + item_kind: &str, + item_name: &str, + item_version: &str, + item_namespace: Option<&str>, +) -> QueryResult { + use diesel::dsl::count_star; + use crate::schema::kine::dsl as kine_dsl; + use crate::schema::kine_replica1::dsl as replica1_dsl; + use crate::schema::kine_replica2::dsl as replica2_dsl; + + let count; + let count_replica1; + let count_replica2; + match conn { + DbConnection::Pg(pg_conn) => { + if let Some(_) = item_namespace { + count = kine_dsl::kine + .filter(kine_dsl::kind.eq(item_kind)) + .filter(kine_dsl::name.eq(item_name)) + .filter(kine_dsl::apigroup.eq(item_version)) + .filter(kine_dsl::namespace.eq(item_namespace)) + .select(count_star()) + .first::(pg_conn)?; + + count_replica1 = replica1_dsl::kine_replica1 + .filter(replica1_dsl::kind.eq(item_kind)) + .filter(replica1_dsl::name.eq(item_name)) + .filter(replica1_dsl::apigroup.eq(item_version)) + .filter(replica1_dsl::namespace.eq(item_namespace)) + .select(count_star()) + .first::(pg_conn)?; + + count_replica2 = replica2_dsl::kine_replica2 + .filter(replica2_dsl::kind.eq(item_kind)) + .filter(replica2_dsl::name.eq(item_name)) + .filter(replica2_dsl::apigroup.eq(item_version)) + .filter(replica2_dsl::namespace.eq(item_namespace)) + .select(count_star()) + .first::(pg_conn)?; + } else { + count = kine_dsl::kine + .filter(kine_dsl::kind.eq(item_kind)) + 
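check_metadata and check_kine query the base table plus its two replicas and treat existence as a 2-of-3 quorum. The decision rule, isolated in a hypothetical helper:

fn quorum_exists(counts: [i64; 3]) -> bool {
    // quorum_exists([1, 1, 0]) == true, quorum_exists([1, 0, 0]) == false
    counts.iter().filter(|&&c| c > 0).count() >= 2
}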
.filter(kine_dsl::name.eq(item_name)) + .filter(kine_dsl::apigroup.eq(item_version)) + .select(count_star()) + .first::(pg_conn)?; + + count_replica1 = replica1_dsl::kine_replica1 + .filter(replica1_dsl::kind.eq(item_kind)) + .filter(replica1_dsl::name.eq(item_name)) + .filter(replica1_dsl::apigroup.eq(item_version)) + .select(count_star()) + .first::(pg_conn)?; + + count_replica2 = replica2_dsl::kine_replica2 + .filter(replica2_dsl::kind.eq(item_kind)) + .filter(replica2_dsl::name.eq(item_name)) + .filter(replica2_dsl::apigroup.eq(item_version)) + .select(count_star()) + .first::(pg_conn)?; + } + }, + DbConnection::Sqlite(sqlite_conn) => { + if let Some(_) = item_namespace { + count = kine_dsl::kine + .filter(kine_dsl::kind.eq(item_kind)) + .filter(kine_dsl::name.eq(item_name)) + .filter(kine_dsl::apigroup.eq(item_version)) + .filter(kine_dsl::namespace.eq(item_namespace)) + .select(count_star()) + .first::(sqlite_conn)?; + + count_replica1 = replica1_dsl::kine_replica1 + .filter(replica1_dsl::kind.eq(item_kind)) + .filter(replica1_dsl::name.eq(item_name)) + .filter(replica1_dsl::apigroup.eq(item_version)) + .filter(replica1_dsl::namespace.eq(item_namespace)) + .select(count_star()) + .first::(sqlite_conn)?; + + count_replica2 = replica2_dsl::kine_replica2 + .filter(replica2_dsl::kind.eq(item_kind)) + .filter(replica2_dsl::name.eq(item_name)) + .filter(replica2_dsl::apigroup.eq(item_version)) + .filter(replica2_dsl::namespace.eq(item_namespace)) + .select(count_star()) + .first::(sqlite_conn)?; + } else { + count = kine_dsl::kine + .filter(kine_dsl::kind.eq(item_kind)) + .filter(kine_dsl::name.eq(item_name)) + .filter(kine_dsl::apigroup.eq(item_version)) + .select(count_star()) + .first::(sqlite_conn)?; + + count_replica1 = replica1_dsl::kine_replica1 + .filter(replica1_dsl::kind.eq(item_kind)) + .filter(replica1_dsl::name.eq(item_name)) + .filter(replica1_dsl::apigroup.eq(item_version)) + .select(count_star()) + .first::(sqlite_conn)?; + + count_replica2 = replica2_dsl::kine_replica2 + .filter(replica2_dsl::kind.eq(item_kind)) + .filter(replica2_dsl::name.eq(item_name)) + .filter(replica2_dsl::apigroup.eq(item_version)) + .select(count_star()) + .first::(sqlite_conn)?; + } + } + } + let positive_count = [count, count_replica1, count_replica2].iter().filter(|&&x| x > 0).count(); + Ok(positive_count >= 2) +} \ No newline at end of file diff --git a/src/cores/db.rs b/src/db/db.rs similarity index 97% rename from src/cores/db.rs rename to src/db/db.rs index 6e9fd6adbd82175cc54d2144199bfa68cdb021fc..3deead43220153d1b6338c481d997269b5aadaa6 100644 --- a/src/cores/db.rs +++ b/src/db/db.rs @@ -1,3 +1,10 @@ +/** + * Copyright (2024, ) Institute of Software, Chinese Academy of Sciences + * author: chenhongyu23@otcaix.iscas.ac.cn, wuheng@iscas.ac.cn + * since: 0.1.0 + * +**/ + use std::collections::HashMap; use chrono::Utc; use diesel::pg::PgConnection; @@ -7,9 +14,9 @@ use diesel::r2d2::{ConnectionManager, Pool, PooledConnection}; use diesel_migrations::{embed_migrations, EmbeddedMigrations, MigrationHarness}; use serde_json::{json, Value}; use diesel::connection::Connection; - pub const MIGRATIONS: EmbeddedMigrations = embed_migrations!(); +#[derive(Debug)] pub enum DbPool { Pg(Pool>), Sqlite(Pool>), @@ -24,12 +31,12 @@ pub enum DbConnection { fn resource_templates() -> HashMap<&'static str, (bool, Value)> { let mut templates = HashMap::new(); templates.insert( - "cargos", + "pods", ( true, json!({ "apiVersion": "v1", - "kind": "Cargo", + "kind": "Pod", "metadata": { "name": "string", 
"annotations": "object" diff --git a/src/db/delete.rs b/src/db/delete.rs new file mode 100644 index 0000000000000000000000000000000000000000..bc646b73fb8f52287a977b252529c109ea753c12 --- /dev/null +++ b/src/db/delete.rs @@ -0,0 +1,92 @@ +/** + * Copyright (2024, ) Institute of Software, Chinese Academy of Sciences + * author: chenhongyu23@otcaix.iscas.ac.cn, wuheng@iscas.ac.cn + * since: 0.1.0 + * +**/ +use diesel::{QueryResult, RunQueryDsl}; +use crate::db::db::DbConnection; + +// 从 kine 表中删除特定 name 的记录 +pub async fn delete_from_kine( + conn: &mut DbConnection, + item_kind: &str, + item_name: &str, + item_version: &str, + item_namespace: Option<&str>, +) -> QueryResult { + use diesel::sql_types::Text; + + // 表名列表 + let tables = ["kine", "kine_replica1", "kine_replica2"]; + + // 遍历每个表,执行删除操作 + let mut total_rows_affected = 0; + + for &table in &tables { + let delete_query = if let Some(_) = item_namespace { + match conn { + DbConnection::Pg(_) => format!( + "DELETE FROM {} WHERE kind = $1 AND name = $2 AND namespace = $3 AND apigroup = $4", + table + ), + DbConnection::Sqlite(_) => format!( + "DELETE FROM {} WHERE kind = ? AND name = ? AND namespace = ? AND apigroup = ?", + table + ), + } + } else { + match conn { + DbConnection::Pg(_) => format!( + "DELETE FROM {} WHERE kind = $1 AND name = $2 AND apigroup = $3", + table + ), + DbConnection::Sqlite(_) => format!( + "DELETE FROM {} WHERE kind = ? AND name = ? AND apigroup = ?", + table + ), + } + }; + + // 执行删除 + let rows_affected = match conn { + DbConnection::Pg(pg_conn) => { + if let Some(namespace) = item_namespace { + diesel::sql_query(delete_query) + .bind::(item_kind) + .bind::(item_name) + .bind::(namespace) + .bind::(item_version) + .execute(pg_conn)? + } else { + diesel::sql_query(delete_query) + .bind::(item_kind) + .bind::(item_name) + .bind::(item_version) + .execute(pg_conn)? + } + } + DbConnection::Sqlite(sqlite_conn) => { + if let Some(namespace) = item_namespace { + diesel::sql_query(delete_query) + .bind::(item_kind) + .bind::(item_name) + .bind::(namespace) + .bind::(item_version) + .execute(sqlite_conn)? + } else { + diesel::sql_query(delete_query) + .bind::(item_kind) + .bind::(item_name) + .bind::(item_version) + .execute(sqlite_conn)? 
+ } + } + }; + + total_rows_affected += rows_affected; + } + + // 如果至少有两个表进行了删除,则返回 true + Ok(total_rows_affected > 1) +} \ No newline at end of file diff --git a/src/db/get.rs b/src/db/get.rs new file mode 100644 index 0000000000000000000000000000000000000000..b07433a5cd8480a4d4397a757a76e6bc5e9c88e8 --- /dev/null +++ b/src/db/get.rs @@ -0,0 +1,313 @@ +/** + * Copyright (2024, ) Institute of Software, Chinese Academy of Sciences + * author: chenhongyu23@otcaix.iscas.ac.cn, wuheng@iscas.ac.cn + * since: 0.1.0 + * +**/ +use diesel::{OptionalExtension, QueryResult, QueryableByName, RunQueryDsl}; +use diesel::sql_types::Text; +use crate::db::db::DbConnection; + +// 辅助查询函数,用于获取数据的 `data` 字段 +#[derive(QueryableByName)] +struct DataResult { + #[diesel(sql_type = Text)] + data: String, +} +pub async fn get_data_from_kine( + conn: &mut DbConnection, + item_kind: &str, + item_name: &str, + item_version: &str, + item_namespace: Option<&str>, +) -> QueryResult> { + use diesel::sql_types::Text; + use std::collections::HashMap; + + // 表名列表 + let tables = ["kine", "kine_replica1", "kine_replica2"]; + + // 存储每个表的查询结果 + let mut results = HashMap::new(); + + for &table in &tables { + let select_query = if let Some(_) = item_namespace { + match conn { + DbConnection::Pg(_) => format!( + "SELECT data FROM {} WHERE kind = $1 AND name = $2 AND namespace = $3 AND apigroup = $4", + table + ), + DbConnection::Sqlite(_) => format!( + "SELECT data FROM {} WHERE kind = ? AND name = ? AND namespace = ? AND apigroup = ?", + table + ), + } + } else { + match conn { + DbConnection::Pg(_) => format!( + "SELECT data FROM {} WHERE kind = $1 AND name = $2 AND apigroup = $3", + table + ), + DbConnection::Sqlite(_) => format!( + "SELECT data FROM {} WHERE kind = ? AND name = ? AND apigroup = ?", + table + ), + } + }; + + let data_result = match conn { + DbConnection::Pg(pg_conn) => { + if let Some(namespace) = item_namespace { + diesel::sql_query(select_query) + .bind::(item_kind) + .bind::(item_name) + .bind::(namespace) + .bind::(item_version) + .get_result::(pg_conn) + .optional()? + .map(|res| res.data) + } else { + diesel::sql_query(select_query) + .bind::(item_kind) + .bind::(item_name) + .bind::(item_version) + .get_result::(pg_conn) + .optional()? + .map(|res| res.data) + } + } + DbConnection::Sqlite(sqlite_conn) => { + if let Some(namespace) = item_namespace { + diesel::sql_query(select_query) + .bind::(item_kind) + .bind::(item_name) + .bind::(namespace) + .bind::(item_version) + .get_result::(sqlite_conn) + .optional()? + .map(|res| res.data) + } else { + diesel::sql_query(select_query) + .bind::(item_kind) + .bind::(item_name) + .bind::(item_version) + .get_result::(sqlite_conn) + .optional()? 
+ .map(|res| res.data) + } + } + }; + + if let Some(data) = data_result { + *results.entry(data).or_insert(0) += 1; + } + } + + // 按少数服从多数规则返回数据 + if results.len() == 1 { + // 如果三个表的结果一致,直接返回任意结果 + Ok(results.into_iter().next().map(|(data, _)| data)) + } else if results.values().all(|&count| count == 1) { + // 如果所有表结果不同,直接回退到 kine 表的数据 + get_data_from_kine_primary(conn, item_kind, item_name, item_version, item_namespace) + } else if let Some((data, _)) = results.into_iter().max_by_key(|&(_, count)| count) { + // 如果有多数一致的数据,返回该数据 + Ok(Some(data)) + } else { + // 默认回退到 kine 表的数据 + get_data_from_kine_primary(conn, item_kind, item_name, item_version, item_namespace) + } +} + +/// 获取主表 `kine` 的数据 +fn get_data_from_kine_primary( + conn: &mut DbConnection, + item_kind: &str, + item_name: &str, + item_version: &str, + item_namespace: Option<&str>, +) -> QueryResult> { + let fallback_query = if let Some(_) = item_namespace { + match conn { + DbConnection::Pg(_) => "SELECT data FROM kine WHERE kind = $1 AND name = $2 AND namespace = $3 AND apigroup = $4".to_string(), + DbConnection::Sqlite(_) => "SELECT data FROM kine WHERE kind = ? AND name = ? AND namespace = ? AND apigroup = ?".to_string(), + } + } else { + match conn { + DbConnection::Pg(_) => "SELECT data FROM kine WHERE kind = $1 AND name = $2 AND apigroup = $3".to_string(), + DbConnection::Sqlite(_) => "SELECT data FROM kine WHERE kind = ? AND name = ? AND apigroup = ?".to_string(), + } + }; + + match conn { + DbConnection::Pg(pg_conn) => { + if let Some(namespace) = item_namespace { + diesel::sql_query(fallback_query) + .bind::(item_kind) + .bind::(item_name) + .bind::(namespace) + .bind::(item_version) + .get_result::(pg_conn) + .optional() + .map(|res| res.map(|data_result| data_result.data)) + } else { + diesel::sql_query(fallback_query) + .bind::(item_kind) + .bind::(item_name) + .bind::(item_version) + .get_result::(pg_conn) + .optional() + .map(|res| res.map(|data_result| data_result.data)) + } + } + DbConnection::Sqlite(sqlite_conn) => { + if let Some(namespace) = item_namespace { + diesel::sql_query(fallback_query) + .bind::(item_kind) + .bind::(item_name) + .bind::(namespace) + .bind::(item_version) + .get_result::(sqlite_conn) + .optional() + .map(|res| res.map(|data_result| data_result.data)) + } else { + diesel::sql_query(fallback_query) + .bind::(item_kind) + .bind::(item_name) + .bind::(item_version) + .get_result::(sqlite_conn) + .optional() + .map(|res| res.map(|data_result| data_result.data)) + } + } + } +} + + + + +// 辅助函数:从指定表中获取所有符合条件的数据的 `data` 字段 +pub async fn get_all_data_from_kine( + conn: &mut DbConnection, + item_kind: &str, + item_version: &str, + item_namespace: Option<&str>, +) -> QueryResult> { + use diesel::sql_types::Text; + use std::collections::HashMap; + + // 定义需要查询的表 + let tables = ["kine", "kine_replica1", "kine_replica2"]; + + // 存储每个表的查询结果 + let mut table_results: HashMap<&str, Vec> = HashMap::new(); + + // 遍历每个表进行查询 + for &table in &tables { + let select_query = if let Some(_) = item_namespace { + match conn { + DbConnection::Pg(_) => format!( + "SELECT data FROM {} WHERE kind = $1 AND namespace = $2 AND apigroup = $3", + table + ), + DbConnection::Sqlite(_) => format!( + "SELECT data FROM {} WHERE kind = ? AND namespace = ? AND apigroup = ?", + table + ), + } + } else { + match conn { + DbConnection::Pg(_) => format!( + "SELECT data FROM {} WHERE kind = $1 AND apigroup = $2", + table + ), + DbConnection::Sqlite(_) => format!( + "SELECT data FROM {} WHERE kind = ? 
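Reads are value-level majority votes: the payloads returned by the three tables are tallied, a 2-of-3 winner is returned, and a three-way disagreement falls back to the primary kine row. The decision rule in isolation (hypothetical helper, with primary standing in for the fallback query):

fn pick_majority(rows: Vec<String>, primary: Option<String>) -> Option<String> {
    use std::collections::HashMap;
    let mut tally: HashMap<String, usize> = HashMap::new();
    for row in rows {
        *tally.entry(row).or_insert(0) += 1;
    }
    match tally.len() {
        0 => None,
        1 => tally.into_keys().next(),                   // all replicas agree
        _ if tally.values().all(|&c| c == 1) => primary, // total disagreement -> primary table
        _ => tally.into_iter().max_by_key(|&(_, c)| c).map(|(value, _)| value),
    }
}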
AND apigroup = ?", + table + ), + } + }; + + // 执行查询 + let results: Vec = match conn { + DbConnection::Pg(pg_conn) => { + if let Some(namespace) = item_namespace { + diesel::sql_query(select_query) + .bind::(item_kind) + .bind::(namespace) + .bind::(item_version) + .load::(pg_conn)? + .into_iter() + .map(|res| res.data) + .collect() + } else { + diesel::sql_query(select_query) + .bind::(item_kind) + .bind::(item_version) + .load::(pg_conn)? + .into_iter() + .map(|res| res.data) + .collect() + } + } + DbConnection::Sqlite(sqlite_conn) => { + if let Some(namespace) = item_namespace { + diesel::sql_query(select_query) + .bind::(item_kind) + .bind::(namespace) + .bind::(item_version) + .load::(sqlite_conn)? + .into_iter() + .map(|res| res.data) + .collect() + } else { + diesel::sql_query(select_query) + .bind::(item_kind) + .bind::(item_version) + .load::(sqlite_conn)? + .into_iter() + .map(|res| res.data) + .collect() + } + } + }; + + table_results.insert(table, results); + } + + // 检查所有表的数据是否一致 + let vec1 = table_results[tables[0]].clone(); + let vec2 = table_results[tables[1]].clone(); + let vec3 = table_results[tables[2]].clone(); + + let max_len = vec1.len().max(vec2.len()).max(vec3.len()); + let mut filtered_results = Vec::new(); + + // 筛选出出现次数大于等于 2 的数据 + for i in 0..max_len { + // 获取当前索引的值,越界的填入 None + let value1 = vec1.get(i); + let value2 = vec2.get(i); + let value3 = vec3.get(i); + + // 统计每个值的出现次数 + let mut counts = HashMap::new(); + if let Some(v) = value1 { + *counts.entry(v).or_insert(0) += 1; + } + if let Some(v) = value2 { + *counts.entry(v).or_insert(0) += 1; + } + if let Some(v) = value3 { + *counts.entry(v).or_insert(0) += 1; + } + + // 找出出现次数大于等于 2 的值 + if let Some((value, &_)) = counts.iter().find(|(_, &count)| count >= 2) { + filtered_results.push((*value).clone()); // 将该值存入结果 + } + + } + + Ok(filtered_results) +} diff --git a/src/db/insert.rs b/src/db/insert.rs new file mode 100644 index 0000000000000000000000000000000000000000..c0b9e3da6cfb2d1e1598fb844ce0520741d9332c --- /dev/null +++ b/src/db/insert.rs @@ -0,0 +1,197 @@ +/** + * Copyright (2024, ) Institute of Software, Chinese Academy of Sciences + * author: chenhongyu23@otcaix.iscas.ac.cn, wuheng@iscas.ac.cn + * since: 0.1.0 + * +**/ +use chrono::Utc; +use diesel::{sql_query, Connection, PgConnection, QueryResult, RunQueryDsl, SqliteConnection}; +use diesel::sql_types::Text; +use serde_json::Value; +use crate::db::db::DbConnection; + +fn insert_metadata_in_transaction_pg( + transaction: &mut PgConnection, + plural: &str, + version: &str, + namespace_required: bool, + json_data: &Value, +) -> QueryResult<()> { + use diesel::sql_types::{Bool}; + + // 表名列表 + let table_array: [&str; 3] = ["metadata", "metadata_replica1", "metadata_replica2"]; + + for table_name in table_array { + // 使用参数绑定构建插入查询 + let insert_metadata_query = format!( + "INSERT INTO {} (name, namespace, apigroup, data, created_time, updated_time) + VALUES ($1, $2, $3, $4, $5, $6) + ON CONFLICT DO NOTHING;", + table_name + ); + + // 执行插入操作 + sql_query(insert_metadata_query) + .bind::(plural) // 名称 + .bind::(namespace_required) // 是否需要命名空间 + .bind::(version) // 版本 + .bind::(json_data.to_string()) // JSON 数据 + .bind::(Utc::now().naive_utc().to_string()) // 创建时间 + .bind::(Utc::now().naive_utc().to_string()) // 更新时间 + .execute(transaction)?; + } + + Ok(()) +} + + +fn insert_metadata_in_transaction_sqlite( + transaction: &mut SqliteConnection, + plural: &str, + version: &str, + namespace_required: bool, + json_data: &Value, +) -> QueryResult<()> { + use 
+
+    // Target tables
+    let table_array: [&str; 3] = ["metadata", "metadata_replica1", "metadata_replica2"];
+
+    for table_name in table_array {
+        // Build the insert statement; values are passed as bind parameters
+        let insert_metadata_query = format!(
+            "INSERT OR IGNORE INTO {} (name, namespace, apigroup, data, created_time, updated_time)
+             VALUES (?, ?, ?, ?, ?, ?);",
+            table_name
+        );
+
+        // Run the insert
+        sql_query(insert_metadata_query)
+            .bind::<Text, _>(plural)                               // name
+            .bind::<Bool, _>(namespace_required)                   // whether a namespace is required
+            .bind::<Text, _>(version)                              // apigroup / version
+            .bind::<Text, _>(json_data.to_string())                // JSON data
+            .bind::<Text, _>(Utc::now().naive_utc().to_string())  // created time
+            .bind::<Text, _>(Utc::now().naive_utc().to_string())  // updated time
+            .execute(transaction)?;
+    }
+
+    Ok(())
+}
+
+
+pub async fn insert_metadata(
+    conn: &mut DbConnection,
+    plural: &str,
+    version: &str,
+    namespace_required: bool,
+    json_data: &Value
+) -> QueryResult<()> {
+    match conn {
+        DbConnection::Pg(pg_conn) => {
+            pg_conn.transaction(|transaction| {
+                insert_metadata_in_transaction_pg(transaction, plural, version, namespace_required, json_data)
+            })
+        }
+        DbConnection::Sqlite(sqlite_conn) => {
+            sqlite_conn.transaction(|transaction| {
+                insert_metadata_in_transaction_sqlite(transaction, plural, version, namespace_required, json_data)
+            })
+        }
+    }.expect("unknown conn in insert_metadata");
+    Ok(())
+}
+
+fn insert_kine_in_transaction_pg(
+    transaction: &mut PgConnection,
+    item_kind: &str,
+    item_name: &str,
+    json_data: &Value,
+    version: &str,
+    namespace: Option<&str>,
+) -> QueryResult<()> {
+
+    // Target tables
+    let table_array: [&str; 3] = ["kine", "kine_replica1", "kine_replica2"];
+
+    for table_name in table_array {
+        // Build the insert statement; values are passed as bind parameters
+        let insert_metadata_query = format!(
+            "INSERT INTO {} (kind, name, namespace, apigroup, data, created_time, updated_time)
+             VALUES ($1, $2, $3, $4, $5, $6, $7)
+             ON CONFLICT DO NOTHING;",
+            table_name
+        );
+
+        // Run the insert
+        sql_query(insert_metadata_query)
+            .bind::<Text, _>(item_kind)
+            .bind::<Text, _>(item_name)
+            .bind::<Text, _>(namespace.unwrap_or(""))              // empty string as the default namespace
+            .bind::<Text, _>(version)
+            .bind::<Text, _>(json_data.to_string())                // serialize the JSON payload to a string
+            .bind::<Text, _>(Utc::now().naive_utc().to_string())  // created time
+            .bind::<Text, _>(Utc::now().naive_utc().to_string())  // updated time
+            .execute(transaction)?;
+    }
+
+    Ok(())
+}
+
+
+fn insert_kine_in_transaction_sqlite(
+    transaction: &mut SqliteConnection,
+    item_kind: &str,
+    item_name: &str,
+    json_data: &Value,
+    version: &str,
+    namespace: Option<&str>,
+) -> QueryResult<()> {
+
+    let table_array: [&str; 3] = ["kine", "kine_replica1", "kine_replica2"];
+
+    for table_name in table_array {
+        let insert_metadata_query = format!(
+            "INSERT OR IGNORE INTO {} (kind, name, namespace, apigroup, data, created_time, updated_time)
+             VALUES (?, ?, ?, ?, ?, ?, ?);",
+            table_name
+        );
+
+        sql_query(insert_metadata_query)
+            .bind::<Text, _>(item_kind)
+            .bind::<Text, _>(item_name)
+            .bind::<Text, _>(namespace.unwrap_or(""))              // empty string when no namespace is given
+            .bind::<Text, _>(version)
+            .bind::<Text, _>(json_data.to_string())
+            .bind::<Text, _>(Utc::now().naive_utc().to_string())
+            .bind::<Text, _>(Utc::now().naive_utc().to_string())
+            .execute(transaction)?;
+    }
+    Ok(())
+}
+
+
+// Insert a new record into the kine tables
+pub async fn insert_kine(
+    conn: &mut DbConnection,
+    item_kind: &str,
+    item_name: &str,
+    json_data: &Value,
+    version: &str,
+    namespace: Option<&str>,
+) -> QueryResult<()> {
+    match conn {
+        DbConnection::Pg(pg_conn) => {
+            pg_conn.transaction(|transaction| {
+                insert_kine_in_transaction_pg(transaction, item_kind, item_name, json_data, version, namespace)
+            })
+        }
+        DbConnection::Sqlite(sqlite_conn) => {
+            sqlite_conn.transaction(|transaction| {
+                insert_kine_in_transaction_sqlite(transaction, item_kind, item_name, json_data, version, namespace)
+            })
+        }
+    }.expect("unknown conn in insert_kine");
+    Ok(())
+}
\ No newline at end of file
diff --git a/src/db/mod.rs b/src/db/mod.rs
new file mode 100644
index 0000000000000000000000000000000000000000..129c1fdb1efc45f5b022a38c488fee83174daebc
--- /dev/null
+++ b/src/db/mod.rs
@@ -0,0 +1,12 @@
+/**
+ * Copyright (2024, ) Institute of Software, Chinese Academy of Sciences
+ * author: chenhongyu23@otcaix.iscas.ac.cn, wuheng@iscas.ac.cn
+ * since: 0.1.0
+ *
+**/
+pub mod db;
+pub mod check_exist;
+pub mod insert;
+pub mod update;
+pub mod get;
+pub mod delete;
\ No newline at end of file
diff --git a/src/db/update.rs b/src/db/update.rs
new file mode 100644
index 0000000000000000000000000000000000000000..d957223f23695895b6bab854fb55bbea8664ce4f
--- /dev/null
+++ b/src/db/update.rs
@@ -0,0 +1,100 @@
+/**
+ * Copyright (2024, ) Institute of Software, Chinese Academy of Sciences
+ * author: chenhongyu23@otcaix.iscas.ac.cn, wuheng@iscas.ac.cn
+ * since: 0.1.0
+ *
+**/
+use diesel::{QueryResult, RunQueryDsl};
+use serde_json::Value;
+use crate::db::db::DbConnection;
+
+pub async fn update_data_in_kine(
+    conn: &mut DbConnection,
+    item_kind: &str,
+    item_name: &str,
+    item_version: &str,
+    item_namespace: Option<&str>,
+    json_data: &Value,
+) -> QueryResult<bool> {
+    use diesel::sql_types::Text;
+    use chrono::Utc;
+
+    // Tables that must be updated
+    let tables = ["kine", "kine_replica1", "kine_replica2"];
+    let mut total_rows_affected = 0;
+
+    for &table in &tables {
+        let update_query = if let Some(_) = item_namespace {
+            match conn {
+                DbConnection::Pg(_) => format!(
+                    "UPDATE {} SET data = $1, updated_time = $2 WHERE kind = $3 AND name = $4 AND namespace = $5 AND apigroup = $6",
+                    table
+                ),
+                DbConnection::Sqlite(_) => format!(
+                    "UPDATE {} SET data = ?, updated_time = ? WHERE kind = ? AND name = ? AND namespace = ? AND apigroup = ?",
+                    table
+                ),
+            }
+        } else {
+            match conn {
+                DbConnection::Pg(_) => format!(
+                    "UPDATE {} SET data = $1, updated_time = $2 WHERE kind = $3 AND name = $4 AND apigroup = $5",
+                    table
+                ),
+                DbConnection::Sqlite(_) => format!(
+                    "UPDATE {} SET data = ?, updated_time = ? WHERE kind = ? AND name = ? AND apigroup = ?",
+                    table
+                ),
+            }
+        };
+
+        // Run the update
+        let rows_affected = match conn {
+            DbConnection::Pg(pg_conn) => {
+                if let Some(namespace) = item_namespace {
+                    diesel::sql_query(update_query)
+                        .bind::<Text, _>(json_data.to_string())
+                        .bind::<Text, _>(Utc::now().naive_utc().to_string())
+                        .bind::<Text, _>(item_kind)
+                        .bind::<Text, _>(item_name)
+                        .bind::<Text, _>(namespace)
+                        .bind::<Text, _>(item_version)
+                        .execute(pg_conn)?
+                } else {
+                    diesel::sql_query(update_query)
+                        .bind::<Text, _>(json_data.to_string())
+                        .bind::<Text, _>(Utc::now().naive_utc().to_string())
+                        .bind::<Text, _>(item_kind)
+                        .bind::<Text, _>(item_name)
+                        .bind::<Text, _>(item_version)
+                        .execute(pg_conn)?
+                }
+            }
+            DbConnection::Sqlite(sqlite_conn) => {
+                if let Some(namespace) = item_namespace {
+                    diesel::sql_query(update_query)
+                        .bind::<Text, _>(json_data.to_string())
+                        .bind::<Text, _>(Utc::now().naive_utc().to_string())
+                        .bind::<Text, _>(item_kind)
+                        .bind::<Text, _>(item_name)
+                        .bind::<Text, _>(namespace)
+                        .bind::<Text, _>(item_version)
+                        .execute(sqlite_conn)?
+                } else {
+                    diesel::sql_query(update_query)
+                        .bind::<Text, _>(json_data.to_string())
+                        .bind::<Text, _>(Utc::now().naive_utc().to_string())
+                        .bind::<Text, _>(item_kind)
+                        .bind::<Text, _>(item_name)
+                        .bind::<Text, _>(item_version)
+                        .execute(sqlite_conn)?
+ } + } + }; + + total_rows_affected += rows_affected; + } + + // 如果至少有两个表更新成功,则返回 true + Ok(total_rows_affected > 1) +} diff --git a/src/lib.rs b/src/lib.rs index 2f3e95c06ca21fcdd490e77673878eafe1fdaa3c..5368cf181328fc044a481465152cd1a37bf561c8 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,31 +1,12 @@ +/** + * Copyright (2024, ) Institute of Software, Chinese Academy of Sciences + * author: chenhongyu23@otcaix.iscas.ac.cn, wuheng@iscas.ac.cn + * since: 0.1.0 + * +**/ + pub mod cores; pub mod schema; +pub mod db; -use cores::apiserver::ApiServer; -use cores::config::DefaultConfig; -use cores::handlers::DefaultHandler; -use cores::db::DbPool; -use std::sync::Arc; - -/// 启动 Web 服务器 -/// -/// # 参数 -/// - `database_url`: 数据库连接字符串 -/// - `address`: 服务监听地址(如 "0.0.0.0:8080") -/// -/// # 返回值 -/// - 异步运行结果 -pub async fn start_server(database_url: &str, address: &str) -> Result<(), Box> { - // 创建数据库连接池 - let db_pool = Arc::new(DbPool::new(database_url)?); - - - - // 配置和启动服务器 - let config = Box::new(DefaultConfig::new()); - let server = ApiServer::new(config); - let handler: DefaultHandler = DefaultHandler::new(); - let _ = Arc::new(server).start(address, handler, db_pool).await?; - Ok(()) -} - +pub use cores::{prepare_app_state, start_server}; diff --git a/src/main.rs b/src/main.rs deleted file mode 100644 index d20e3b95831f74a016a61a7de2a915196a84d47d..0000000000000000000000000000000000000000 --- a/src/main.rs +++ /dev/null @@ -1,17 +0,0 @@ -use std::env; -use dotenv::dotenv; -use fleet_apiserver::start_server; - -#[actix_web::main] -async fn main() { - dotenv().ok(); - - // 从环境变量读取配置 - let database_url = env::var("DATABASE_URL").expect("DATABASE_URL must be set"); - let address = "0.0.0.0:8080"; - - // 启动服务 - if let Err(e) = start_server(&database_url, address).await { - eprintln!("Failed to start server: {}", e); - } -} diff --git a/tests/route_tests.rs b/tests/route_tests.rs new file mode 100644 index 0000000000000000000000000000000000000000..c302d08fb493f82acd8461eb4a38493a4f950129 --- /dev/null +++ b/tests/route_tests.rs @@ -0,0 +1,1400 @@ +// /** +// * Copyright (2024, ) Institute of Software, Chinese Academy of Sciences +// * author: chenhongyu23@otcaix.iscas.ac.cn, wuheng@iscas.ac.cn +// * since: 0.1.0 +// * +// **/ +// +// use actix_web::error::ErrorInternalServerError; +// use feventbus::impls::nats::nats::NatsCli; +// use serde_json::Value; +// use fleet_apiserver::cores::db::DbPool; +// use fleet_apiserver::cores::handlers::{DefaultHandler, EventManager}; +// +// #[cfg(test)] +// mod tests { +// use super::*; +// use actix_web::{test, web, App}; +// use serde_json::json; +// use crate::DbPool; +// use std::sync::Arc; +// use feventbus::traits::controller::EventBus; +// use once_cell::sync::Lazy; +// use crate::EventManager; +// use serial_test::serial; +// use fleet_apiserver::cores::handlers::Handler; +// +// // 使用 Lazy 来初始化静态的共享资源 +// static DB_POOL: Lazy> = Lazy::new(|| { +// let pool = DbPool::new_in_memory().expect("Failed to create in-memory database pool"); +// Arc::new(pool) +// }); +// +// static HANDLER: Lazy> = Lazy::new(|| Arc::new(DefaultHandler::new())); +// +// +// +// #[actix_web::test] +// #[serial] +// async fn test_api_with_namespace() { +// let nats_cli = Arc::new(NatsCli::new().await.unwrap()); +// // 初始化事件管理器 +// let event_manager = EventManager::new(); +// +// let app = test::init_service( +// App::new() +// .route("/api/{version}/namespaces/{namespace}/{plural}", web::post().to({ +// let nats_cli = Arc::clone(&nats_cli); +// let event_manager = 
event_manager.clone(); +// +// move |path, data| { +// let handler = HANDLER.clone(); +// let db_pool = DB_POOL.clone(); +// let nats_cli = Arc::clone(&nats_cli); +// let event_manager = event_manager.clone(); +// +// async move { +// match db_pool.get_connection() { +// Ok(mut conn) => handler.create_api_with_namespace(path, data, &mut conn, nats_cli, event_manager).await, +// Err(_) => Err(ErrorInternalServerError("Failed to get database connection")), +// } +// } +// } +// })) +// .route("/api/{version}/namespaces/{namespace}/{plural}/{name}", web::put().to({ +// let nats_cli = Arc::clone(&nats_cli); +// let event_manager = event_manager.clone(); +// +// move |path, data| { +// let handler = HANDLER.clone(); +// let db_pool = DB_POOL.clone(); +// let nats_cli = Arc::clone(&nats_cli); +// let event_manager = event_manager.clone(); +// +// async move { +// match db_pool.get_connection() { +// Ok(mut conn) => handler.update_api_with_namespace(path, data, &mut conn, nats_cli, event_manager).await, +// Err(_) => Err(ErrorInternalServerError("Failed to get database connection")), +// } +// } +// } +// })) +// .route("/api/{version}/namespaces/{namespace}/{plural}/{name}", web::delete().to({ +// let nats_cli = Arc::clone(&nats_cli); +// let event_manager = event_manager.clone(); +// +// move |path| { +// let handler = HANDLER.clone(); +// let db_pool = DB_POOL.clone(); +// let nats_cli = Arc::clone(&nats_cli); +// let event_manager = event_manager.clone(); +// +// async move { +// match db_pool.get_connection() { +// Ok(mut conn) => handler.delete_api_with_namespace(path, &mut conn, nats_cli, event_manager).await, +// Err(_) => Err(ErrorInternalServerError("Failed to get database connection")), +// } +// } +// } +// })) +// .route("/api/{version}/namespaces/{namespace}/{plural}/{name}", web::get().to({ +// let nats_cli = Arc::clone(&nats_cli); +// +// move |path, data| { +// let handler = HANDLER.clone(); +// let db_pool = DB_POOL.clone(); +// let nats_cli = Arc::clone(&nats_cli); +// +// async move { +// match db_pool.get_connection() { +// Ok(mut conn) => handler.getone_api_with_namespace(path, data, &mut conn, nats_cli).await, +// Err(_) => Err(ErrorInternalServerError("Failed to get database connection")), +// } +// } +// } +// })) +// .route("/api/{version}/namespaces/{namespace}/{plural}", web::get().to({ +// let nats_cli = Arc::clone(&nats_cli); +// move |path, data| { +// let handler = HANDLER.clone(); +// let db_pool = DB_POOL.clone(); +// let nats_cli = Arc::clone(&nats_cli); +// +// async move { +// match db_pool.get_connection() { +// Ok(mut conn) => handler.listall_api_with_namespace(path, data, &mut conn, nats_cli).await, +// Err(_) => Err(ErrorInternalServerError("Failed to get database connection")), +// } +// } +// } +// })) +// ).await; +// +// +// +// +// let create_req_data = json!({ +// "apiVersion": "v1", +// "kind": "Cargo", +// "metadata": { +// "name": "cargo-case-01", +// "annotations": { +// "k8s.v1.cni.cncf.io/networks": "testns1/macvlan-conf-1" +// } +// }, +// "spec": { +// "containers": [ +// { +// "name": "cargo-case-01", +// "image": "g-ubjg5602-docker.pkg.coding.net/iscas-system/containers/busybox:latest", +// "command": [ +// "sleep", +// "3600" +// ] +// } +// ] +// } +// }); +// +// let create_req = test::TestRequest::post() +// .uri("/api/v1/namespaces/global/cargos") +// .set_json(&create_req_data) +// .to_request(); +// +// let create_resp = test::call_service(&app, create_req).await; +// +// assert!(create_resp.status().is_success()); +// +// let 
create_resp_body = test::read_body_json::(create_resp).await; +// assert_eq!(create_resp_body, create_req_data); +// +// +// +// +// +// +// +// let update_req_data = json!({ +// "apiVersion": "v1", +// "kind": "Cargo", +// "metadata": { +// "name": "cargo-case-01", +// "annotations": { +// "k8s.v1.cni.cncf.io/networks": "testns1/macvlan-conf-1" +// } +// }, +// "spec": { +// "containers": [ +// { +// "name": "cargo-case-01", +// "image": "g-ubjg5602-docker.pkg.coding.net/iscas-system/containers/busybox:123", +// "command": [ +// "sleep", +// "3600" +// ] +// } +// ] +// } +// }); +// +// let update_req = test::TestRequest::put() +// .uri("/api/v1/namespaces/global/cargos/cargo-case-01") +// .set_json(&update_req_data) +// .to_request(); +// +// let update_resp = test::call_service(&app, update_req).await; +// +// assert!(update_resp.status().is_success()); +// +// let update_resp_body = test::read_body_json::(update_resp).await; +// assert_eq!(update_resp_body, update_req_data); +// +// +// +// +// let get_req_data = json!({ +// "apiVersion": "v1", +// "kind": "Cargo", +// "metadata": { +// "name": "cargo-case-01", +// "annotations": { +// "k8s.v1.cni.cncf.io/networks": "testns1/macvlan-conf-1" +// } +// }, +// "spec": { +// "containers": [ +// { +// "name": "cargo-case-01", +// "image": "g-ubjg5602-docker.pkg.coding.net/iscas-system/containers/busybox:123", +// "command": [ +// "sleep", +// "3600" +// ] +// } +// ] +// } +// }); +// +// let get_req = test::TestRequest::get() +// .uri("/api/v1/namespaces/global/cargos/cargo-case-01") +// .to_request(); +// +// let get_resp = test::call_service(&app, get_req).await; +// +// assert!(get_resp.status().is_success()); +// +// let get_resp_body = test::read_body_json::(get_resp).await; +// assert_eq!(get_resp_body, get_req_data); +// +// +// +// +// let create_req_data2 = json!({ +// "apiVersion": "v1", +// "kind": "Cargo", +// "metadata": { +// "name": "cargo-case-02", +// "annotations": { +// "k8s.v1.cni.cncf.io/networks": "testns1/macvlan-conf-1" +// } +// }, +// "spec": { +// "containers": [ +// { +// "name": "cargo-case-02", +// "image": "g-ubjg5602-docker.pkg.coding.net/iscas-system/containers/busybox:latest", +// "command": [ +// "sleep", +// "3600" +// ] +// } +// ] +// } +// }); +// +// let create_req2 = test::TestRequest::post() +// .uri("/api/v1/namespaces/global/cargos") +// .set_json(&create_req_data2) +// .to_request(); +// +// let create_resp2 = test::call_service(&app, create_req2).await; +// +// assert!(create_resp2.status().is_success()); +// +// let create_resp_body2 = test::read_body_json::(create_resp2).await; +// assert_eq!(create_resp_body2, create_req_data2); +// +// +// +// let list_req_data = json!([{ +// "apiVersion": "v1", +// "kind": "Cargo", +// "metadata": { +// "name": "cargo-case-01", +// "annotations": { +// "k8s.v1.cni.cncf.io/networks": "testns1/macvlan-conf-1" +// } +// }, +// "spec": { +// "containers": [ +// { +// "name": "cargo-case-01", +// "image": "g-ubjg5602-docker.pkg.coding.net/iscas-system/containers/busybox:123", +// "command": [ +// "sleep", +// "3600" +// ] +// } +// ] +// } +// }, +// { +// "apiVersion": "v1", +// "kind": "Cargo", +// "metadata": { +// "name": "cargo-case-02", +// "annotations": { +// "k8s.v1.cni.cncf.io/networks": "testns1/macvlan-conf-1" +// } +// }, +// "spec": { +// "containers": [ +// { +// "name": "cargo-case-02", +// "image": "g-ubjg5602-docker.pkg.coding.net/iscas-system/containers/busybox:latest", +// "command": [ +// "sleep", +// "3600" +// ] +// } +// ] +// } +// 
}]); +// +// let list_req = test::TestRequest::get() +// .uri("/api/v1/namespaces/global/cargos") +// .to_request(); +// +// let list_resp = test::call_service(&app, list_req).await; +// +// assert!(list_resp.status().is_success()); +// +// let list_resp_body = test::read_body_json::(list_resp).await; +// assert_eq!(list_resp_body, list_req_data); +// +// +// +// let delete_req = test::TestRequest::delete() +// .uri("/api/v1/namespaces/global/cargos/cargo-case-01") +// .to_request(); +// +// let delete_resp = test::call_service(&app, delete_req).await; +// +// assert!(delete_resp.status().is_success()); +// +// let list_req_data2 = json!([ +// { +// "apiVersion": "v1", +// "kind": "Cargo", +// "metadata": { +// "name": "cargo-case-02", +// "annotations": { +// "k8s.v1.cni.cncf.io/networks": "testns1/macvlan-conf-1" +// } +// }, +// "spec": { +// "containers": [ +// { +// "name": "cargo-case-02", +// "image": "g-ubjg5602-docker.pkg.coding.net/iscas-system/containers/busybox:latest", +// "command": [ +// "sleep", +// "3600" +// ] +// } +// ] +// } +// }]); +// +// let list_req2 = test::TestRequest::get() +// .uri("/api/v1/namespaces/global/cargos") +// .to_request(); +// +// let list_resp2 = test::call_service(&app, list_req2).await; +// +// assert!(list_resp2.status().is_success()); +// +// let list_resp_body2 = test::read_body_json::(list_resp2).await; +// assert_eq!(list_resp_body2, list_req_data2); +// +// } +// +// +// +// #[actix_web::test] +// #[serial] +// async fn test_api_without_namespace() { +// let nats_cli = Arc::new(NatsCli::new().await.unwrap()); +// let event_manager = EventManager::new(); +// +// let app = test::init_service( +// App::new() +// .route("/api/{version}/{plural}", web::post().to({ +// let nats_cli = Arc::clone(&nats_cli); +// let event_manager = event_manager.clone(); +// +// move |path, data| { +// let handler = HANDLER.clone(); +// let db_pool = DB_POOL.clone(); +// let nats_cli = Arc::clone(&nats_cli); +// let event_manager = event_manager.clone(); +// +// async move { +// match db_pool.get_connection() { +// Ok(mut conn) => handler.create_api_without_namespace(path, data, &mut conn, nats_cli, event_manager).await, +// Err(_) => Err(ErrorInternalServerError("Failed to get database connection")), +// } +// } +// } +// })) +// .route("/api/{version}/{plural}/{name}", web::put().to({ +// let nats_cli = Arc::clone(&nats_cli); +// let event_manager = event_manager.clone(); +// +// move |path, data| { +// let handler = HANDLER.clone(); +// let db_pool = DB_POOL.clone(); +// let nats_cli = Arc::clone(&nats_cli); +// let event_manager = event_manager.clone(); +// +// async move { +// match db_pool.get_connection() { +// Ok(mut conn) => handler.update_api_without_namespace(path, data, &mut conn, nats_cli, event_manager).await, +// Err(_) => Err(ErrorInternalServerError("Failed to get database connection")), +// } +// } +// } +// })) +// .route("/api/{version}/{plural}/{name}", web::delete().to({ +// let nats_cli = Arc::clone(&nats_cli); +// let event_manager = event_manager.clone(); +// +// move |path| { +// let handler = HANDLER.clone(); +// let db_pool = DB_POOL.clone(); +// let nats_cli = Arc::clone(&nats_cli); +// let event_manager = event_manager.clone(); +// +// async move { +// match db_pool.get_connection() { +// Ok(mut conn) => handler.delete_api_without_namespace(path, &mut conn, nats_cli, event_manager).await, +// Err(_) => Err(ErrorInternalServerError("Failed to get database connection")), +// } +// } +// } +// })) +// 
.route("/api/{version}/{plural}/{name}", web::get().to({ +// let nats_cli = Arc::clone(&nats_cli); +// +// move |path, data| { +// let handler = HANDLER.clone(); +// let db_pool = DB_POOL.clone(); +// let nats_cli = Arc::clone(&nats_cli); +// +// async move { +// match db_pool.get_connection() { +// Ok(mut conn) => handler.getone_api_without_namespace(path, data, &mut conn, nats_cli).await, +// Err(_) => Err(ErrorInternalServerError("Failed to get database connection")), +// } +// } +// } +// })) +// .route("/api/{version}/{plural}", web::get().to({ +// let nats_cli = Arc::clone(&nats_cli); +// +// move |path, data| { +// let handler = HANDLER.clone(); +// let db_pool = DB_POOL.clone(); +// let nats_cli = Arc::clone(&nats_cli); +// +// async move { +// match db_pool.get_connection() { +// Ok(mut conn) => handler.listall_api_without_namespace(path, data, &mut conn, nats_cli).await, +// Err(_) => Err(ErrorInternalServerError("Failed to get database connection")), +// } +// } +// } +// })) +// ).await; +// +// +// let create_req_data = json!({ +// "apiVersion": "v1", +// "kind": "Node", +// "metadata": { +// "name": "node-case-01", +// }, +// "spec": { +// "containers": [ +// { +// "name": "node-case-01", +// "command": [ +// "sleep", +// "3600" +// ] +// } +// ] +// } +// }); +// +// let create_req = test::TestRequest::post() +// .uri("/api/v1/nodes") +// .set_json(&create_req_data) +// .to_request(); +// +// let create_resp = test::call_service(&app, create_req).await; +// +// assert!(create_resp.status().is_success()); +// +// let create_resp_body = test::read_body_json::(create_resp).await; +// assert_eq!(create_resp_body, create_req_data); +// +// +// +// +// +// +// +// let update_req_data = json!({ +// "apiVersion": "v1", +// "kind": "Node", +// "metadata": { +// "name": "node-case-01", +// }, +// "spec": { +// "containers": [ +// { +// "name": "node-case-01", +// "command": [ +// "sleep", +// "1" +// ] +// } +// ] +// } +// }); +// +// let update_req = test::TestRequest::put() +// .uri("/api/v1/nodes/node-case-01") +// .set_json(&update_req_data) +// .to_request(); +// +// let update_resp = test::call_service(&app, update_req).await; +// +// assert!(update_resp.status().is_success()); +// +// let update_resp_body = test::read_body_json::(update_resp).await; +// assert_eq!(update_resp_body, update_req_data); +// +// +// +// +// let get_req_data = json!({ +// "apiVersion": "v1", +// "kind": "Node", +// "metadata": { +// "name": "node-case-01", +// }, +// "spec": { +// "containers": [ +// { +// "name": "node-case-01", +// "command": [ +// "sleep", +// "1" +// ] +// } +// ] +// } +// }); +// +// let get_req = test::TestRequest::get() +// .uri("/api/v1/nodes/node-case-01") +// .to_request(); +// +// let get_resp = test::call_service(&app, get_req).await; +// +// assert!(get_resp.status().is_success()); +// +// let get_resp_body = test::read_body_json::(get_resp).await; +// assert_eq!(get_resp_body, get_req_data); +// +// +// +// +// let create_req_data2 = json!({ +// "apiVersion": "v1", +// "kind": "Node", +// "metadata": { +// "name": "node-case-02", +// }, +// "spec": { +// "containers": [ +// { +// "name": "node-case-02", +// "command": [ +// "sleep", +// "3600" +// ] +// } +// ] +// } +// }); +// +// let create_req2 = test::TestRequest::post() +// .uri("/api/v1/nodes") +// .set_json(&create_req_data2) +// .to_request(); +// +// let create_resp2 = test::call_service(&app, create_req2).await; +// +// assert!(create_resp2.status().is_success()); +// +// let create_resp_body2 = 
test::read_body_json::(create_resp2).await; +// assert_eq!(create_resp_body2, create_req_data2); +// +// +// +// let list_req_data = json!([{ +// "apiVersion": "v1", +// "kind": "Node", +// "metadata": { +// "name": "node-case-01", +// }, +// "spec": { +// "containers": [ +// { +// "name": "node-case-01", +// "command": [ +// "sleep", +// "1" +// ] +// } +// ] +// } +// }, +// { +// "apiVersion": "v1", +// "kind": "Node", +// "metadata": { +// "name": "node-case-02", +// }, +// "spec": { +// "containers": [ +// { +// "name": "node-case-02", +// "command": [ +// "sleep", +// "3600" +// ] +// } +// ] +// } +// }]); +// +// let list_req = test::TestRequest::get() +// .uri("/api/v1/nodes") +// .to_request(); +// +// let list_resp = test::call_service(&app, list_req).await; +// +// assert!(list_resp.status().is_success()); +// +// let list_resp_body = test::read_body_json::(list_resp).await; +// assert_eq!(list_resp_body, list_req_data); +// +// +// +// let delete_req = test::TestRequest::delete() +// .uri("/api/v1/nodes/node-case-01") +// .to_request(); +// +// let delete_resp = test::call_service(&app, delete_req).await; +// +// assert!(delete_resp.status().is_success()); +// +// let list_req_data2 = json!([ +// { +// "apiVersion": "v1", +// "kind": "Node", +// "metadata": { +// "name": "node-case-02", +// }, +// "spec": { +// "containers": [ +// { +// "name": "node-case-02", +// "command": [ +// "sleep", +// "3600" +// ] +// } +// ] +// } +// }]); +// let list_req2 = test::TestRequest::get() +// .uri("/api/v1/nodes") +// .to_request(); +// +// let list_resp2 = test::call_service(&app, list_req2).await; +// +// assert!(list_resp2.status().is_success()); +// +// let list_resp_body2 = test::read_body_json::(list_resp2).await; +// assert_eq!(list_resp_body2, list_req_data2); +// +// } +// +// +// +// +// #[actix_web::test] +// #[serial] +// async fn test_apis_with_namespace() { +// let nats_cli = Arc::new(NatsCli::new().await.unwrap()); +// let event_manager = EventManager::new(); +// +// let app = test::init_service( +// App::new() +// .route("/apis/{group}/{version}/namespaces/{namespace}/{plural}", web::post().to({ +// let nats_cli = Arc::clone(&nats_cli); +// let event_manager = event_manager.clone(); +// +// move |path, data| { +// let handler = HANDLER.clone(); +// let db_pool = DB_POOL.clone(); +// let nats_cli = Arc::clone(&nats_cli); +// let event_manager = event_manager.clone(); +// +// async move { +// match db_pool.get_connection() { +// Ok(mut conn) => handler.create_apis_with_namespace(path, data, &mut conn, nats_cli, event_manager).await, +// Err(_) => Err(ErrorInternalServerError("Failed to get database connection")), +// } +// } +// } +// })) +// .route("/apis/{group}/{version}/namespaces/{namespace}/{plural}/{name}", web::put().to({ +// let nats_cli = Arc::clone(&nats_cli); +// let event_manager = event_manager.clone(); +// +// move |path, data| { +// let handler = HANDLER.clone(); +// let db_pool = DB_POOL.clone(); +// let nats_cli = Arc::clone(&nats_cli); +// let event_manager = event_manager.clone(); +// +// async move { +// match db_pool.get_connection() { +// Ok(mut conn) => handler.update_apis_with_namespace(path, data, &mut conn, nats_cli, event_manager).await, +// Err(_) => Err(ErrorInternalServerError("Failed to get database connection")), +// } +// } +// } +// })) +// .route("/apis/{group}/{version}/namespaces/{namespace}/{plural}/{name}", web::delete().to({ +// let nats_cli = Arc::clone(&nats_cli); +// let event_manager = event_manager.clone(); +// +// move |path| { 
+// let handler = HANDLER.clone(); +// let db_pool = DB_POOL.clone(); +// let nats_cli = Arc::clone(&nats_cli); +// let event_manager = event_manager.clone(); +// +// async move { +// match db_pool.get_connection() { +// Ok(mut conn) => handler.delete_apis_with_namespace(path, &mut conn, nats_cli, event_manager).await, +// Err(_) => Err(ErrorInternalServerError("Failed to get database connection")), +// } +// } +// } +// })) +// .route("/apis/{group}/{version}/namespaces/{namespace}/{plural}/{name}", web::get().to({ +// let nats_cli = Arc::clone(&nats_cli); +// +// move |path, data| { +// let handler = HANDLER.clone(); +// let db_pool = DB_POOL.clone(); +// let nats_cli = Arc::clone(&nats_cli); +// +// async move { +// match db_pool.get_connection() { +// Ok(mut conn) => handler.getone_apis_with_namespace(path, data, &mut conn, nats_cli).await, +// Err(_) => Err(ErrorInternalServerError("Failed to get database connection")), +// } +// } +// } +// })) +// .route("/apis/{group}/{version}/namespaces/{namespace}/{plural}", web::get().to({ +// let nats_cli = Arc::clone(&nats_cli); +// +// move |path, data| { +// let handler = HANDLER.clone(); +// let db_pool = DB_POOL.clone(); +// let nats_cli = Arc::clone(&nats_cli); +// +// async move { +// match db_pool.get_connection() { +// Ok(mut conn) => handler.listall_apis_with_namespace(path, data, &mut conn, nats_cli).await, +// Err(_) => Err(ErrorInternalServerError("Failed to get database connection")), +// } +// } +// } +// })) +// ).await; +// +// +// +// let create_req_data = json!({ +// "apiVersion": "batch/v1", +// "kind": "Job", +// "metadata": { +// "name": "job-case-01", +// }, +// "spec": { +// "containers": [ +// { +// "name": "job-case-01", +// "command": [ +// "sleep", +// "3600" +// ] +// } +// ] +// } +// }); +// +// let create_req = test::TestRequest::post() +// .uri("/apis/batch/v1/namespaces/global/jobs") +// .set_json(&create_req_data) +// .to_request(); +// +// let create_resp = test::call_service(&app, create_req).await; +// +// assert!(create_resp.status().is_success()); +// +// let create_resp_body = test::read_body_json::(create_resp).await; +// assert_eq!(create_resp_body, create_req_data); +// +// +// +// +// +// +// +// let update_req_data = json!({ +// "apiVersion": "batch/v1", +// "kind": "Job", +// "metadata": { +// "name": "job-case-01", +// }, +// "spec": { +// "containers": [ +// { +// "name": "job-case-01", +// "command": [ +// "sleep", +// "1" +// ] +// } +// ] +// } +// }); +// +// let update_req = test::TestRequest::put() +// .uri("/apis/batch/v1/namespaces/global/jobs/job-case-01") +// .set_json(&update_req_data) +// .to_request(); +// +// let update_resp = test::call_service(&app, update_req).await; +// +// assert!(update_resp.status().is_success()); +// +// let update_resp_body = test::read_body_json::(update_resp).await; +// assert_eq!(update_resp_body, update_req_data); +// +// +// +// +// let get_req_data = json!({ +// "apiVersion": "batch/v1", +// "kind": "Job", +// "metadata": { +// "name": "job-case-01", +// }, +// "spec": { +// "containers": [ +// { +// "name": "job-case-01", +// "command": [ +// "sleep", +// "1" +// ] +// } +// ] +// } +// }); +// +// let get_req = test::TestRequest::get() +// .uri("/apis/batch/v1/namespaces/global/jobs/job-case-01") +// .to_request(); +// +// let get_resp = test::call_service(&app, get_req).await; +// +// assert!(get_resp.status().is_success()); +// +// let get_resp_body = test::read_body_json::(get_resp).await; +// assert_eq!(get_resp_body, get_req_data); +// +// +// 
+// +// let create_req_data2 = json!({ +// "apiVersion": "batch/v1", +// "kind": "Job", +// "metadata": { +// "name": "job-case-02", +// }, +// "spec": { +// "containers": [ +// { +// "name": "job-case-02", +// "command": [ +// "sleep", +// "3600" +// ] +// } +// ] +// } +// }); +// +// let create_req2 = test::TestRequest::post() +// .uri("/apis/batch/v1/namespaces/global/jobs") +// .set_json(&create_req_data2) +// .to_request(); +// +// let create_resp2 = test::call_service(&app, create_req2).await; +// +// assert!(create_resp2.status().is_success()); +// +// let create_resp_body2 = test::read_body_json::(create_resp2).await; +// assert_eq!(create_resp_body2, create_req_data2); +// +// +// +// let list_req_data = json!([{ +// "apiVersion": "batch/v1", +// "kind": "Job", +// "metadata": { +// "name": "job-case-01", +// }, +// "spec": { +// "containers": [ +// { +// "name": "job-case-01", +// "command": [ +// "sleep", +// "1" +// ] +// } +// ] +// } +// }, +// { +// "apiVersion": "batch/v1", +// "kind": "Job", +// "metadata": { +// "name": "job-case-02", +// }, +// "spec": { +// "containers": [ +// { +// "name": "job-case-02", +// "command": [ +// "sleep", +// "3600" +// ] +// } +// ] +// } +// }]); +// +// let list_req = test::TestRequest::get() +// .uri("/apis/batch/v1/namespaces/global/jobs") +// .to_request(); +// +// let list_resp = test::call_service(&app, list_req).await; +// +// assert!(list_resp.status().is_success()); +// +// let list_resp_body = test::read_body_json::(list_resp).await; +// assert_eq!(list_resp_body, list_req_data); +// +// +// +// let delete_req = test::TestRequest::delete() +// .uri("/apis/batch/v1/namespaces/global/jobs/job-case-01") +// .to_request(); +// +// let delete_resp = test::call_service(&app, delete_req).await; +// +// assert!(delete_resp.status().is_success()); +// +// let list_req_data2 = json!([ +// { +// "apiVersion": "batch/v1", +// "kind": "Job", +// "metadata": { +// "name": "job-case-02", +// }, +// "spec": { +// "containers": [ +// { +// "name": "job-case-02", +// "command": [ +// "sleep", +// "3600" +// ] +// } +// ] +// } +// }]); +// let list_req2 = test::TestRequest::get() +// .uri("/apis/batch/v1/namespaces/global/jobs") +// .to_request(); +// +// let list_resp2 = test::call_service(&app, list_req2).await; +// +// assert!(list_resp2.status().is_success()); +// +// let list_resp_body2 = test::read_body_json::(list_resp2).await; +// assert_eq!(list_resp_body2, list_req_data2); +// +// } +// +// +// #[actix_web::test] +// #[serial] +// async fn test_crd_and_apis_without_namespace() { +// let nats_cli = Arc::new(NatsCli::new().await.unwrap()); +// let event_manager = EventManager::new(); +// +// let app = test::init_service( +// App::new() +// .route("/apis/{group}/{version}/{plural}", web::post().to({ +// let nats_cli = Arc::clone(&nats_cli); +// let event_manager = event_manager.clone(); +// +// move |path, data| { +// let handler = HANDLER.clone(); +// let db_pool = DB_POOL.clone(); +// let nats_cli = Arc::clone(&nats_cli); +// let event_manager = event_manager.clone(); +// +// async move { +// match db_pool.get_connection() { +// Ok(mut conn) => handler.create_apis_without_namespace(path, data, &mut conn, nats_cli, event_manager).await, +// Err(_) => Err(ErrorInternalServerError("Failed to get database connection")), +// } +// } +// } +// })) +// .route("/apis/{group}/{version}/{plural}/{name}", web::put().to({ +// let nats_cli = Arc::clone(&nats_cli); +// let event_manager = event_manager.clone(); +// +// move |path, data| { +// let 
handler = HANDLER.clone(); +// let db_pool = DB_POOL.clone(); +// let nats_cli = Arc::clone(&nats_cli); +// let event_manager = event_manager.clone(); +// +// async move { +// match db_pool.get_connection() { +// Ok(mut conn) => handler.update_apis_without_namespace(path, data, &mut conn, nats_cli, event_manager).await, +// Err(_) => Err(ErrorInternalServerError("Failed to get database connection")), +// } +// } +// } +// })) +// .route("/apis/{group}/{version}/{plural}/{name}", web::delete().to({ +// let nats_cli = Arc::clone(&nats_cli); +// let event_manager = event_manager.clone(); +// +// move |path| { +// let handler = HANDLER.clone(); +// let db_pool = DB_POOL.clone(); +// let nats_cli = Arc::clone(&nats_cli); +// let event_manager = event_manager.clone(); +// +// async move { +// match db_pool.get_connection() { +// Ok(mut conn) => handler.delete_apis_without_namespace(path, &mut conn, nats_cli, event_manager).await, +// Err(_) => Err(ErrorInternalServerError("Failed to get database connection")), +// } +// } +// } +// })) +// .route("/apis/{group}/{version}/{plural}/{name}", web::get().to({ +// let nats_cli = Arc::clone(&nats_cli); +// +// move |path, data| { +// let handler = HANDLER.clone(); +// let db_pool = DB_POOL.clone(); +// let nats_cli = Arc::clone(&nats_cli); +// +// async move { +// match db_pool.get_connection() { +// Ok(mut conn) => handler.getone_apis_without_namespace(path, data, &mut conn, nats_cli).await, +// Err(_) => Err(ErrorInternalServerError("Failed to get database connection")), +// } +// } +// } +// })) +// .route("/apis/{group}/{version}/{plural}", web::get().to({ +// let nats_cli = Arc::clone(&nats_cli); +// +// move |path, data| { +// let handler = HANDLER.clone(); +// let db_pool = DB_POOL.clone(); +// let nats_cli = Arc::clone(&nats_cli); +// +// async move { +// match db_pool.get_connection() { +// Ok(mut conn) => handler.listall_apis_without_namespace(path, data, &mut conn, nats_cli).await, +// Err(_) => Err(ErrorInternalServerError("Failed to get database connection")), +// } +// } +// } +// })) +// ).await; +// +// +// let crd_req_data = json!({ +// "apiVersion": "batch/v1", +// "kind": "CustomResourceDefinition", +// "metadata": { +// "name": "myjob.iscas.cn" +// }, +// "spec": { +// "names": +// { +// "kind": "Myjob", +// "listKind": "MyjobList", +// "plural": "myjobs", +// "singular": "myjob" +// } +// } +// }); +// +// let crd_req = test::TestRequest::post() +// .uri("/apis/batch/v1/crds") +// .set_json(&crd_req_data) +// .to_request(); +// +// let crd_resp = test::call_service(&app, crd_req).await; +// +// assert!(crd_resp.status().is_success()); +// +// let crd_resp_body = test::read_body_json::(crd_resp).await; +// assert_eq!(crd_resp_body, crd_req_data); +// +// +// let create_req_data = json!({ +// "apiVersion": "batch/v1", +// "kind": "Myjob", +// "metadata": { +// "name": "myjob-case-01", +// }, +// "spec": { +// "containers": [ +// { +// "name": "myjob-case-01", +// "command": [ +// "sleep", +// "3600" +// ] +// } +// ] +// } +// }); +// +// let create_req = test::TestRequest::post() +// .uri("/apis/batch/v1/myjobs") +// .set_json(&create_req_data) +// .to_request(); +// +// let create_resp = test::call_service(&app, create_req).await; +// +// assert!(create_resp.status().is_success()); +// +// let create_resp_body = test::read_body_json::(create_resp).await; +// assert_eq!(create_resp_body, create_req_data); +// +// +// +// +// +// +// +// let update_req_data = json!({ +// "apiVersion": "batch/v1", +// "kind": "Myjob", +// "metadata": 
{ +// "name": "myjob-case-01", +// }, +// "spec": { +// "containers": [ +// { +// "name": "myjob-case-01", +// "command": [ +// "sleep", +// "1" +// ] +// } +// ] +// } +// }); +// +// let update_req = test::TestRequest::put() +// .uri("/apis/batch/v1/myjobs/myjob-case-01") +// .set_json(&update_req_data) +// .to_request(); +// +// let update_resp = test::call_service(&app, update_req).await; +// +// assert!(update_resp.status().is_success()); +// +// let update_resp_body = test::read_body_json::(update_resp).await; +// assert_eq!(update_resp_body, update_req_data); +// +// +// +// +// let get_req_data = json!({ +// "apiVersion": "batch/v1", +// "kind": "Myjob", +// "metadata": { +// "name": "myjob-case-01", +// }, +// "spec": { +// "containers": [ +// { +// "name": "myjob-case-01", +// "command": [ +// "sleep", +// "1" +// ] +// } +// ] +// } +// }); +// +// let get_req = test::TestRequest::get() +// .uri("/apis/batch/v1/myjobs/myjob-case-01") +// .to_request(); +// +// let get_resp = test::call_service(&app, get_req).await; +// +// assert!(get_resp.status().is_success()); +// +// let get_resp_body = test::read_body_json::(get_resp).await; +// assert_eq!(get_resp_body, get_req_data); +// +// +// +// +// let create_req_data2 = json!({ +// "apiVersion": "batch/v1", +// "kind": "Myjob", +// "metadata": { +// "name": "myjob-case-02", +// }, +// "spec": { +// "containers": [ +// { +// "name": "myjob-case-02", +// "command": [ +// "sleep", +// "3600" +// ] +// } +// ] +// } +// }); +// +// let create_req2 = test::TestRequest::post() +// .uri("/apis/batch/v1/myjobs") +// .set_json(&create_req_data2) +// .to_request(); +// +// let create_resp2 = test::call_service(&app, create_req2).await; +// +// assert!(create_resp2.status().is_success()); +// +// let create_resp_body2 = test::read_body_json::(create_resp2).await; +// assert_eq!(create_resp_body2, create_req_data2); +// +// +// +// let list_req_data = json!([{ +// "apiVersion": "batch/v1", +// "kind": "Myjob", +// "metadata": { +// "name": "myjob-case-01", +// }, +// "spec": { +// "containers": [ +// { +// "name": "myjob-case-01", +// "command": [ +// "sleep", +// "1" +// ] +// } +// ] +// } +// }, +// { +// "apiVersion": "batch/v1", +// "kind": "Myjob", +// "metadata": { +// "name": "myjob-case-02", +// }, +// "spec": { +// "containers": [ +// { +// "name": "myjob-case-02", +// "command": [ +// "sleep", +// "3600" +// ] +// } +// ] +// } +// }]); +// +// let list_req = test::TestRequest::get() +// .uri("/apis/batch/v1/myjobs") +// .to_request(); +// +// let list_resp = test::call_service(&app, list_req).await; +// +// assert!(list_resp.status().is_success()); +// +// let list_resp_body = test::read_body_json::(list_resp).await; +// assert_eq!(list_resp_body, list_req_data); +// +// +// +// let delete_req = test::TestRequest::delete() +// .uri("/apis/batch/v1/myjobs/myjob-case-01") +// .to_request(); +// +// let delete_resp = test::call_service(&app, delete_req).await; +// +// assert!(delete_resp.status().is_success()); +// +// let list_req_data2 = json!([ +// { +// "apiVersion": "batch/v1", +// "kind": "Myjob", +// "metadata": { +// "name": "myjob-case-02", +// }, +// "spec": { +// "containers": [ +// { +// "name": "myjob-case-02", +// "command": [ +// "sleep", +// "3600" +// ] +// } +// ] +// } +// }]); +// let list_req2 = test::TestRequest::get() +// .uri("/apis/batch/v1/myjobs") +// .to_request(); +// +// let list_resp2 = test::call_service(&app, list_req2).await; +// +// assert!(list_resp2.status().is_success()); +// +// let 
list_resp_body2 = test::read_body_json::(list_resp2).await; +// assert_eq!(list_resp_body2, list_req_data2); +// +// } +// } diff --git a/tests/server_tests.rs b/tests/server_tests.rs new file mode 100644 index 0000000000000000000000000000000000000000..0e0abce28ee591a430763314e8da596bafd8cf96 --- /dev/null +++ b/tests/server_tests.rs @@ -0,0 +1,322 @@ +#[cfg(test)] +mod tests { + use super::*; + use actix_service::ServiceFactory; + use actix_web::dev::ServiceRequest; + use actix_web::{test, web, App, Error, HttpResponse}; + use anyhow::Result; + use dotenv::dotenv; + use env_logger::{Builder, Target}; + use feventbus::traits::controller::EventBus; + use fleet_apiserver::cores::apiserver::{AppState, K8sStylePathParams, K8sStyleRoute}; + use fleet_apiserver::cores::events::{APIServerEventClient, P2PEventServer}; + use fleet_apiserver::cores::handlers::{APIServerResponse, Handler}; + use fleet_apiserver::cores::services::{APIServerResult, APIServerServiceParams, APIServerServiceParamsBuilder, APIServerStatusCode}; + use fleet_apiserver::{prepare_app_state, start_server}; + use fleetmod::pod::Pod; + use log::log; + use once_cell::sync::Lazy; + use serde_json::{json, Value}; + use serial_test::serial; + use std::env; + use std::sync::atomic::AtomicBool; + use std::sync::Arc; + use std::thread::spawn; + use tokio::time::sleep; + + + const DATABASE_URL: &str = "sqlite://./test-database.sqlite"; + + #[macro_export] + macro_rules! setup_test_app {() => { + { + let app_state = setup().await.unwrap(); + let app = App::new().app_data(web::Data::new(app_state.clone())); + let routes = K8sStyleRoute::get_routes(); + let app = routes.iter().fold(app, |app, route| { + log::info!("register route: {} {}", route.get_method(), route.get_path()); + app.route(route.get_path().as_str(), route.get_web_route()) + }); + let app = app.default_service(web::to(move || async { + log::error!("invalid url"); + Ok::( + HttpResponse::NotFound().body("invalid url, see https://gitee.com/iscas-system/apiserver/wikis/pages?sort_id=12661944&doc_id=6135774")) + })); + let app = test::init_service(app).await; + (app, app_state) + }}; + } + + async fn setup() -> Result { + let mut builder = Builder::from_default_env(); + builder.target(Target::Stdout); + builder.init(); + + log::info!("Setting DATABASE_URL to {}", DATABASE_URL); + let app_state = prepare_app_state(DATABASE_URL).await; + if let Err(e) = app_state { + log::error!("Failed to prepare app state: {:?}", e); + return Err(e); + } + let app_state = app_state?; + // 启动watch相关事件监听协程 + app_state.clone().watch_event_publisher.start(); + // 启动P2P事件监听协程 + P2PEventServer::new(Arc::from(app_state.clone())).start(); + // 等待启动 + sleep(std::time::Duration::from_secs(1)).await; + Ok(app_state.clone()) + } + + #[tokio::test] + #[serial] + async fn test_pod_basic() { + let (app, app_state) = setup_test_app!(); + + let name = "test-create-pod".to_string(); + let namespace = "ns1".to_string(); + let version = "v1".to_string(); + let plural = "pods".to_string(); + let pod_value = mock_pod(name.clone(), Some(namespace.clone())); + let pod: Pod = serde_json::from_value(pod_value.clone()).unwrap(); + let with_name_params = K8sStylePathParams { group: None, version: version.clone(), namespace: Some(namespace.clone()), name: Some(name.clone()), plural: plural.clone() }; + let without_name_params = K8sStylePathParams { group: None, version: version.clone(), namespace: Some(namespace.clone()), name: None, plural: plural.clone() }; + + // delete it first + let pod_delete_url = 
get_url(with_name_params.clone()); + log::info!("pod_delete_url: {}", pod_delete_url); + let req = test::TestRequest::delete().uri(pod_delete_url.as_str()).to_request(); + let res: APIServerResponse = test::call_and_read_body_json(&app, req).await; + log::info!("delete res {:?}", res); + + // test create + let pod_create_url = get_url(without_name_params.clone()); + let req = test::TestRequest::post() + .uri(pod_create_url.as_str()) + .set_json(&pod_value) + .to_request(); + let res: APIServerResponse = test::call_and_read_body_json(&app, req).await; + log::info!("create res {:?}", res); + assert_eq!(res.status_code, APIServerStatusCode::OK); + let pod_from_create = serde_json::from_value(res.data.unwrap()).unwrap(); + assert_eq!(pod, pod_from_create); + + // test get one + let pod_getone_url = get_url(with_name_params.clone()); + let req = test::TestRequest::get().uri(pod_getone_url.as_str()).to_request(); + let res: APIServerResponse = test::call_and_read_body_json(&app, req).await; + log::info!("getone res {:?}", res); + assert_eq!(res.status_code, APIServerStatusCode::OK); + assert!(res.data.is_some()); + let pod_from_getone: Pod = serde_json::from_value(res.data.unwrap()).unwrap(); + assert_eq!(pod, pod_from_getone); + + // test list + let pod_list_url = get_url(without_name_params.clone()); + let req = test::TestRequest::get().uri(pod_list_url.as_str()).to_request(); + let res: APIServerResponse = test::call_and_read_body_json(&app, req).await; + log::info!("list res {:?}", res); + assert_eq!(res.status_code, APIServerStatusCode::OK); + assert!(res.data.is_some()); + let pods_from_list: Vec = serde_json::from_value(res.data.unwrap()).unwrap(); + let found = pods_from_list.iter().any(|p| p.metadata.name == name); + assert!(found); + let pod_from_list = pods_from_list.iter().find(|p| p.metadata.name == name).unwrap(); + assert_eq!(pod, *pod_from_list); + + // test patch + let pod_patch_url = get_url(with_name_params.clone()); + let req = test::TestRequest::patch() + .uri(pod_patch_url.as_str()) + .set_json(&json!({"metadata": {"labels": {"app": "my-app-patched", "patch-new-key": "patch-new-value"}}})) + .to_request(); + let mut target_pod = pod.clone(); + target_pod.metadata.labels.as_mut().unwrap().insert("app".to_string(), "my-app-patched".to_string()); + target_pod.metadata.labels.as_mut().unwrap().insert("patch-new-key".to_string(), "patch-new-value".to_string()); + let res: APIServerResponse = test::call_and_read_body_json(&app, req).await; + log::info!("patch res {:?}", res); + assert_eq!(res.status_code, APIServerStatusCode::OK); + let pod_patched: Pod = serde_json::from_value(res.data.unwrap()).unwrap(); + assert_eq!(target_pod, pod_patched); + + // test delete + let pod_delete_url = get_url(with_name_params.clone()); + let req = test::TestRequest::delete().uri(pod_delete_url.as_str()).to_request(); + let res: APIServerResponse = test::call_and_read_body_json(&app, req).await; + log::info!("delete res {:?}", res); + assert_eq!(res.status_code, APIServerStatusCode::OK); + let pod_deleted: Pod = serde_json::from_value(res.data.unwrap()).unwrap(); + assert_eq!(target_pod, pod_deleted); + let req = test::TestRequest::get().uri(pod_getone_url.as_str()).to_request(); + let res: APIServerResponse = test::call_and_read_body_json(&app, req).await; + log::info!("get res {:?}", res); + assert_eq!(res.status_code, APIServerStatusCode::NotFound); + + // test by eventbus + let event_cli = APIServerEventClient::new(app_state.nats_cli, None); + let with_name_params = 
APIServerServiceParams::from(with_name_params.clone()); + let without_name_params = APIServerServiceParams::from(without_name_params.clone()); + + // test create + let res = event_cli + .create_by_resource(pod.clone()) + .await; + log::info!("create res {:?}", res); + assert_eq!(pod, res.ok().unwrap()); + + // test get one + let res = event_cli + .get(with_name_params.clone(), Value::Null) + .await; + log::info!("getone res {:?}", res); + assert!(res.is_ok()); + assert_eq!(pod, res.ok().unwrap()); + + // test list + let res = event_cli + .get(without_name_params.clone(), Value::Null) + .await; + log::info!("list res {:?}", res); + assert!(res.is_ok()); + let pods_from_list: Vec = res.ok().unwrap(); + let found = pods_from_list.iter().any(|p| p.metadata.name == name); + assert!(found); + let pod_from_list = pods_from_list.iter().find(|p| p.metadata.name == name).unwrap(); + assert_eq!(pod, *pod_from_list); + + // test patch + let mut target_pod = pod.clone(); + target_pod.metadata.labels.as_mut().unwrap().insert("app".to_string(), "my-app-patched".to_string()); + target_pod.metadata.labels.as_mut().unwrap().insert("patch-new-key".to_string(), "patch-new-value".to_string()); + let res = event_cli + .patch(with_name_params.clone(), json!({"metadata": {"labels": {"app": "my-app-patched", "patch-new-key": "patch-new-value"}}})).await; + log::info!("patch res {:?}", res); + assert!(res.is_ok()); + assert_eq!(target_pod, res.ok().unwrap()); + + // test delete + let res = event_cli + .delete(with_name_params.clone()) + .await; + log::info!("delete res {:?}", res); + assert_eq!(target_pod, res.ok().unwrap()); + let res: APIServerResult = event_cli + .get(with_name_params.clone(), Value::Null) + .await; + log::info!("get res {:?}", res); + assert!(res.is_err()); + assert_eq!(res.err().unwrap().status_code, APIServerStatusCode::NotFound); + } + + fn get_url(params: K8sStylePathParams) -> String { + let with_group = params.group.is_some(); + let with_namespace = params.namespace.is_some(); + let with_name = params.name.is_some(); + let prefix = if with_group { + format!("/apis/{}/", params.group.unwrap()) + } else { + "/api/".to_string() + }; + let version_and_namespace = if with_namespace { + format!("{}/namespaces/{}/", params.version, params.namespace.unwrap()) + } else { + format!("{}/", params.version) + }; + let plural_and_name = if with_name { + format!("{}/{}", params.plural, params.name.unwrap()) + } else { + format!("{}", params.plural) + }; + format!("{}{}{}", prefix, version_and_namespace, plural_and_name) + } + + fn mock_pod(name: String, namespace: Option) -> Value { + let pod_yaml = r#" +apiVersion: v1 +kind: Pod +metadata: + name: example-pod + namespace: default + labels: + app: my-app + creationTimestamp: 2024-12-10T08:00:00+08:00 +spec: + nodeName: my-node + hostname: my-hostname + hostAliases: + - ip: "127.0.0.1" + hostnames: + - "my-local-host" + - "another-host" + containers: + - name: example-container + image: example-image:latest + imagePullPolicy: IfNotPresent + command: ["nginx"] + args: ["-g", "daemon off;"] + workingDir: /usr/share/nginx/html + ports: + - name: http + containerPort: 80 + protocol: TCP + - name: https + containerPort: 443 + protocol: TCP + env: + - name: ENV_MODE + value: production + - name: ENV_VERSION + valueFrom: + fieldRef: + fieldPath: metadata.name + resources: + requests: + memory: "128Mi" + cpu: "250m" + limits: + memory: "512Mi" + cpu: "1" + volumeMounts: + - name: config-volume + mountPath: /etc/nginx/conf.d + readOnly: true + - name: data-volume 
+ mountPath: /usr/share/nginx/html + - name: data-host-volume + mountPath: /usr/share/nginx/a.txt + volumeDevices: + - name: device-volume + devicePath: /dev/sdb + securityContext: + runAsUser: 1000 + runAsGroup: 1000 + readOnlyRootFilesystem: true + allowPrivilegeEscalation: true + privileged: true + volumes: + - name: example-volume + configMap: + name: nginx-config + - name: data-volume + emptyDir: {} + - name: device-volume + hostPath: + path: /dev/sdb + type: Directory + - name: device-volume + hostPath: + path: /dev/sdb + type: Directory +status: + phase: Pending + message: begin handle + podIP: 10.42.0.9 + podIPs: + - ip: 10.42.0.9 +"#; + let mut pod: Pod = serde_yaml::from_str(pod_yaml).expect("Failed to parse YAML"); + pod.metadata.name = name.to_string(); + pod.metadata.namespace = namespace.map(|s| s.to_string()); + serde_json::to_value(pod).unwrap() + } +}
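
A minimal, self-contained sketch of the majority-vote rule that src/db/get.rs applies across the kine, kine_replica1 and kine_replica2 tables. The helper name and the plain String values are assumptions for illustration only; the real code works on Diesel query results and calls get_data_from_kine_primary when no majority exists.

use std::collections::HashMap;

/// Resolves one logical read from the values returned by the three replica tables.
/// Some(value) is returned when all replicas agree or at least two agree;
/// None means total disagreement, in which case the caller falls back to the
/// primary `kine` table.
fn resolve_by_majority(replica_values: Vec<String>) -> Option<String> {
    let mut counts: HashMap<String, usize> = HashMap::new();
    for value in replica_values {
        *counts.entry(value).or_insert(0) += 1;
    }
    if counts.len() == 1 {
        // All replicas returned the same value.
        return counts.into_iter().next().map(|(value, _)| value);
    }
    if counts.values().all(|&count| count == 1) {
        // Every replica disagrees: let the caller fall back to the primary table.
        return None;
    }
    // Otherwise the value seen most often (at least twice out of three) wins.
    counts.into_iter().max_by_key(|&(_, count)| count).map(|(value, _)| value)
}

fn main() {
    assert_eq!(
        resolve_by_majority(vec!["a".into(), "a".into(), "b".into()]),
        Some("a".to_string())
    );
    assert_eq!(resolve_by_majority(vec!["a".into(), "b".into(), "c".into()]), None);
}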
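The write path in src/db/insert.rs issues the same idempotent insert against all three replica tables inside one transaction, switching SQL syntax by backend. A small sketch of just that dialect split; the enum and function names are illustrative and not part of the patch.

enum Dialect {
    Pg,
    Sqlite,
}

/// Builds the idempotent insert for one kine table. The table name comes from a
/// fixed whitelist in the patch ("kine", "kine_replica1", "kine_replica2"), so
/// interpolating it with format! does not expose user input to the SQL string.
fn kine_insert_sql(dialect: &Dialect, table: &str) -> String {
    match dialect {
        // PostgreSQL: ON CONFLICT DO NOTHING skips rows that already exist.
        Dialect::Pg => format!(
            "INSERT INTO {} (kind, name, namespace, apigroup, data, created_time, updated_time) \
             VALUES ($1, $2, $3, $4, $5, $6, $7) ON CONFLICT DO NOTHING;",
            table
        ),
        // SQLite: INSERT OR IGNORE plays the same role.
        Dialect::Sqlite => format!(
            "INSERT OR IGNORE INTO {} (kind, name, namespace, apigroup, data, created_time, updated_time) \
             VALUES (?, ?, ?, ?, ?, ?, ?);",
            table
        ),
    }
}

fn main() {
    println!("{}", kine_insert_sql(&Dialect::Pg, "kine_replica1"));
    println!("{}", kine_insert_sql(&Dialect::Sqlite, "kine_replica2"));
}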
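src/db/update.rs applies the same UPDATE to all three tables and sums the affected row counts; the call is reported as successful only when that sum exceeds one, i.e. at least two replicas accepted the write under the assumption of one matching row per table. A standalone sketch of that check; the function name is illustrative.

/// Mirrors the success rule in update_data_in_kine: sum the rows affected in
/// kine, kine_replica1 and kine_replica2 and require the total to be greater than one.
fn update_succeeded(rows_affected_per_table: [usize; 3]) -> bool {
    rows_affected_per_table.iter().sum::<usize>() > 1
}

fn main() {
    assert!(update_succeeded([1, 1, 0]));  // two replicas updated: success
    assert!(!update_succeeded([1, 0, 0])); // only one replica updated: failure
}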