diff --git a/rdnf/.cargo/config.toml b/rdnf/.cargo/config.toml new file mode 100644 index 0000000000000000000000000000000000000000..31f39e7c6b1e00eec56573ddd4e2484f20b5d6f6 --- /dev/null +++ b/rdnf/.cargo/config.toml @@ -0,0 +1,6 @@ +[target.x86_64-unknown-linux-gnu] +linker="clang" +rustflags=["-C","link-arg=-fuse-ld=/usr/bin/mold"] + +#[build] +#rustc-wrapper = "/root/.cargo/bin/sccache" diff --git a/rdnf/.vscode/extensions.json b/rdnf/.vscode/extensions.json deleted file mode 100644 index fe0411f3c2eab747a0271ed21a2cad2bb85c8e39..0000000000000000000000000000000000000000 --- a/rdnf/.vscode/extensions.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "recommendations": [ - "vadimcn.vscode-lldb" - ] -} \ No newline at end of file diff --git a/rdnf/.vscode/launch.json b/rdnf/.vscode/launch.json index 02e79d79f770d49252cc7abeda32e5737cccb5c5..2527f84a4249b3ce7fcc72ab59bf27fad829728c 100644 --- a/rdnf/.vscode/launch.json +++ b/rdnf/.vscode/launch.json @@ -3,28 +3,38 @@ // 悬停以查看现有属性的描述。 // 欲了解更多信息,请访问: https://go.microsoft.com/fwlink/?linkid=830387 "version": "0.2.0", - "configurations": [ + "configurations": [ + { "type": "lldb", "request": "launch", - "name": "Cargo launch", + "name": "Debug executable 'rdnf'", "cargo": { "args": [ "build", - "--lib" - ] + "--bin=rdnf", + "--package=rdnf" + ], + "filter": { + "name": "rdnf", + "kind": "bin" + } }, - "args": [] + "args": [ + "install", + "vim-enhanced" + ], + "cwd": "${workspaceFolder}" }, - { "type": "lldb", "request": "launch", - "name": "Debug executable 'rdnf'", + "name": "Debug unit tests in executable 'rdnf'", "cargo": { "args": [ - "build", + "test", + "--no-run", "--bin=rdnf", "--package=rdnf" ], @@ -33,10 +43,11 @@ "kind": "bin" } }, - // "args": ["install","rpm","rpm-devel","file:///meg-0.2.4-5.fc36.x86_64.rpm","http://mirrors.aliyun.com/fedora/releases/36/Everything/x86_64/os/Packages/i/ibacm-39.0-1.fc36.x86_64.rpm"], - "args": ["install","nodejs"], - // 
"args":["install","http://mirrors.aliyun.com/fedora/releases/36/Everything/x86_64/os/Packages/i/ibacm-39.0-1.fc36.x86_64.rpm"], - "cwd": "${workspaceFolder}", - }, + "args": [ + "install", + "vim-enhanced" + ], + "cwd": "${workspaceFolder}" + } ] } \ No newline at end of file diff --git a/rdnf/.vscode/settings.json b/rdnf/.vscode/settings.json new file mode 100644 index 0000000000000000000000000000000000000000..352a6265a0dc59187ffa576fac072572036fb463 --- /dev/null +++ b/rdnf/.vscode/settings.json @@ -0,0 +1,5 @@ +{ + "rust-analyzer.linkedProjects": [ + "./Cargo.toml" + ] +} \ No newline at end of file diff --git a/rdnf/Cargo.toml b/rdnf/Cargo.toml index 4896f8828b879dcaf7b098ee8d25d66624d90295..8228ce303244078f2ea1a3e08bb70fc90d94890c 100644 --- a/rdnf/Cargo.toml +++ b/rdnf/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "rdnf" -version = "0.1.0" +version = "0.2.0" edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html @@ -8,32 +8,39 @@ edition = "2021" default=["zh_CN"] en_US=[] zh_CN=[] - - [dependencies] +tokio={version="1",features=["rt-multi-thread","macros","fs","time"]} clap = {version="4",features = ["derive"]} +anyhow = "1" +serde={version = "*",features = ["serde_derive"]} +serde_json = "1" +libc="0.2" +thiserror = "1" config = "0.13" +glob="0.3" console="0.15" -glob="0.3.0" +md5="*" +uuid={version="1"} +reqwest = { version = "0.11.11", features = ["blocking","stream"] } +futures-util = "0.3.27" +base64="0.21" indicatif = "0.17.0" -dialoguer = "0.10" -# libsolv-sys = "0.1.4" -rustix={version = "*",features = ["process"]} -anyhow = "1.0.60" -lazy_static = "1.4.0" -curl="0.4" -tokio={version = "1.20",features = ["rt","macros"]} -libc="0.2.119" -serde={version = "*",features = ["serde_derive"]} -rpm-sys={path="./rpm-sys",version="*"} -solv-sys={path="./solv-sys",version="*"} -quick-xml={version = "0.25"} +quick-xml = { version = "0.28", features = ["serialize"] } +hex="*" sha-1="*" sha2="*" -md-5="*" 
-hex="*" -chrono="0.4" +async-compression = {version="0.3.15",features=["stream","gzip","tokio"]} +async-channel="1" +num_cpus="*" +indradb-lib = { version = "4", features = ["rocksdb-datastore"] } +memmap2="*" +rocksdb="*" +aho-corasick = "0.7" +rpm={path="./rpm"} +rusqlite = {version="0.29",features=["modern-full"]} +varisat = "0.2" +async-recursion = "1.0.4" +[dev-dependencies] +assert_cmd="2" -[build-dependencies] -cc="1" \ No newline at end of file diff --git a/rdnf/README.md b/rdnf/README.md index cffc3e172a6f2b2a03d9552c884c5e014d6d5683..d0b4be6b38a0e1991f19eebafa1b8ea1ed841ae3 100644 --- a/rdnf/README.md +++ b/rdnf/README.md @@ -1,33 +1,153 @@ -## 简介 -+ 此项目是*tdnf*的rust实现版,主要依托于*rpm-devel*,*libsolv-devel* -+ 目前已经实现了核心功能即 *repolist*,*makecache*,*search*,*install*,*reinstall*,*remove*,*update*,*info* -+ 此项目准确的说是rpm-devel和libsolv-devel的整合品。故含有大量的代码是rust对c的ffi绑定,具有一定参考价值。 -+ 项目局限性:tdnf主要是为了photon而生,而photon是轻量级容器环境,故软件包仓库的软件数量少,元数据信息少,tdnf的性能可以应付,但对于像fedora拥有丰富的软件包,tdnf解决软件包依赖冲突问题需要长达接近20s的时间。故本项目有进一步改进的空间。 - -### 项目解析 -1. 首先解析命令行参数 clap是rust生态主流的命令行编程框架,简单高效。 -2. 解析配置文件 */etc/dnf/dnf.conf*, 使用config crates包解析,映射成 rust对象。 -3. 读取 */etc/yum.repos.d/* 文件夹下所有.repo文件,读取各个软件仓库的数据, 映射成rust对象。 -4. 初始化libsolv,创建pool对象。 - -上述5个步骤,整合成Rdnf,形成全局上下文环境。 -#### repolist -此命令在于简单显示已经读取的repo仓库信息,可显示已启用或禁用的repo仓库。 -#### *makecache* -此命令至关重要,是整个项目的重要基石,即读取已启用的repo的仓库,下载镜像站的索引文件、软件包的元数据,基于libsolv生成.solv格式的缓存文件,若有缓存文件,跳过1,2,3。 -1. 首先下载 repomd.xml文件,有两种方式: - + 基于.repo配置文件,若有baseurl配置项,即baseurl+*repodata/repomd.xml* 即可得 repomd.xml的下载链接 - + 基于.repo配置文件,若无baseurl但有metalink配置项,下载metalink.xml文件,解析metalink.xml文件,可获得repomd.xml的下载链接。 -2. 解析 repomd.xml文件,含有 primary,filelists,other,updateinfo等xml文件的元数据,再基于baseurl即可得对应的下载链接,这些xml文件含有对应repo镜像仓库的软件包元数据,保存于/var/cache/rdnf/repodata文件夹下。 -3. 基于libsolv,将repo仓库的primary等xml文件生成.solv文件,保存于/var/cache/rdnf/solvcache/文件夹下 -4. 
再将.solv加载到pool中,pool可以理解为是libsolv环境中的上下文环境 - -#### search -搜索软件包,基于makecache命令,将xml文件生成.solv,再加载到pool,利用pool和libsolv提供的接口可轻松搜索软件包的元数据信息。 - -#### install,remove,update,reinstall -这几个命令操作类似。 -主要思路为两大部分,先是基于libsolv解决软件包依赖冲突问题,再使用rpm-devel解决rpm软件包具体安装问题。 -+ 使用是prepare_all_pkgs, 初步解决是否已经安装,安装的是否是最新版本的问题,将已经初步处理过,需要进一步解决的软件包id放入queue_goal中,在goal中完成主要通过*solver_solve*解决软件包依赖冲突问题,生成安装或卸载列表,然后基于列表中的软件包id获取软件包的详细信息,例如软件包架构,版本,大小的信息。 -+ 生成rpmts,可以理解为rpm-devel环境中的上下文环境,根据上述的软件包列表,下载软件包,若有必要,需要解决gpgkey问题。根据需要设置rpm事务的参数flag,并设置rpm回调函数(由于c难以回调rust函数,故直接使用c实现),由rpm-devel解决具体的rpm安装事务,在执行过程中,回调之前设置的函数,实现单个软件包安装进度的打印显示。 +# rdnf 0.2 思路 +## indradb + +indradb图数据库是基于kv存储引擎,主要是基于rocksdb。基本元素主要有三:Edge、Vertex、Property(包含edge_property、vertex_property)。 + +原理如下 + ++ VertexManager + + + key:vertex.id + + value:vertex.identifier + ++ EdgeRangeManager + + + key:[edge.outbound_id,edge.identifier,edge.inbound_id] + + value:null + + 因为key是排序过的,故可通过迭代的方式查找对应 inbound_id的集合,即vertex.id集合。 + + 同理可得ReversedEdgeRangeManager + + + key[edge.inbound_id,edge.identifier,edge.outbound_id] + + value:null + ++ VertexPropertyManager + + + key:[vertex.id,identifier] + + value:json + ++ EdgePropertyManager + + + key:[edge.outbound_id,edge.identifier,edge.inbound_id,identifier] + + value:json + ++ VertexPropertyManager + + + key:[identifier,json,vertex_id] + + value:null + + 通过迭代遍历排序过的key,得到对应的vertex_id,或得到对应的json和vertex_id + ++ EdgePropertyManager + + + key:[identifier,json,edge.outbound_id,edge.identifier,edge.inbound] + + value:null + + 同上 + +## 图数据库建模 + +![rdnf.drawio](./rdnf.drawio.png) + +最初的方案设计是name、arch单独作为一个(identifer,value),但是这会造成大量kv键值对,数据库的写入压力非常大。故而软件包的元数据信息包装成为pkgdetail和formatdetail。同样的,将requires由类似与provides的边关系,转变为property。 + +每个package provides多个entry,每个package requires多个entry。 + +## Repo + +按照优先级有三层repo + ++ cmd_repo:有这样的场景: *dnf install file:///....x86_64.rpm https://.....i686.rpm* 将url的rpm下载到本地,将这些本地的rpm文件,解析其头文件,将元数据导入到cmd_repo,由于是用户指定的rpm文件,故而优先级最高。 ++ installed_repo: 
操作系统将已经安装后的rpm包的元数据信息,导入到rpmdb.sqlite文件中,路径一般为 **/var/lib/rpm/rpmdb.sqlite** ,由于操作的不便,目前采用的是将rpmdb.sqlite的数据导入到installed_repo,根据rpmdb.sqlite的*sqlite_sequence* 和installed_seq中的记载来判断是否需要同步。在使用rdnf的同时,对rpmdb.sqlite文件加锁。 ++ repos: 将多个远程的软件包仓库的元数据xml文件解析成Vec\ + +## SAT依赖解析 + +每个package requires多个Entry + +对每个requier_entry:,将其解析为SolveItem + +```rust +pub(self) struct SolveItem { + entry_name: Arc, + ver: Version, + flag: Option, +} +其中,pub struct Version { + pub epoch: Option, + pub version: Option, + pub release: Option, +} +``` + +依次从cmd_repo、installed_repo、repos查找能够提供满足需要的Entry的Package ($P_i,i=1,2,..m$)。 + ++ provide 语义 + +$$ +(P_1 \vee P_2 \vee P_3..\vee P_m \vee \neg Entry_1)\\ +当 Entry_1 为True,则P_1..P_m中至少有一个必须为True +$$ + + ++ require 语义:对于Package的reuqire Entry + +$$ +(Entry_i \vee \neg Package)\\ +当Package为True,则Entry_i必为True \\ +由公式(1)可得,提供Entry_i的多个package至少有一个为True. +$$ + ++ conflict : $Package_A$对于conflict entry,满足entry条件的多个package ($P_i,i=1,2,..m$) + +$$ +(\neg P_A \vee \neg P_i) \wedge (\neg P_A \vee \neg P_2) ...\wedge (\neg P_A \vee \neg P_m) +$$ ++ obsolete同上。 + + + +当Require为Term时,即类似于**((feh and xrandr) if Xserver)**的 +$$ +(\neg Package \vee Term_{out})\wedge (\neg Term_{out} \vee \neg Entry_{Xserver}\vee Term_{in}) \\ +\wedge (\neg Term_{in} \vee Entry_{feh}) \wedge (\neg Term_{in} \vee Entry_{xrandr}) +$$ + ++ 对于or 语义 (a or b or c),a、b、c既可以是Entry_id也可以是Term_id + +$$ +(A \vee B \vee C \vee \neg Term) +$$ + ++ 对于and 语义(a and b and c) + $$ + (\neg Term \vee A)\wedge (\neg Term \vee B) \wedge (\neg Term \vee c) + $$ + ++ 对于 if 语义 (m if p) + $$ + Term \rightarrow (P \rightarrow M) \\ + (\neg Term \vee \neg P \vee M) + $$ + ++ 对于 if else 语义 (m if p else n) + $$ + Term \rightarrow (P \rightarrow M) \\ + Term \rightarrow (\neg P \rightarrow N) \\ + (\neg Term \vee \neg P \vee M) \wedge (\neg Term \vee P \vee N) + $$ + + +​ unless unless else 同上。 + ++ 对于with 语义,通过满足多个Entry的多个pkg 的交集,without 即减集 + + + +## 下一步目标 + ++ makecache 构建缓存后,再次调用 
rdnf其他命令,可能会出现报错(某个文件不存在)(Bug) ++ 目前使用sat算法求解器是 varisat,该求解器的策略是,用最少数量为True的变量满足所有的Clause,不符合要求。在or 语义和provide语义中,package应该是存在优先级的,即先按arch(x86_64一定是优于i686)、然后是按repo排序(cmd_repo > install_repo > repos,其中repos是按配置文件中的priority排序)。需要对sat求解算法改进。 ++ 在本地建立缓存,使用的rocksdb效果不是很理想,kv分离的lsm树存储引擎比较理想,例如badger。 \ No newline at end of file diff --git a/rdnf/assest.tar.gz b/rdnf/assest.tar.gz new file mode 100644 index 0000000000000000000000000000000000000000..e3fe88fddb8fde87b626704602792a9ea3235ed4 Binary files /dev/null and b/rdnf/assest.tar.gz differ diff --git a/rdnf/build.rs b/rdnf/build.rs deleted file mode 100644 index 9c9d971376a1c59214cfc3541cf95792172f89fb..0000000000000000000000000000000000000000 --- a/rdnf/build.rs +++ /dev/null @@ -1,6 +0,0 @@ -fn main(){ - cc::Build::new() - .file("src/c_lib/queue_static.c") - .file("src/c_lib/rpm_trans.c") - .compile("queue_static"); -} \ No newline at end of file diff --git a/rdnf/rdnf.drawio.png b/rdnf/rdnf.drawio.png new file mode 100644 index 0000000000000000000000000000000000000000..43acb64ae7d7fffccb29187a71f6cd234467ac2b Binary files /dev/null and b/rdnf/rdnf.drawio.png differ diff --git a/rdnf/rpm-sys/Cargo.toml b/rdnf/rpm-sys/Cargo.toml deleted file mode 100644 index a0ee17ed3a0396c63fe79b011eafae2ba4ae7575..0000000000000000000000000000000000000000 --- a/rdnf/rpm-sys/Cargo.toml +++ /dev/null @@ -1,16 +0,0 @@ -[package] -name = "rpm-sys" -version = "0.1.0" -edition = "2021" - -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html - -[dependencies] -libc='0.2' - -[build-dependencies] -bindgen = "0.60.1" -cc = "1" -pkg-config = "0.3.25" -cmake = "0.1.48" -anyhow = "1" diff --git a/rdnf/rpm-sys/build.rs b/rdnf/rpm-sys/build.rs deleted file mode 100644 index 1e4e47d18cf859aedced894999237d1989917bb2..0000000000000000000000000000000000000000 --- a/rdnf/rpm-sys/build.rs +++ /dev/null @@ -1,50 +0,0 @@ -use anyhow::Result; -use std::{fs, path::Path}; -const ALLOWED_FUNC_PREFIX: &[&str] = &[ - 
"arg", "header", "rpm", "F", "fd", "pgp", "ri", "rs", "rr", "rc", "rm", "url", -]; -const ALLOWED_TYPE_PREFIX: &[&str] = &[ - "ARG", - "Header", - "HEADER", - "header", - "rpm", - "poptContext", - "FD", - "off", - "pgp", - "DIGEST", - "fnpyKey", - "url", -]; -fn main() -> Result<()> { - let conf = pkg_config::Config::new(); - let lib = conf.probe("rpm")?; - for inc in lib.include_paths { - // println!("{:?}",inc); - if inc.join("rpm").is_dir() { - let include_path = inc.join("rpm"); - let output = std::env::var("OUT_DIR")?; - let mut builder = bindgen::Builder::default() - .header(include_path.join("argv.h").to_str().unwrap()) - .header(include_path.join("header.h").to_str().unwrap()) - .header(include_path.join("rpmtypes.h").to_str().unwrap()); - for inc in fs::read_dir(include_path)? { - let inc = inc?; - let name = inc.file_name(); - let name = name.to_string_lossy(); - if name.starts_with("rpm") && name.ends_with(".h") { - builder = builder.header(inc.path().to_str().unwrap()); - } - } - builder - .allowlist_type(format!("({}).*", ALLOWED_TYPE_PREFIX.join("|"))) - .allowlist_var(".*") - .allowlist_function(format!("({}).*", ALLOWED_FUNC_PREFIX.join("|"))) - .generate() - .unwrap() - .write_to_file(Path::new(&output).join("bindings.rs"))?; - } - } - Ok(()) -} diff --git a/rdnf/rpm-sys/src/ffi.rs b/rdnf/rpm-sys/src/ffi.rs deleted file mode 100644 index 66e09db5b29097991743c9c24e7450b5e98b1f27..0000000000000000000000000000000000000000 --- a/rdnf/rpm-sys/src/ffi.rs +++ /dev/null @@ -1,2 +0,0 @@ -#![allow(warnings)] -include!(concat!(env!("OUT_DIR"), "/bindings.rs")); diff --git a/rdnf/rpm-sys/src/lib.rs b/rdnf/rpm-sys/src/lib.rs deleted file mode 100644 index ff83b69d11c99fb29c76e0a09a7ef46b4573e01e..0000000000000000000000000000000000000000 --- a/rdnf/rpm-sys/src/lib.rs +++ /dev/null @@ -1,16 +0,0 @@ -pub mod ffi; - -pub fn add(left: usize, right: usize) -> usize { - left + right -} - -#[cfg(test)] -mod tests { - use super::*; - #[test] - fn it_works() { - unsafe { 
- let rpmts = ffi::rpmtsCreate(); - } - } -} diff --git a/rdnf/rpm/.gitignore b/rdnf/rpm/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..f48bd9018e5cdf7772fd840411b99350430833c7 --- /dev/null +++ b/rdnf/rpm/.gitignore @@ -0,0 +1,8 @@ +/target +**/*.rs.bk +out +**/*.rpm +Cargo.lock +!test_assets/* +dnf-cache +.idea \ No newline at end of file diff --git a/rdnf/rpm/CHANGELOG.md b/rdnf/rpm/CHANGELOG.md new file mode 100644 index 0000000000000000000000000000000000000000..78239996209b4b7696828c820e8829a115b7567e --- /dev/null +++ b/rdnf/rpm/CHANGELOG.md @@ -0,0 +1,48 @@ +# Changelog +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## [Unreleased] + +### Added + +- Added a `RPMPackage::open()` helper for working with files +- Set RPMTAG_ENCODING to "utf-8" on all built RPMs + +### Fixed + +- Added `rpmlib()` dependencies to built packages as appropriate + +### Breaking Changes + +- Bump MSRV to 1.63.0 +- Removed async support from default features +- Removed `Lead` from the public API. `Lead` is long-deprecated and shouldn't be relied on. + Restricted the data we write to `Lead` to the bare minimum required for compatibility. + +## 0.9.0 + +### Breaking Changes + +- Bump MSRV to 1.60.0 +- Changed a couple of APIs to use unsigned integers instead of signed integers where appropriate +- Moved pre-defined helpers for common package metadata (such as name, version, file lists, etc.) 
+ from `$pkg.metadata.header` to `$pkg.metadata` +- Removed the `$pkg.metadata.get_file_ima_signature_length()` function + +### Added + +- Forked from `rpm-rs` at version 0.8.1 +- Relicensed as MIT + Apache 2.0 after obtaining consent from all contributors +- Added additional helper methods for retrieving commonly used metadata +- Add vendor, url and vcs metadata optional fields to RPMBuilder + +### Fixed + +- Updated dependencies to latest versions +- Fix up most issues when compiling with --no-default-features. +- Fixed an issue with improper package signing + +[Unreleased]: https://github.com/rpm-rs/rpm-rs/compare/vTODO...HEAD diff --git a/rdnf/rpm/Cargo.toml b/rdnf/rpm/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..fe01431db27715d7cf32c53c4061d0cba32a0af7 --- /dev/null +++ b/rdnf/rpm/Cargo.toml @@ -0,0 +1,72 @@ +[package] +name = "rpm" +version = "0.9.0" +authors = [ + "René Richter ", + "Bernhard Schuster ", + "Max Dymond ", + "Daniel Alley " +] +edition = "2021" +license = "Apache-2.0 OR MIT" +description = "A pure rust library for building and parsing RPMs" +homepage = "https://github.com/rpm-rs/rpm" +repository = "https://github.com/rpm-rs/rpm" +readme = "README.md" +keywords = ["RPM", "packaging"] +categories = ["parsing", "development-tools"] +rust-version = "1.63.0" + +[lib] +name = "rpm" + +[dependencies] +thiserror = "1" +nom = "7" +num-traits = "0.2" +num-derive = "0.3" +num = "0.4" +enum-primitive-derive = "0.2" +enum-display-derive = "0.1" +cpio = "0.2" +# consider migrating to flate2 +libflate = "1" +sha2 = "0.10" +md-5 = "0.10" +sha1 = "0.10" +rand = { version = "0.8" } +pgp = { version="0.10", optional = true } +chrono = "0.4" +log = "0.4" +itertools = "0.10" +hex = { version = "0.4", features = ["std"] } +zstd = "0.12.0" +futures = { version = "0.3.25", optional = true } +rusqlite = "0.29.0" +# Libraries required for with_file_async() implementations +async-std = { version = "1.12.0", optional = true } +tokio = 
{version = "1", optional = true} +tokio-util = { version = "0.7.4", features = ["compat"], optional = true} + +[dev-dependencies] +rsa = { version = "0.8" } +rsa-der = { version = "^0.3.0" } +env_logger = "0.10.0" +serial_test = "1.0" + +# Use for testing async files when async-futures enabled +tokio = {version = "1", features = ["full"]} +tokio-util = { version = "0.7.4", features = ["compat"]} + +[features] +default = ["signature-pgp"] + +signature-pgp = ["signature-meta", "pgp"] +signature-meta = [] +async-futures = ["futures"] + +# The use of these features implies that async-futures are being used - +# explicitly define this feature. +test-with-podman = ["async-futures", "signature-meta", "tokio-util"] +with-file-async-tokio = ["async-futures", "tokio/fs", "tokio-util"] +with-file-async-async-std = ["async-futures", "async-std"] diff --git a/rdnf/rpm/LICENSE-APACHE b/rdnf/rpm/LICENSE-APACHE new file mode 100644 index 0000000000000000000000000000000000000000..ef7b82aaeea9144b6bf3d2e4d3fa6d7743d43a25 --- /dev/null +++ b/rdnf/rpm/LICENSE-APACHE @@ -0,0 +1,13 @@ +Copyright 2019 René Richter + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
\ No newline at end of file diff --git a/rdnf/rpm/LICENSE-MIT b/rdnf/rpm/LICENSE-MIT new file mode 100644 index 0000000000000000000000000000000000000000..468cd79a8f6e50f2b24558c41ed3abafa5bb40ae --- /dev/null +++ b/rdnf/rpm/LICENSE-MIT @@ -0,0 +1,23 @@ +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. \ No newline at end of file diff --git a/rdnf/rpm/README.md b/rdnf/rpm/README.md new file mode 100644 index 0000000000000000000000000000000000000000..02f135aa7ee0c515624b4cac99f901d4ed593e48 --- /dev/null +++ b/rdnf/rpm/README.md @@ -0,0 +1,75 @@ +[![crates.io](https://img.shields.io/crates/v/rpm.svg)](https://crates.io/crates/rpm) +[![docs.rs](https://docs.rs/rpm/badge.svg)](https://docs.rs/rpm) +[![MSRV](https://img.shields.io/badge/rustc-1.63.0+-ab6000.svg)](https://blog.rust-lang.org/2022/08/11/Rust-1.63.0.html) +![status](https://github.com/rpm-rs/rpm/actions/workflows/ci.yml/badge.svg) + +## RPM-RS + +A pure rust library for parsing and creating RPM files. 
+ +### Goals + +- Easy to use API +- Pure rust to make it easy to use in larger Projects +- Independence of Spec files. Pure programmatic interface for Packaging. +- Compatibility to Centos 7 / Fedora (I may extend test cases for SUSE) + +### Non Goals + +RPM has a lot of cryptic features. I do not want to re-implement all of them. This library focuses on +the ones that I assume as useful. +This library does not build software like rpmbuild. It is meant for finished artifacts that need to be packaged as RPM. + +### Status + +- [x] RPM Creation +- [x] Basic RPM Reading +- [x] RPM Signing and Signature Verification +- [ ] High Level API for RPM Reading + + + +### Examples + +```rust +use std::str::FromStr; + +use rpm; +use rpm::signature::pgp::{Signer,Verifier}; + +let raw_secret_key = std::fs::read("/path/to/gpg.secret.key")?; +let pkg = rpm::RPMBuilder::new("test", "1.0.0", "MIT", "x86_64", "some awesome package") + .compression(rpm::Compressor::from_str("gzip")?) + .with_file( + "./awesome-config.toml", + rpm::RPMFileOptions::new("/etc/awesome/config.toml").is_config(), + )? + // file mode is inherited from source file + .with_file( + "./awesome-bin", + rpm::RPMFileOptions::new("/usr/bin/awesome"), + )? + .with_file( + "./awesome-config.toml", + // you can set a custom mode and custom user too + rpm::RPMFileOptions::new("/etc/awesome/second.toml").mode(0o100744).user("hugo"), + )? 
+ .pre_install_script("echo preinst") + .add_changelog_entry("me", "was awesome, eh?", 123123123) + .add_changelog_entry("you", "yeah, it was", 12312312) + .requires(rpm::Dependency::any("wget")) + .vendor("corporation or individual") + .url("www.github.com/repo") + .vcs("git:repo=example_repo:branch=example_branch:sha=example_sha") + .build_and_sign(Signer::load_from_asc_bytes(&raw_secret_key)?); + +let mut f = std::fs::File::create("./awesome.rpm")?; +pkg.write(&mut f)?; + +// reading +let raw_pub_key = std::fs::read("/path/to/gpg.key.pub")?; +let pkg = rpm::RPMPackage::open("test_assets/389-ds-base-devel-1.3.8.4-15.el7.x86_64.rpm")?; + +// verifying +pkg.verify_signature(Verifier::load_from_asc_bytes(&raw_pub_key)?)?; +``` diff --git a/rdnf/rpm/src/compat_tests.rs b/rdnf/rpm/src/compat_tests.rs new file mode 100644 index 0000000000000000000000000000000000000000..f2bcc376cd8a938565088894bf54f470af79500b --- /dev/null +++ b/rdnf/rpm/src/compat_tests.rs @@ -0,0 +1,545 @@ +use super::*; +use std::io::prelude::*; +use std::io::BufReader; +use std::process::Stdio; +use std::str::FromStr; + +fn test_rpm_file_path() -> std::path::PathBuf { + let mut rpm_path = cargo_manifest_dir(); + rpm_path.push("test_assets/389-ds-base-devel-1.3.8.4-15.el7.x86_64.rpm"); + rpm_path +} + +fn cargo_manifest_dir() -> std::path::PathBuf { + std::path::PathBuf::from(env!("CARGO_MANIFEST_DIR")) +} + +fn cargo_out_dir() -> std::path::PathBuf { + cargo_manifest_dir().join("target") +} + +#[cfg(feature = "signature-meta")] +use signature::{self, Verifying}; + +#[cfg(feature = "signature-pgp")] +mod pgp { + use super::*; + use futures::io::AsyncWriteExt; + use signature::pgp::{Signer, Verifier}; + use tokio_util::compat::TokioAsyncReadCompatExt; + + #[tokio::test] + #[serial_test::serial] + async fn create_full_rpm_async() -> Result<(), Box> { + let _ = env_logger::try_init(); + let (signing_key, _) = crate::signature::pgp::test::load_asc_keys(); + + let signer = 
Signer::load_from_asc_bytes(signing_key.as_ref()) + .expect("Must load signer from signing key"); + + let cargo_file = cargo_manifest_dir().join("Cargo.toml"); + let out_file = cargo_out_dir().join("test.rpm"); + + let mut f = tokio::fs::File::create(out_file).await?.compat(); + let pkg = RPMBuilder::new("test", "1.0.0", "MIT", "x86_64", "some package") + .compression(Compressor::from_str("gzip")?) + .with_file_async( + cargo_file.to_str().unwrap(), + RPMFileOptions::new("/etc/foobar/foo.toml"), + ) + .await? + .with_file_async( + cargo_file.to_str().unwrap(), + RPMFileOptions::new("/etc/foobar/zazz.toml"), + ) + .await? + .with_file_async( + cargo_file.to_str().unwrap(), + RPMFileOptions::new("/etc/foobar/hugo/bazz.toml") + .mode(0o100_777) + .is_config(), + ) + .await? + .with_file_async( + cargo_file.to_str().unwrap(), + RPMFileOptions::new("/etc/foobar/bazz.toml"), + ) + .await? + .with_file_async( + cargo_file.to_str().unwrap(), + RPMFileOptions::new("/etc/foobar/hugo/aa.toml"), + ) + .await? + .with_file_async( + cargo_file.to_str().unwrap(), + RPMFileOptions::new("/var/honollulu/bazz.toml"), + ) + .await? + .with_file_async( + cargo_file.to_str().unwrap(), + RPMFileOptions::new("/etc/Cargo.toml"), + ) + .await? 
+ .epoch(1) + .pre_install_script("echo preinst") + .add_changelog_entry("me", "was awesome, eh?", 123_123_123) + .add_changelog_entry("you", "yeah, it was", 12_312_312) + .requires(Dependency::any("rpm-sign".to_string())) + .vendor("dummy vendor") + .url("dummy url") + .vcs("dummy vcs") + .build_and_sign(signer)?; + + pkg.write_async(&mut f).await?; + f.flush().await?; + let epoch = pkg.metadata.get_epoch()?; + assert_eq!(1, epoch); + + let yum_cmd = "yum --disablerepo=updates,updates-testing,updates-modular,fedora-modular install -y /out/test.rpm;"; + let dnf_cmd = "dnf --disablerepo=updates,updates-testing,updates-modular,fedora-modular install -y /out/test.rpm;"; + let rpm_sig_check = "rpm -vv --checksig /out/test.rpm 2>&1;".to_string(); + + [ + ("fedora:36", rpm_sig_check.as_str()), + ("fedora:36", dnf_cmd), + ("centos:stream9", yum_cmd), + ("centos:7", yum_cmd), + ] + .iter() + .try_for_each(|(image, cmd)| { + podman_container_launcher(cmd, image, vec![])?; + Ok(()) + }) + } + + #[test] + #[serial_test::serial] + fn create_empty_rpm() -> Result<(), Box> { + let pkg = RPMBuilder::new("foo", "1.0.0", "MIT", "x86_64", "an empty package").build()?; + let out_file = cargo_out_dir().join("test.rpm"); + + let mut f = std::fs::File::create(out_file)?; + pkg.write(&mut f)?; + let yum_cmd = "yum --disablerepo=updates,updates-testing,updates-modular,fedora-modular install -y /out/test.rpm;"; + let dnf_cmd = "dnf --disablerepo=updates,updates-testing,updates-modular,fedora-modular install -y /out/test.rpm;"; + + [ + ("fedora:36", dnf_cmd), + ("centos:stream9", yum_cmd), + ("centos:7", yum_cmd), + ] + .iter() + .try_for_each(|(image, cmd)| { + podman_container_launcher(cmd, image, vec![])?; + Ok(()) + }) + } + + #[test] + #[serial_test::serial] + fn create_full_rpm_with_signature_and_verify_externally( + ) -> Result<(), Box> { + let _ = env_logger::try_init(); + let (signing_key, _) = crate::signature::pgp::test::load_asc_keys(); + + let signer = 
Signer::load_from_asc_bytes(signing_key.as_ref()) + .expect("Must load signer from signing key"); + + let cargo_file = cargo_manifest_dir().join("Cargo.toml"); + let out_file = cargo_out_dir().join("test.rpm"); + + let mut f = std::fs::File::create(out_file)?; + let pkg = RPMBuilder::new("test", "1.0.0", "MIT", "x86_64", "some package") + .compression(Compressor::from_str("gzip")?) + .with_file( + cargo_file.to_str().unwrap(), + RPMFileOptions::new("/etc/foobar/foo.toml"), + )? + .with_file( + cargo_file.to_str().unwrap(), + RPMFileOptions::new("/etc/foobar/zazz.toml"), + )? + .with_file( + cargo_file.to_str().unwrap(), + RPMFileOptions::new("/etc/foobar/hugo/bazz.toml") + .mode(0o100_777) + .is_config(), + )? + .with_file( + cargo_file.to_str().unwrap(), + RPMFileOptions::new("/etc/foobar/bazz.toml"), + )? + .with_file( + cargo_file.to_str().unwrap(), + RPMFileOptions::new("/etc/foobar/hugo/aa.toml"), + )? + .with_file( + cargo_file.to_str().unwrap(), + RPMFileOptions::new("/var/honollulu/bazz.toml"), + )? + .with_file( + cargo_file.to_str().unwrap(), + RPMFileOptions::new("/etc/Cargo.toml"), + )? 
+ .epoch(1) + .pre_install_script("echo preinst") + .add_changelog_entry("me", "was awesome, eh?", 123_123_123) + .add_changelog_entry("you", "yeah, it was", 12_312_312) + .requires(Dependency::any("rpm-sign".to_string())) + .vendor("dummy vendor") + .url("dummy repo") + .vcs("git:repo=example_repo:branch=example_branch:sha=example_sha") + .build_and_sign(signer)?; + + pkg.write(&mut f)?; + let epoch = pkg.metadata.get_epoch()?; + assert_eq!(1, epoch); + + let yum_cmd = "yum --disablerepo=updates,updates-testing,updates-modular,fedora-modular install -y /out/test.rpm;"; + let dnf_cmd = "dnf --disablerepo=updates,updates-testing,updates-modular,fedora-modular install -y /out/test.rpm;"; + let rpm_sig_check = "rpm -vv --checksig /out/test.rpm 2>&1;".to_string(); + + [ + ("fedora:36", rpm_sig_check.as_str()), + ("fedora:36", dnf_cmd), + ("centos:stream9", yum_cmd), + ("centos:7", yum_cmd), + ] + .iter() + .try_for_each(|(image, cmd)| { + podman_container_launcher(cmd, image, vec![])?; + Ok(()) + }) + } + + #[test] + #[serial_test::serial] + fn parse_externally_signed_rpm_and_verify() -> Result<(), Box> { + let _ = env_logger::try_init(); + let (signing_key, verification_key) = crate::signature::pgp::test::load_asc_keys(); + + let cargo_file = cargo_manifest_dir().join("Cargo.toml"); + let out_file = cargo_out_dir().join("roundtrip.rpm"); + + { + let signer = Signer::load_from_asc_bytes(signing_key.as_ref())?; + + let mut f = std::fs::File::create(&out_file)?; + let pkg = RPMBuilder::new( + "roundtrip", + "1.0.0", + "MIT", + "x86_64", + "spins round and round", + ) + .compression(Compressor::from_str("gzip")?) + .with_file( + cargo_file.to_str().unwrap(), + RPMFileOptions::new("/etc/foobar/hugo/bazz.toml") + .mode(FileMode::regular(0o777)) + .is_config(), + )? + .with_file( + cargo_file.to_str().unwrap(), + RPMFileOptions::new("/etc/Cargo.toml"), + )? 
+ .epoch(3) + .pre_install_script("echo preinst") + .add_changelog_entry("you", "yada yada", 12_317_712) + .requires(Dependency::any("rpm-sign".to_string())) + .build_and_sign(&signer)?; + + pkg.write(&mut f)?; + let epoch = pkg.metadata.get_epoch()?; + assert_eq!(3, epoch); + } + + // verify + { + let out_file = std::fs::File::open(&out_file).expect("should be able to open rpm file"); + let mut buf_reader = std::io::BufReader::new(out_file); + let package = RPMPackage::parse(&mut buf_reader)?; + + let verifier = Verifier::load_from_asc_bytes(verification_key.as_ref())?; + + package.verify_signature(verifier)?; + } + + Ok(()) + } + + #[test] + #[serial_test::serial] + fn create_signed_rpm_and_verify() -> Result<(), Box> { + let _ = env_logger::try_init(); + let (_, verification_key) = crate::signature::pgp::test::load_asc_keys(); + + let verifier = Verifier::load_from_asc_bytes(verification_key.as_ref())?; + + let rpm_file_path = test_rpm_file_path(); + let out_file = cargo_out_dir().join(rpm_file_path.file_name().unwrap().to_str().unwrap()); + + println!("cpy {} -> {}", rpm_file_path.display(), out_file.display()); + std::fs::copy(rpm_file_path.as_path(), out_file.as_path()).expect("Must be able to copy"); + + // avoid any further usage + drop(rpm_file_path); + + let cmd = format!( + r#" +echo ">>> sign" +rpm -vv --addsign /out/{rpm_file} 2>&1 + +echo ">>> verify signature with rpm" +rpm -vv --checksig /out/{rpm_file} 2>&1 +"#, + rpm_file = out_file.file_name().unwrap().to_str().unwrap() + ); + + podman_container_launcher(cmd.as_str(), "fedora:36", vec![])?; + + let out_file = std::fs::File::open(&out_file).expect("should be able to open rpm file"); + let mut buf_reader = std::io::BufReader::new(out_file); + let package = RPMPackage::parse(&mut buf_reader)?; + + package.verify_signature(verifier)?; + + Ok(()) + } + + #[test] + #[serial_test::serial] + fn create_signature_with_gpg_and_verify() -> Result<(), Box> { + let _ = env_logger::try_init(); + let 
(_signing_key, verification_key) = crate::signature::pgp::test::load_asc_keys(); + + let test_file = cargo_out_dir().join("test.file"); + let test_file_sig = cargo_out_dir().join("test.file.sig"); + + std::fs::write(&test_file, "test").expect("Must be able to write"); + let _ = std::fs::remove_file(&test_file_sig); + + let cmd= r#" +echo "test" > /out/test.file + +echo ">>> sign like rpm" +cmd="$(rpm -vv --define "__signature_filename /out/test.file.sig" \ + --define "__plaintext_filename /out/test.file" \ + --define "_gpg_name Package Manager" \ + --eval "%{__gpg_sign_cmd}" | sd '\n' ' ')" + +echo "cmd: ${cmd}" +eval ${cmd} + +alias gpg='gpg --batch --verbose --keyid-format long --no-armor --pinentry-mode error --no-secmem-warning --local-user "Package Manager"' +#gpg \ +# --sign \ +# --detach-sign \ +# --output /out/test.file.sig \ +# /out/test.file 2>&1 + +echo ">>> inspect signature" +gpg -d /out/test.file.sig 2>&1 + +echo ">>> verify external gpg signature" +gpg --verify /out/test.file.sig /out/test.file 2>&1 + +"#.to_owned(); + + podman_container_launcher(cmd.as_str(), "fedora:36", vec![]) + .expect("Container execution must be flawless"); + + let verifier = + Verifier::load_from_asc_bytes(verification_key.as_slice()).expect("Must load"); + + let raw_sig = std::fs::read(&test_file_sig).expect("must load signature"); + let data = std::fs::read(&test_file).expect("must load file"); + verifier.verify(data.as_slice(), raw_sig.as_slice())?; + + Ok(()) + } +} + +fn wait_and_print_helper(mut child: std::process::Child, stdin_cmd: &str) -> std::io::Result<()> { + if let Some(ref mut stdin) = child.stdin { + write!(stdin, "{}", stdin_cmd).unwrap(); + } else { + unreachable!("Must have stdin"); + } + // not perfect, but gets it done + if let Some(ref mut stdout) = child.stdout { + if let Some(ref mut stderr) = child.stderr { + let stdout_rdr = BufReader::new(stdout); + let mut stdout_line = stdout_rdr.lines(); + + let stderr_rdr = BufReader::new(stderr); + let mut 
stderr_line = stderr_rdr.lines(); + + let mut done: bool = false; + while !done { + done = true; + // can not be written as for loop + while let Some(line) = stdout_line.next() { + done = false; + println!("[stdout] {}", line.unwrap().as_str()); + } + while let Some(line) = stderr_line.next() { + done = false; + println!("[stderr] {}", line.unwrap().as_str()); + } + } + } else { + unreachable!("Must have stderr"); + } + } else { + unreachable!("Must have stdout"); + } + + let status = child.wait()?; + assert!(status.success()); + Ok(()) +} + +fn podman_container_launcher( + cmd: &str, + image: &str, + mut mappings: Vec, +) -> std::io::Result<()> { + // always mount assets and out directory into container + let var_cache = cargo_manifest_dir().join("dnf-cache"); + let _ = std::fs::create_dir(var_cache.as_path()); + let var_cache = format!("{}:/var/cache/dnf:z", var_cache.display()); + let out = format!("{}:/out:z", cargo_out_dir().display()); + let assets = format!("{}/test_assets:/assets:z", cargo_manifest_dir().display()); + mappings.extend(vec![out, assets, var_cache]); + let mut args = mappings + .iter() + .fold(vec!["run", "-i", "--rm"], |mut acc, mapping| { + acc.extend(vec!["-v", mapping]); + acc + }); + args.extend(vec![image, "sh"]); + + let mut podman_cmd = std::process::Command::new("podman"); + + podman_cmd.args(dbg!(args)); + podman_cmd.stdout(Stdio::piped()).stderr(Stdio::piped()); + podman_cmd.stdin(Stdio::piped()); + + podman_cmd.stdout(Stdio::piped()).stderr(Stdio::piped()); + + podman_cmd.stdin(Stdio::piped()); + + // partially following: + // + // https://access.redhat.com/articles/3359321 + let cmd = vec![ + r#" +set -e + +# prepare rpm macros + +cat > ~/.rpmmacros << EOF_RPMMACROS +%_signature gpg +%_gpg_path /root/.gnupg +%_gpg_name Package Manager +%_gpgbin /usr/bin/gpg2 +%__gpg_sign_cmd %{__gpg} \ + --batch \ + --verbose \ + --no-armor \ + --keyid-format long \ + --pinentry-mode error \ + --no-secmem-warning \ + 
%{?_gpg_digest_algo:--digest-algo %{_gpg_digest_algo}} \ + --local-user "%{_gpg_name}" \ + --sign \ + --detach-sign \ + --output %{__signature_filename} \ + %{__plaintext_filename} +EOF_RPMMACROS + +cat ~/.rpmmacros + +### either + +#cat > gpgkeyspec <&1 + +### or (which has a couple of advantages regarding reproducibility) + +export PK=/assets/public_key.asc +export SK=/assets/secret_key.asc + +gpg --allow-secret-key-import --import "${SK}" 2>&1 +gpg --import "${PK}" 2>&1 + +gpg --keyid-format long --list-secret-keys +gpg --keyid-format long --list-public-keys + +echo -e "5\ny\n" | gpg --no-tty --command-fd 0 --expert --edit-key 2E5A802A67EA36B83018F654CFD331925AB27F39 trust; + + + +echo "\### create a test signature with this particular key id" + +echo "test" | gpg -s --local-user "77500CC056DB3521" > /tmp/test.signature 2>&1 +gpg -d < /tmp/test.signature 2>&1 + +echo "\### export PK" + +gpg --export -a "Package Manager" > /assets/RPM-GPG-KEY-pmanager + +dig1=$(gpg "/assets/RPM-GPG-KEY-pmanager" | sha256sum) +dig2=$(gpg "${PK}" | sha256sum) + +if [ "$dig1" != "$dig2" ]; then +echo "\### expected pub key and exported pubkey differ" + echo "EEE /assets/RPM-GPG-KEY-pmanager" + gpg /assets/RPM-GPG-KEY-pmanager + echo "EEE ${PK}" + gpg "${PK}" + exit 77 +fi + +echo "\### install tooling for signing" + +dnf install --disablerepo=updates,updates-testing,updates-modular -y rpm-sign sd || \ +yum install --disablerepo=updates,updates-testing,updates-modular -y rpm-sign + +echo "\### import pub key" + +rpm -vv --import "${PK}" 2>&1 + +set -x + +"#, +cmd, +r#" + +echo "\### Container should exit any second now" +exit 0 +"#].join("\n"); + + println!("Container execution starting..."); + + // this is far from perfect, but at least pumps + // stdio and stderr out + wait_and_print_helper(podman_cmd.spawn()?, cmd.as_str())?; + println!("Container execution ended."); + Ok(()) +} diff --git a/rdnf/rpm/src/constants.rs b/rdnf/rpm/src/constants.rs new file mode 100644 index 
0000000000000000000000000000000000000000..ebee9560724f7841a2dcf07f8a75b5df769ad538 --- /dev/null +++ b/rdnf/rpm/src/constants.rs @@ -0,0 +1,528 @@ +//! RPM specific constants +//! +//! These constants were extracted from the rpm upstream project +//! C headers. + +use std::fmt::Display; + +pub const HEADER_IMAGE: u32 = 61; +pub const HEADER_SIGNATURES: u32 = 62; +pub const HEADER_IMMUTABLE: u32 = 63; +pub const HEADER_REGIONS: u32 = 64; +pub const HEADER_I18NTABLE: u32 = 100; +pub const HEADER_SIGBASE: u32 = 256; +pub const HEADER_TAGBASE: u32 = 1000; +pub const RPMTAG_SIG_BASE: u32 = HEADER_SIGBASE; + +#[repr(u32)] +#[derive( + num_derive::FromPrimitive, + num_derive::ToPrimitive, + Debug, + PartialEq, + Eq, + Copy, + Clone, + enum_display_derive::Display, +)] +#[allow(non_camel_case_types)] +pub enum IndexTag { + RPMTAG_HEADERIMAGE = HEADER_IMAGE, + RPMTAG_HEADERSIGNATURES = HEADER_SIGNATURES, + RPMTAG_HEADERIMMUTABLE = HEADER_IMMUTABLE, + RPMTAG_HEADERREGIONS = HEADER_REGIONS, + + RPMTAG_HEADERI18NTABLE = HEADER_I18NTABLE, + + RPMTAG_SIGSIZE = RPMTAG_SIG_BASE + 1, + RPMTAG_SIGLEMD5_1 = RPMTAG_SIG_BASE + 2, + RPMTAG_SIGPGP = RPMTAG_SIG_BASE + 3, + RPMTAG_SIGLEMD5_2 = RPMTAG_SIG_BASE + 4, + RPMTAG_SIGMD5 = RPMTAG_SIG_BASE + 5, + + RPMTAG_SIGGPG = RPMTAG_SIG_BASE + 6, + RPMTAG_SIGPGP5 = RPMTAG_SIG_BASE + 7, + + RPMTAG_BADSHA1_1 = RPMTAG_SIG_BASE + 8, + RPMTAG_BADSHA1_2 = RPMTAG_SIG_BASE + 9, + RPMTAG_PUBKEYS = RPMTAG_SIG_BASE + 10, + RPMTAG_DSAHEADER = RPMTAG_SIG_BASE + 11, + RPMTAG_RSAHEADER = RPMTAG_SIG_BASE + 12, + RPMTAG_SHA1HEADER = RPMTAG_SIG_BASE + 13, + + RPMTAG_LONGSIGSIZE = RPMTAG_SIG_BASE + 14, + RPMTAG_LONGARCHIVESIZE = RPMTAG_SIG_BASE + 15, + + RPMTAG_SHA256HEADER = RPMTAG_SIG_BASE + 17, + + RPMTAG_VERITYSIGNATURES = RPMTAG_SIG_BASE + 20, + + RPMTAG_VERITYSIGNATUREALGO = RPMTAG_SIG_BASE + 21, + + RPMTAG_NAME = 1000, + + RPMTAG_VERSION = 1001, + + RPMTAG_RELEASE = 1002, + + RPMTAG_EPOCH = 1003, + + RPMTAG_SUMMARY = 1004, + RPMTAG_DESCRIPTION = 1005, + 
RPMTAG_BUILDTIME = 1006, + RPMTAG_BUILDHOST = 1007, + RPMTAG_INSTALLTIME = 1008, + RPMTAG_SIZE = 1009, + RPMTAG_DISTRIBUTION = 1010, + RPMTAG_VENDOR = 1011, + RPMTAG_GIF = 1012, + RPMTAG_XPM = 1013, + RPMTAG_LICENSE = 1014, + RPMTAG_PACKAGER = 1015, + RPMTAG_GROUP = 1016, + RPMTAG_CHANGELOG = 1017, + RPMTAG_SOURCE = 1018, + RPMTAG_PATCH = 1019, + RPMTAG_URL = 1020, + RPMTAG_OS = 1021, + RPMTAG_ARCH = 1022, + RPMTAG_PREIN = 1023, + RPMTAG_POSTIN = 1024, + RPMTAG_PREUN = 1025, + RPMTAG_POSTUN = 1026, + RPMTAG_OLDFILENAMES = 1027, + RPMTAG_FILESIZES = 1028, + RPMTAG_FILESTATES = 1029, + RPMTAG_FILEMODES = 1030, + RPMTAG_FILEUIDS = 1031, + RPMTAG_FILEGIDS = 1032, + RPMTAG_FILERDEVS = 1033, + RPMTAG_FILEMTIMES = 1034, + RPMTAG_FILEDIGESTS = 1035, + + RPMTAG_FILELINKTOS = 1036, + RPMTAG_FILEFLAGS = 1037, + RPMTAG_ROOT = 1038, + RPMTAG_FILEUSERNAME = 1039, + RPMTAG_FILEGROUPNAME = 1040, + RPMTAG_EXCLUDE = 1041, + RPMTAG_EXCLUSIVE = 1042, + RPMTAG_ICON = 1043, + RPMTAG_SOURCERPM = 1044, + RPMTAG_FILEVERIFYFLAGS = 1045, + RPMTAG_ARCHIVESIZE = 1046, + RPMTAG_PROVIDENAME = 1047, + + RPMTAG_REQUIREFLAGS = 1048, + RPMTAG_REQUIRENAME = 1049, + + RPMTAG_REQUIREVERSION = 1050, + RPMTAG_NOSOURCE = 1051, + RPMTAG_NOPATCH = 1052, + RPMTAG_CONFLICTFLAGS = 1053, + RPMTAG_CONFLICTNAME = 1054, + + RPMTAG_CONFLICTVERSION = 1055, + RPMTAG_DEFAULTPREFIX = 1056, + RPMTAG_BUILDROOT = 1057, + RPMTAG_INSTALLPREFIX = 1058, + RPMTAG_EXCLUDEARCH = 1059, + RPMTAG_EXCLUDEOS = 1060, + RPMTAG_EXCLUSIVEARCH = 1061, + RPMTAG_EXCLUSIVEOS = 1062, + RPMTAG_AUTOREQPROV = 1063, + RPMTAG_RPMVERSION = 1064, + RPMTAG_TRIGGERSCRIPTS = 1065, + RPMTAG_TRIGGERNAME = 1066, + RPMTAG_TRIGGERVERSION = 1067, + RPMTAG_TRIGGERFLAGS = 1068, + RPMTAG_TRIGGERINDEX = 1069, + RPMTAG_VERIFYSCRIPT = 1079, + RPMTAG_CHANGELOGTIME = 1080, + RPMTAG_CHANGELOGNAME = 1081, + RPMTAG_CHANGELOGTEXT = 1082, + RPMTAG_BROKENMD5 = 1083, + RPMTAG_PREREQ = 1084, + RPMTAG_PREINPROG = 1085, + RPMTAG_POSTINPROG = 1086, + RPMTAG_PREUNPROG = 1087, + 
RPMTAG_POSTUNPROG = 1088, + RPMTAG_BUILDARCHS = 1089, + RPMTAG_OBSOLETENAME = 1090, + + RPMTAG_VERIFYSCRIPTPROG = 1091, + RPMTAG_TRIGGERSCRIPTPROG = 1092, + RPMTAG_DOCDIR = 1093, + RPMTAG_COOKIE = 1094, + RPMTAG_FILEDEVICES = 1095, + RPMTAG_FILEINODES = 1096, + RPMTAG_FILELANGS = 1097, + RPMTAG_PREFIXES = 1098, + RPMTAG_INSTPREFIXES = 1099, + RPMTAG_TRIGGERIN = 1100, + RPMTAG_TRIGGERUN = 1101, + RPMTAG_TRIGGERPOSTUN = 1102, + RPMTAG_AUTOREQ = 1103, + RPMTAG_AUTOPROV = 1104, + RPMTAG_CAPABILITY = 1105, + RPMTAG_SOURCEPACKAGE = 1106, + RPMTAG_OLDORIGFILENAMES = 1107, + RPMTAG_BUILDPREREQ = 1108, + RPMTAG_BUILDREQUIRES = 1109, + RPMTAG_BUILDCONFLICTS = 1110, + RPMTAG_BUILDMACROS = 1111, + RPMTAG_PROVIDEFLAGS = 1112, + RPMTAG_PROVIDEVERSION = 1113, + RPMTAG_OBSOLETEFLAGS = 1114, + RPMTAG_OBSOLETEVERSION = 1115, + RPMTAG_DIRINDEXES = 1116, + RPMTAG_BASENAMES = 1117, + RPMTAG_DIRNAMES = 1118, + RPMTAG_ORIGDIRINDEXES = 1119, + RPMTAG_ORIGBASENAMES = 1120, + RPMTAG_ORIGDIRNAMES = 1121, + RPMTAG_OPTFLAGS = 1122, + RPMTAG_DISTURL = 1123, + RPMTAG_PAYLOADFORMAT = 1124, + RPMTAG_PAYLOADCOMPRESSOR = 1125, + RPMTAG_PAYLOADFLAGS = 1126, + RPMTAG_INSTALLCOLOR = 1127, + RPMTAG_INSTALLTID = 1128, + RPMTAG_REMOVETID = 1129, + RPMTAG_SHA1RHN = 1130, + RPMTAG_RHNPLATFORM = 1131, + RPMTAG_PLATFORM = 1132, + RPMTAG_PATCHESNAME = 1133, + RPMTAG_PATCHESFLAGS = 1134, + RPMTAG_PATCHESVERSION = 1135, + RPMTAG_CACHECTIME = 1136, + RPMTAG_CACHEPKGPATH = 1137, + RPMTAG_CACHEPKGSIZE = 1138, + RPMTAG_CACHEPKGMTIME = 1139, + RPMTAG_FILECOLORS = 1140, + RPMTAG_FILECLASS = 1141, + RPMTAG_CLASSDICT = 1142, + RPMTAG_FILEDEPENDSX = 1143, + RPMTAG_FILEDEPENDSN = 1144, + RPMTAG_DEPENDSDICT = 1145, + RPMTAG_SOURCEPKGID = 1146, + RPMTAG_FILECONTEXTS = 1147, + RPMTAG_FSCONTEXTS = 1148, + RPMTAG_RECONTEXTS = 1149, + RPMTAG_POLICIES = 1150, + RPMTAG_PRETRANS = 1151, + RPMTAG_POSTTRANS = 1152, + RPMTAG_PRETRANSPROG = 1153, + RPMTAG_POSTTRANSPROG = 1154, + RPMTAG_DISTTAG = 1155, + RPMTAG_OLDSUGGESTSNAME = 1156, 
+ + RPMTAG_OLDSUGGESTSVERSION = 1157, + RPMTAG_OLDSUGGESTSFLAGS = 1158, + RPMTAG_OLDENHANCESNAME = 1159, + + RPMTAG_OLDENHANCESVERSION = 1160, + RPMTAG_OLDENHANCESFLAGS = 1161, + RPMTAG_PRIORITY = 1162, + RPMTAG_CVSID = 1163, + + RPMTAG_BLINKPKGID = 1164, + RPMTAG_BLINKHDRID = 1165, + RPMTAG_BLINKNEVRA = 1166, + RPMTAG_FLINKPKGID = 1167, + RPMTAG_FLINKHDRID = 1168, + RPMTAG_FLINKNEVRA = 1169, + RPMTAG_PACKAGEORIGIN = 1170, + RPMTAG_TRIGGERPREIN = 1171, + RPMTAG_BUILDSUGGESTS = 1172, + RPMTAG_BUILDENHANCES = 1173, + RPMTAG_SCRIPTSTATES = 1174, + RPMTAG_SCRIPTMETRICS = 1175, + RPMTAG_BUILDCPUCLOCK = 1176, + RPMTAG_FILEDIGESTALGOS = 1177, + RPMTAG_VARIANTS = 1178, + RPMTAG_XMAJOR = 1179, + RPMTAG_XMINOR = 1180, + RPMTAG_REPOTAG = 1181, + RPMTAG_KEYWORDS = 1182, + RPMTAG_BUILDPLATFORMS = 1183, + RPMTAG_PACKAGECOLOR = 1184, + RPMTAG_PACKAGEPREFCOLOR = 1185, + RPMTAG_XATTRSDICT = 1186, + RPMTAG_FILEXATTRSX = 1187, + RPMTAG_DEPATTRSDICT = 1188, + RPMTAG_CONFLICTATTRSX = 1189, + RPMTAG_OBSOLETEATTRSX = 1190, + RPMTAG_PROVIDEATTRSX = 1191, + RPMTAG_REQUIREATTRSX = 1192, + RPMTAG_BUILDPROVIDES = 1193, + RPMTAG_BUILDOBSOLETES = 1194, + RPMTAG_DBINSTANCE = 1195, + RPMTAG_NVRA = 1196, + + RPMTAG_FILENAMES = 5000, + RPMTAG_FILEPROVIDE = 5001, + RPMTAG_FILEREQUIRE = 5002, + RPMTAG_FSNAMES = 5003, + RPMTAG_FSSIZES = 5004, + RPMTAG_TRIGGERCONDS = 5005, + RPMTAG_TRIGGERTYPE = 5006, + RPMTAG_ORIGFILENAMES = 5007, + RPMTAG_LONGFILESIZES = 5008, + RPMTAG_LONGSIZE = 5009, + RPMTAG_FILECAPS = 5010, + RPMTAG_FILEDIGESTALGO = 5011, + RPMTAG_BUGURL = 5012, + RPMTAG_EVR = 5013, + RPMTAG_NVR = 5014, + RPMTAG_NEVR = 5015, + RPMTAG_NEVRA = 5016, + RPMTAG_HEADERCOLOR = 5017, + RPMTAG_VERBOSE = 5018, + RPMTAG_EPOCHNUM = 5019, + RPMTAG_PREINFLAGS = 5020, + RPMTAG_POSTINFLAGS = 5021, + RPMTAG_PREUNFLAGS = 5022, + RPMTAG_POSTUNFLAGS = 5023, + RPMTAG_PRETRANSFLAGS = 5024, + RPMTAG_POSTTRANSFLAGS = 5025, + RPMTAG_VERIFYSCRIPTFLAGS = 5026, + RPMTAG_TRIGGERSCRIPTFLAGS = 5027, + RPMTAG_COLLECTIONS = 
5029, + RPMTAG_POLICYNAMES = 5030, + RPMTAG_POLICYTYPES = 5031, + RPMTAG_POLICYTYPESINDEXES = 5032, + RPMTAG_POLICYFLAGS = 5033, + RPMTAG_VCS = 5034, + RPMTAG_ORDERNAME = 5035, + RPMTAG_ORDERVERSION = 5036, + RPMTAG_ORDERFLAGS = 5037, + RPMTAG_MSSFMANIFEST = 5038, + RPMTAG_MSSFDOMAIN = 5039, + RPMTAG_INSTFILENAMES = 5040, + RPMTAG_REQUIRENEVRS = 5041, + RPMTAG_PROVIDENEVRS = 5042, + RPMTAG_OBSOLETENEVRS = 5043, + RPMTAG_CONFLICTNEVRS = 5044, + RPMTAG_FILENLINKS = 5045, + RPMTAG_RECOMMENDNAME = 5046, + + RPMTAG_RECOMMENDVERSION = 5047, + RPMTAG_RECOMMENDFLAGS = 5048, + RPMTAG_SUGGESTNAME = 5049, + + RPMTAG_SUGGESTVERSION = 5050, + RPMTAG_SUGGESTFLAGS = 5051, + RPMTAG_SUPPLEMENTNAME = 5052, + + RPMTAG_SUPPLEMENTVERSION = 5053, + RPMTAG_SUPPLEMENTFLAGS = 5054, + RPMTAG_ENHANCENAME = 5055, + + RPMTAG_ENHANCEVERSION = 5056, + RPMTAG_ENHANCEFLAGS = 5057, + RPMTAG_RECOMMENDNEVRS = 5058, + RPMTAG_SUGGESTNEVRS = 5059, + RPMTAG_SUPPLEMENTNEVRS = 5060, + RPMTAG_ENHANCENEVRS = 5061, + RPMTAG_ENCODING = 5062, + RPMTAG_FILETRIGGERIN = 5063, + RPMTAG_FILETRIGGERUN = 5064, + RPMTAG_FILETRIGGERPOSTUN = 5065, + RPMTAG_FILETRIGGERSCRIPTS = 5066, + RPMTAG_FILETRIGGERSCRIPTPROG = 5067, + RPMTAG_FILETRIGGERSCRIPTFLAGS = 5068, + RPMTAG_FILETRIGGERNAME = 5069, + RPMTAG_FILETRIGGERINDEX = 5070, + RPMTAG_FILETRIGGERVERSION = 5071, + RPMTAG_FILETRIGGERFLAGS = 5072, + RPMTAG_TRANSFILETRIGGERIN = 5073, + RPMTAG_TRANSFILETRIGGERUN = 5074, + RPMTAG_TRANSFILETRIGGERPOSTUN = 5075, + RPMTAG_TRANSFILETRIGGERSCRIPTS = 5076, + RPMTAG_TRANSFILETRIGGERSCRIPTPROG = 5077, + RPMTAG_TRANSFILETRIGGERSCRIPTFLAGS = 5078, + RPMTAG_TRANSFILETRIGGERNAME = 5079, + RPMTAG_TRANSFILETRIGGERINDEX = 5080, + RPMTAG_TRANSFILETRIGGERVERSION = 5081, + RPMTAG_TRANSFILETRIGGERFLAGS = 5082, + RPMTAG_REMOVEPATHPOSTFIXES = 5083, + RPMTAG_FILETRIGGERPRIORITIES = 5084, + RPMTAG_TRANSFILETRIGGERPRIORITIES = 5085, + RPMTAG_FILETRIGGERCONDS = 5086, + RPMTAG_FILETRIGGERTYPE = 5087, + RPMTAG_TRANSFILETRIGGERCONDS = 5088, + 
RPMTAG_TRANSFILETRIGGERTYPE = 5089, + RPMTAG_FILESIGNATURES = 5090, + RPMTAG_FILESIGNATURELENGTH = 5091, + RPMTAG_PAYLOADDIGEST = 5092, + RPMTAG_PAYLOADDIGESTALGO = 5093, + RPMTAG_AUTOINSTALLED = 5094, + RPMTAG_IDENTITY = 5095, + RPMTAG_MODULARITYLABEL = 5096, + RPMTAG_PAYLOADDIGESTALT = 5097, +} + +#[repr(u32)] +#[derive( + num_derive::FromPrimitive, + num_derive::ToPrimitive, + Debug, + PartialEq, + Eq, + Copy, + Clone, + enum_display_derive::Display, +)] +#[allow(non_camel_case_types)] +pub enum IndexSignatureTag { + HEADER_SIGNATURES = HEADER_SIGNATURES, + // This tag specifies the combined size of the Header and Payload sections. + RPMSIGTAG_SIZE = HEADER_TAGBASE, //1000 + + RPMSIGTAG_LEMD5_1 = HEADER_TAGBASE + 1, //1001 + + // This tag specifies the RSA signature of the combined Header and Payload sections. + // The data is formatted as a Version 3 Signature Packet as specified in RFC 2440: OpenPGP Message Format. + RPMSIGTAG_PGP = HEADER_TAGBASE + 2, //1002 + + RPMSIGTAG_LEMD5_2 = HEADER_TAGBASE + 3, //1003 + + //This tag specifies the 128-bit MD5 checksum of the combined Header and Archive sections. + RPMSIGTAG_MD5 = HEADER_TAGBASE + 4, //1004 + + // The tag contains the DSA signature of the combined Header and Payload sections. + // The data is formatted as a Version 3 Signature Packet as specified in RFC 2440: OpenPGP Message Format. + RPMSIGTAG_GPG = HEADER_TAGBASE + 5, //1005 + + RPMSIGTAG_PGP5 = HEADER_TAGBASE + 6, //1006 + + //This tag specifies the uncompressed size of the Payload archive, including the cpio headers. + RPMSIGTAG_PAYLOADSIZE = HEADER_TAGBASE + 7, //1007 + + RPMSIGTAG_RESERVEDSPACE = HEADER_TAGBASE + 8, //1008 + + RPMSIGTAG_BADSHA1_1 = IndexTag::RPMTAG_BADSHA1_1 as u32, //264 + + RPMSIGTAG_BADSHA1_2 = IndexTag::RPMTAG_BADSHA1_2 as u32, //265 + + //The tag contains the DSA signature of the Header section. + // The data is formatted as a Version 3 Signature Packet as specified in RFC 2440: OpenPGP Message Format. 
+ // If this tag is present, then the SIGTAG_GPG tag shall also be present. + RPMSIGTAG_DSA = IndexTag::RPMTAG_DSAHEADER as u32, //267 + + // The tag contains the RSA signature of the Header section. + // The data is formatted as a Version 3 Signature Packet as specified in RFC 2440: OpenPGP Message Format. + // If this tag is present, then the SIGTAG_PGP shall also be present. + RPMSIGTAG_RSA = IndexTag::RPMTAG_RSAHEADER as u32, //268 + + //This index contains the SHA1 checksum of the entire Header Section, + //including the Header Record, Index Records and Header store. + RPMSIGTAG_SHA1 = IndexTag::RPMTAG_SHA1HEADER as u32, //269 + + // Header+payload size if > 4GB. + RPMSIGTAG_LONGSIGSIZE = IndexTag::RPMTAG_LONGSIGSIZE as u32, //270 + + // (Compressed) payload size when > 4GB. + RPMSIGTAG_LONGARCHIVESIZE = IndexTag::RPMTAG_LONGARCHIVESIZE as u32, //271 + + //This index contains the SHA256 checksum of the entire Header Section, + //including the Header Record, Index Records and Header store. + RPMSIGTAG_SHA256 = IndexTag::RPMTAG_SHA256HEADER as u32, //273 + + // The tag contains the file signature of a file. + // The data is formatted as a hex-encoded string. + // If this tag is present, then the SIGTAG_FILESIGNATURE_LENGTH shall also be present. + RPMSIGTAG_FILESIGNATURES = RPMTAG_SIG_BASE + 18, //274 + + RPMTAG_FILESIGNATURES = IndexTag::RPMTAG_FILESIGNATURES as u32, //5090 + + // The tag contains the length of the file signatures in total. + // If this tag is present, then the SIGTAG_FILESIGNATURE shall also be present. 
+ RPMSIGTAG_FILESIGNATURELENGTH = RPMTAG_SIG_BASE + 19, //275 + + RPMTAG_FILESIGNATURELENGTH = IndexTag::RPMTAG_FILESIGNATURELENGTH as u32, //5091 + + RPMSIGTAG_VERITYSIGNATURES = IndexTag::RPMTAG_VERITYSIGNATURES as u32, //276 + + RPMSIGTAG_VERITYSIGNATURE_ALGO = IndexTag::RPMTAG_VERITYSIGNATUREALGO as u32, //277 +} + +pub trait TypeName { + fn type_name() -> &'static str; +} + +impl TypeName for IndexTag { + fn type_name() -> &'static str { + "IndexTag" + } +} + +impl TypeName for IndexSignatureTag { + fn type_name() -> &'static str { + "IndexSignatureTag" + } +} + +/// lead header size +pub const LEAD_SIZE: usize = 96; +/// rpm magic as part of the lead header +pub const RPM_MAGIC: [u8; 4] = [0xed, 0xab, 0xee, 0xdb]; + +/// header magic recognition (not the lead!) +pub const HEADER_MAGIC: [u8; 3] = [0x8e, 0xad, 0xe8]; + +pub const RPMSENSE_ANY: u32 = 0; +pub const RPMSENSE_LESS: u32 = 1 << 1; +pub const RPMSENSE_GREATER: u32 = 1 << 2; +pub const RPMSENSE_EQUAL: u32 = 1 << 3; + +// there is no use yet for those constants. But they are part of the official package +// so I will leave them in in case we need them later. 
+ +// const RPMSENSE_POSTTRANS: u32 = (1 << 5); +// const RPMSENSE_PREREQ: u32 = (1 << 6); +// const RPMSENSE_PRETRANS: u32 = (1 << 7); +// const RPMSENSE_INTERP: u32 = (1 << 8); +// const RPMSENSE_SCRIPT_PRE: u32 = (1 << 9); +// const RPMSENSE_SCRIPT_POST: u32 = (1 << 10); +// const RPMSENSE_SCRIPT_PREUN: u32 = (1 << 11); +// const RPMSENSE_SCRIPT_POSTUN: u32 = (1 << 12); +// const RPMSENSE_SCRIPT_VERIFY: u32 = (1 << 13); +// const RPMSENSE_FIND_REQUIRES: u32 = (1 << 14); // 16384 find-requires generated dependency +// const RPMSENSE_FIND_PROVIDES: u32 = (1 << 15); // 32768 find_provides generated dependency +// const RPMSENSE_TRIGGERIN: u32 = (1 << 16); +// const RPMSENSE_TRIGGERUN: u32 = (1 << 17); +// const RPMSENSE_TRIGGERPOSTUN: u32 = (1 << 18); +// const RPMSENSE_MISSINGOK: u32 = (1 << 19); + +// // for some weird reason, centos packages have another value for rpm lib sense. We have to observe this. +pub const RPMSENSE_RPMLIB: u32 = 1 << 24; // 16777226 +// const RPMSENSE_TRIGGERPREIN: u32 = (1 << 25); +// const RPMSENSE_KEYRING: u32 = (1 << 26); +// pub const RPMSENSE_CONFIG: u32 = (1 << 28); + +pub const RPMFILE_CONFIG: u32 = 1; +pub const RPMFILE_DOC: u32 = 1 << 1; +// pub const RPMFILE_DONOTUSE: u32 = 1 << 2; +// pub const RPMFILE_MISSINGOK: u32 = 1 << 3; +// pub const RPMFILE_NOREPLACE: u32 = 1 << 4; +// pub const RPMFILE_SPECFILE: u32 = 1 << 5; +// pub const RPMFILE_GHOST: u32 = 1 << 6; +// pub const RPMFILE_LICENSE: u32 = 1 << 7; +// pub const RPMFILE_README: u32 = 1 << 8; +// pub const RPMFILE_EXCLUDE: u32 = 1 << 9; + +// should be technically equiv to +// `pgp::crypto::hash::HashAlgorithm` +// but that is only available with feature `signature` +pub const PGPHASHALGO_MD5: u32 = 1; +pub const PGPHASHALGO_SHA1: u32 = 2; +pub const PGPHASHALGO_RIPEMD160: u32 = 3; +pub const PGPHASHALGO_MD2: u32 = 5; +pub const PGPHASHALGO_TIGER192: u32 = 6; +pub const PGPHASHALGO_HAVAL_5_160: u32 = 7; +pub const PGPHASHALGO_SHA256: u32 = 8; +pub const 
PGPHASHALGO_SHA384: u32 = 9; +pub const PGPHASHALGO_SHA512: u32 = 10; +pub const PGPHASHALGO_SHA224: u32 = 11; diff --git a/rdnf/rpm/src/db.rs b/rdnf/rpm/src/db.rs new file mode 100644 index 0000000000000000000000000000000000000000..a4e5c1b4da7a45127d4dc4a330c6e0e8338caf8c --- /dev/null +++ b/rdnf/rpm/src/db.rs @@ -0,0 +1,71 @@ +use std::io::Cursor; + +use rusqlite::Connection; + +use crate::{Header, IndexTag, RPMError}; + +#[test] +pub fn test_db() { + let path = "test_assets/rpmdb.sqlite"; + let conn = Connection::open(path).unwrap(); + let start = SystemTime::now(); + match get_pkg_headers_by_pkg_name(&conn, "libgcc") { + Ok(s) => match s { + Some(n) => { + dbg!(n.get_conflicts().unwrap()); + // dbg!(n.get_arch().unwrap()); + } + None => {} + }, + Err(e) => { + dbg!(e); + } + }; + // dbg!(heads.get_arch()); + let end = SystemTime::now().duration_since(start).unwrap(); + dbg!(end); +} +#[inline] +pub fn get_pkg_headers_by_pkg_name( + conn: &Connection, + pkg_name: &str, +) -> Result>, RPMError> { + let mut stmt = conn + .prepare( + "SELECT Packages.blob + from Packages INNER JOIN Name on Packages.hnum=Name.hnum + WHERE Name.key= ?1", + ) + .unwrap(); + let res = stmt.query_and_then([pkg_name], |row| row.get::<_, Vec>(0))?; + let mut v = Vec::new(); + for ele in res { + v.push(parse_pkg_blob_to_header(ele?.as_mut())?); + } + Ok(v.pop()) +} +// #[inline] +// pub fn get_installed_pkg_headers_by_provide_name( +// conn: &Connection, +// provide_name: &str, +// ) -> Result>, RPMError> { +// let mut stmt = conn.prepare( +// "SELECT Packages.blob +// FROM Packages INNER JOIN Providename on Packages.hnum=Providename.hnum +// WHERE Providename.key=?1", +// )?; +// let res = stmt.query_and_then([provide_name], |row| row.get::<_, Vec>(0))?; +// let mut v = Vec::new(); +// for ele in res { +// v.push(parse_pkg_blob_to_header(ele?.as_mut())?); +// } +// Ok(v.pop()) +// } +#[inline] +pub fn parse_pkg_blob_to_header(blob: &mut Vec) -> Result, RPMError> { + let mut buf = 
vec![0x8e, 0xad, 0xe8, 0x01, 0x00, 0x00, 0x00, 0x00]; + buf.append(blob); + let mut input = Cursor::new(buf); + Header::::parse(&mut input) +} +// SELECT Packages .blob from Packages INNER JOIN Name on Packages.hnum=Name.hnum WHERE Name.key="libgcc" diff --git a/rdnf/rpm/src/errors.rs b/rdnf/rpm/src/errors.rs new file mode 100644 index 0000000000000000000000000000000000000000..fb32c7f4d674d8b7ef159466ff3a7ac86940a72d --- /dev/null +++ b/rdnf/rpm/src/errors.rs @@ -0,0 +1,111 @@ +use std::io; + +use thiserror::Error; + +use crate::FileDigestAlgorithm; + +#[derive(Error, Debug)] +#[non_exhaustive] +pub enum RPMError { + #[error(transparent)] + Io(#[from] io::Error), + + #[error(transparent)] + Hex(#[from] hex::FromHexError), + + #[error("{0}")] + Nom(String), + #[error( + "invalid magic expected: {expected} but got: {actual} - whole input was {complete_input:?}" + )] + InvalidMagic { + expected: u8, + actual: u8, + complete_input: Vec, + }, + #[error("unsupported Version {0} - only header version 1 is supported")] + UnsupportedHeaderVersion(u8), + #[error("invalid tag {raw_tag} for store {store_type}")] + InvalidTag { + raw_tag: u32, + store_type: &'static str, + }, + #[error("invalid tag data type in store {store_type}: expected 0 - 9 but got {raw_data_type}")] + InvalidTagDataType { + raw_data_type: u32, + store_type: &'static str, + }, + #[error("unable to find tag {0}")] + TagNotFound(String), + #[error("tag {tag} has data type {actual_data_type}, not {expected_data_type}")] + UnexpectedTagDataType { + expected_data_type: &'static str, + actual_data_type: String, + tag: String, + }, + #[error("invalid tag array index {tag} with {index} while bounded at {bound}")] + InvalidTagIndex { tag: String, index: u32, bound: u32 }, + + #[error("invalid tag value enum variant for {tag} with {variant}")] + InvalidTagValueEnumVariant { tag: String, variant: u32 }, + + #[error("invalid size of reserved area - expected length of {expected} but got {actual}")] + 
InvalidReservedSpaceSize { expected: u16, actual: usize }, + + #[error("invalid destination path {path} - {desc}")] + InvalidDestinationPath { path: String, desc: &'static str }, + + #[error("signature packet not found in what is supposed to be a signature")] + NoSignatureFound, + + #[error("error creating signature: {0}")] + SignError(Box), + + #[error("error parsing key - {details}. underlying error was: {source}")] + KeyLoadError { + source: Box, + details: &'static str, + }, + + #[error("error verifying signature with key {key_ref}: {source}")] + VerificationError { + source: Box, + key_ref: String, + }, + + #[error("unable to find key with key-ref: {key_ref}")] + KeyNotFoundError { key_ref: String }, + + #[error("unknown compressor type {0} - only gzip and none are supported")] + UnknownCompressorType(String), + + #[error("unsupported file digest algorithm {0:?}")] + UnsupportedFileDigestAlgorithm(FileDigestAlgorithm), + + #[error("invalid file mode {raw_mode} - {reason}")] + InvalidFileMode { raw_mode: i32, reason: &'static str }, + + #[error("error using sqlite rpmdb.sqlite {0}")] + SqliteError(String), +} + +impl From> for RPMError { + fn from(error: nom::Err<(&[u8], nom::error::ErrorKind)>) -> Self { + match error { + nom::Err::Error((_, kind)) | nom::Err::Failure((_, kind)) => { + RPMError::Nom(kind.description().to_string()) + } + nom::Err::Incomplete(_) => RPMError::Nom("unhandled incomplete".to_string()), + } + } +} +impl From for RPMError { + fn from(error: rusqlite::Error) -> Self { + // dbg!(error); + // error.to_string() + RPMError::SqliteError(error.to_string()) + // match error { + // _ => {}, + // } + } +} diff --git a/rdnf/rpm/src/lib.rs b/rdnf/rpm/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..a0225ad4d40423b4d78745352994796913af1577 --- /dev/null +++ b/rdnf/rpm/src/lib.rs @@ -0,0 +1,84 @@ +//! # rpm-rs +//! +//! A library providing API to parse rpms as well as +//! creating rpms from individual files. +//! 
+//! # Example +//! +//! ```rust +//! +//! # #[cfg(feature = "signature-meta")] +//! use rpm::{ +//! signature::pgp::{ +//! Signer, +//! Verifier +//! } +//! }; +//! use std::str::FromStr; +//! +//! # fn main() -> Result<(), Box> { +//! # #[cfg(feature = "signature-meta")] +//! # { +//! let raw_secret_key = std::fs::read("./test_assets/secret_key.asc")?; +//! let pkg = rpm::RPMBuilder::new("test", "1.0.0", "MIT", "x86_64", "some awesome package") +//! .compression(rpm::Compressor::from_str("gzip")?) +//! .with_file( +//! "./test_assets/awesome.toml", +//! rpm::RPMFileOptions::new("/etc/awesome/config.toml") +//! .is_config(), +//! )? +//! // file mode is inherited from source file +//! .with_file( +//! "./test_assets/awesome.py", +//! rpm::RPMFileOptions::new("/usr/bin/awesome"), +//! )? +//! .with_file( +//! "./test_assets/awesome.toml", +//! // you can set a custom mode and custom user too +//! rpm::RPMFileOptions::new("/etc/awesome/second.toml") +//! .mode(rpm::FileMode::regular(0o644)) +//! .user("hugo"), +//! )? +//! .pre_install_script("echo preinst") +//! .add_changelog_entry("me", "was awesome, eh?", 123123123) +//! .add_changelog_entry("you", "yeah, it was", 12312312) +//! .requires(rpm::Dependency::any("wget")) +//! .vendor("corporation or individual") +//! .url("www.github.com/repo") +//! .vcs("git:repo=example_repo:branch=example_branch:sha=example_sha") +//! .build_and_sign( +//! Signer::load_from_asc_bytes(&raw_secret_key)? +//! )?; +//! let mut f = std::fs::File::create("./target/awesome.rpm")?; +//! pkg.write(&mut f)?; +//! +//! // reading +//! let raw_pub_key = std::fs::read("./test_assets/public_key.asc")?; +//! let pkg = rpm::RPMPackage::open("./target/awesome.rpm")?; +//! // verifying +//! pkg.verify_signature(Verifier::load_from_asc_bytes(&raw_pub_key)?)?; +//! # } +//! # Ok(()) +//! # } +//! 
``` + +#![allow(unknown_lints, clippy::uninlined_format_args)] + +mod errors; +pub use crate::errors::*; + +pub(crate) mod constants; +pub use crate::constants::*; + +#[cfg(feature = "signature-meta")] +mod sequential_cursor; + +mod rpm; +pub use crate::rpm::*; +pub mod db; +mod test_rpm; +#[cfg(test)] +mod tests; + +#[cfg(all(test, feature = "test-with-podman"))] +mod compat_tests; diff --git a/rdnf/rpm/src/rpm/builder.rs b/rdnf/rpm/src/rpm/builder.rs new file mode 100644 index 0000000000000000000000000000000000000000..88dfa545314a443fcc95bbbf10ad2992586c94ed --- /dev/null +++ b/rdnf/rpm/src/rpm/builder.rs @@ -0,0 +1,979 @@ +use std::collections::{BTreeMap, BTreeSet}; +use std::convert::TryInto; + +use std::io::{Read, Write}; +#[cfg(unix)] +use std::os::unix::fs::PermissionsExt; + +use std::path::{Path, PathBuf}; +use std::time::{SystemTime, UNIX_EPOCH}; + +use crate::errors::*; + +use super::compressor::Compressor; +use super::headers::*; +use super::Lead; +use crate::constants::*; + +#[cfg(feature = "signature-meta")] +use crate::sequential_cursor::SeqCursor; +#[cfg(feature = "signature-meta")] +use crate::signature; + +use crate::RPMPackage; +use crate::RPMPackageMetadata; + +#[cfg(feature = "with-file-async-tokio")] +use tokio_util::compat::TokioAsyncReadCompatExt; + +#[cfg(feature = "async-futures")] +use futures::io::{AsyncRead, AsyncReadExt}; + +#[cfg(unix)] +fn file_mode(file: &std::fs::File) -> Result { + Ok(file.metadata()?.permissions().mode()) +} + +#[cfg(windows)] +fn file_mode(_file: &std::fs::File) -> Result { + Ok(0) +} + +#[cfg(all(unix, feature = "with-file-async-tokio"))] +async fn async_file_mode(file: &tokio::fs::File) -> Result { + Ok(file.metadata().await?.permissions().mode()) +} + +#[cfg(all( + unix, + feature = "with-file-async-async-std", + not(feature = "with-file-async-tokio") +))] +async fn async_file_mode(file: &async_std::fs::File) -> Result { + Ok(file.metadata().await?.permissions().mode()) +} + +#[cfg(all(windows, feature = 
"with-file-async-tokio"))] +async fn async_file_mode(_file: &tokio::fs::File) -> Result { + Ok(0) +} + +#[cfg(all( + windows, + feature = "with-file-async-async-std", + not(feature = "with-file-async-tokio") +))] +async fn async_file_mode(_file: &async_std::fs::File) -> Result { + Ok(0) +} + +/// Builder pattern for a full rpm file. +/// +/// Preferred method of creating a rpm file. +pub struct RPMBuilder { + name: String, + epoch: u32, + build_time: u32, // because rpm_time_t is an uint32 + version: String, + license: String, + arch: String, + uid: Option, + gid: Option, + desc: String, + release: String, + + // File entries need to be sorted. The entries need to be in the same order as they come + // in the cpio payload. Otherwise rpm will not be able to resolve those paths. + // key is the directory, values are complete paths + files: BTreeMap, + directories: BTreeSet, + requires: Vec, + obsoletes: Vec, + provides: Vec, + conflicts: Vec, + + pre_inst_script: Option, + post_inst_script: Option, + pre_uninst_script: Option, + post_uninst_script: Option, + + changelog_authors: Vec, + changelog_entries: Vec, + changelog_times: Vec, + compressor: Compressor, + + vendor: Option, + url: Option, + vcs: Option, +} + +impl RPMBuilder { + pub fn new(name: &str, version: &str, license: &str, arch: &str, desc: &str) -> Self { + RPMBuilder { + name: name.to_string(), + epoch: 0, + build_time: SystemTime::now() + .duration_since(SystemTime::UNIX_EPOCH) + .unwrap() + .as_secs() as u32, + version: version.to_string(), + license: license.to_string(), + arch: arch.to_string(), + desc: desc.to_string(), + release: "1".to_string(), + uid: None, + gid: None, + conflicts: Vec::new(), + provides: Vec::new(), + obsoletes: Vec::new(), + requires: Vec::new(), + pre_inst_script: None, + post_inst_script: None, + pre_uninst_script: None, + post_uninst_script: None, + files: BTreeMap::new(), + changelog_authors: Vec::new(), + changelog_entries: Vec::new(), + changelog_times: Vec::new(), + 
compressor: Compressor::None(Vec::new()), + directories: BTreeSet::new(), + vendor: None, + url: None, + vcs: None, + } + } + + pub fn vendor>(mut self, content: T) -> Self { + self.vendor = Some(content.into()); + self + } + pub fn url>(mut self, content: T) -> Self { + self.url = Some(content.into()); + self + } + + pub fn vcs>(mut self, content: T) -> Self { + self.vcs = Some(content.into()); + self + } + + pub fn epoch(mut self, epoch: u32) -> Self { + self.epoch = epoch; + self + } + + pub fn compression(mut self, comp: Compressor) -> Self { + self.compressor = comp; + self + } + + pub fn add_changelog_entry(mut self, author: E, entry: F, time: u32) -> Self + where + E: Into, + F: Into, + { + self.changelog_authors.push(author.into()); + self.changelog_entries.push(entry.into()); + self.changelog_times.push(time); + self + } + + #[cfg(feature = "with-file-async-tokio")] + pub async fn with_file_async(self, source: P, options: T) -> Result + where + P: AsRef, + T: Into, + { + let input = tokio::fs::File::open(source).await?; + let mut options = options.into(); + if options.inherit_permissions { + options.mode = (async_file_mode(&input).await? as i32).into(); + } + let modified_at = input + .metadata() + .await? + .modified()? + .duration_since(UNIX_EPOCH) + .expect("system time predates the Unix epoch?") + .as_secs(); + + self.with_file_async_inner(input.compat(), modified_at, options) + .await + } + + #[cfg(all( + feature = "with-file-async-async-std", + not(feature = "with-file-async-tokio") + ))] + pub async fn with_file_async(self, source: P, options: T) -> Result + where + P: AsRef, + T: Into, + { + let input = async_std::fs::File::open(source.as_ref()).await?; + let mut options = options.into(); + if options.inherit_permissions { + options.mode = (async_file_mode(&input).await? as i32).into(); + } + let modified_at = input + .metadata() + .await? + .modified()? 
+ .duration_since(UNIX_EPOCH) + .expect("system time predates the Unix epoch?") + .as_secs(); + + self.with_file_async_inner(input, modified_at, options) + .await + } + + #[cfg(feature = "async-futures")] + async fn with_file_async_inner

( + mut self, + mut input: P, + modified_at: u64, + options: RPMFileOptions, + ) -> Result + where + P: AsyncRead + Unpin, + { + let mut content = Vec::new(); + input.read_to_end(&mut content).await?; + let mtime = modified_at + .try_into() + .expect("file mtime is likely wrong, too large to be stored as uint32"); + self.add_data(content, mtime, options)?; + Ok(self) + } + + pub fn with_file(mut self, source: P, options: T) -> Result + where + P: AsRef, + T: Into, + { + let mut input = std::fs::File::open(source)?; + let mut content = Vec::new(); + input.read_to_end(&mut content)?; + let mut options = options.into(); + if options.inherit_permissions { + options.mode = (file_mode(&input)? as i32).into(); + } + self.add_data( + content, + input + .metadata()? + .modified()? + .duration_since(UNIX_EPOCH) + .expect("system time predates the Unix epoch?") + .as_secs() + .try_into() + .expect("file mtime is wrong, too large to be stored as uint32"), + options, + )?; + Ok(self) + } + + fn add_data( + &mut self, + content: Vec, + modified_at: u32, + options: RPMFileOptions, + ) -> Result<(), RPMError> { + use sha2::Digest; + + let dest = options.destination; + if !dest.starts_with("./") && !dest.starts_with('/') { + return Err(RPMError::InvalidDestinationPath { + path: dest, + desc: "invalid start, expected / or ./", + }); + } + + let pb = PathBuf::from(dest.clone()); + + let parent = pb + .parent() + .ok_or_else(|| RPMError::InvalidDestinationPath { + path: dest.clone(), + desc: "no parent directory found", + })?; + + let (cpio_path, dir) = if dest.starts_with('.') { + ( + dest.to_string(), + // strip_prefix() should never fail because we've checked the special cases already + format!("/{}/", parent.strip_prefix(".").unwrap().to_string_lossy()), + ) + } else { + ( + format!(".{}", dest), + format!("{}/", parent.to_string_lossy()), + ) + }; + + let mut hasher = sha2::Sha256::default(); + hasher.update(&content); + let hash_result = hasher.finalize(); + let sha_checksum = 
hex::encode(hash_result); // encode as string + let entry = RPMFileEntry { + // file_name() should never fail because we've checked the special cases already + base_name: pb.file_name().unwrap().to_string_lossy().to_string(), + size: content.len() as u32, + content, + flag: options.flag, + user: options.user, + group: options.group, + mode: options.mode, + link: options.symlink, + modified_at, + dir: dir.clone(), + sha_checksum, + }; + + self.directories.insert(dir); + self.files.entry(cpio_path).or_insert(entry); + Ok(()) + } + + pub fn pre_install_script>(mut self, content: T) -> Self { + self.pre_inst_script = Some(content.into()); + self + } + + pub fn post_install_script>(mut self, content: T) -> Self { + self.post_inst_script = Some(content.into()); + self + } + + pub fn pre_uninstall_script>(mut self, content: T) -> Self { + self.pre_uninst_script = Some(content.into()); + self + } + + pub fn post_uninstall_script>(mut self, content: T) -> Self { + self.post_uninst_script = Some(content.into()); + self + } + + pub fn release(mut self, release: T) -> Self { + self.release = release.to_string(); + self + } + + pub fn requires(mut self, dep: Dependency) -> Self { + self.requires.push(dep); + self + } + + pub fn obsoletes(mut self, dep: Dependency) -> Self { + self.obsoletes.push(dep); + self + } + + pub fn conflicts(mut self, dep: Dependency) -> Self { + self.conflicts.push(dep); + self + } + + pub fn provides(mut self, dep: Dependency) -> Self { + self.provides.push(dep); + self + } + + /// build without a signature + /// + /// ignores a present key, if any + pub fn build(self) -> Result { + let (lead, header_idx_tag, content) = self.prepare_data()?; + + let mut header = Vec::with_capacity(128); + header_idx_tag.write(&mut header)?; + + #[cfg(feature = "signature-meta")] + let digest_header = { + let header = header; + let (header_digest_sha1, header_and_content_digest_md5) = + Self::derive_hashes(header.as_slice(), content.as_slice())?; + let 
header_and_content_len = header.len() + content.len(); + + Header::::builder() + .add_digest( + header_digest_sha1.as_str(), + header_and_content_digest_md5.as_slice(), + ) + .build( + header_and_content_len + .try_into() + .expect("signature header + signature length must be <4gb"), + ) + }; + #[cfg(not(feature = "signature-meta"))] + let digest_header = { Header::::new_empty() }; + + let metadata = RPMPackageMetadata { + lead, + signature: digest_header, + header: header_idx_tag, + }; + let pkg = RPMPackage { metadata, content }; + Ok(pkg) + } + + /// use an external signer to sing and build + /// + /// See `signature::Signing` for more details. + #[cfg(feature = "signature-meta")] + pub fn build_and_sign(self, signer: S) -> Result + where + S: signature::Signing, + { + let (lead, header_idx_tag, content) = self.prepare_data()?; + + let mut header = Vec::with_capacity(128); + header_idx_tag.write(&mut header)?; + let header = header; + + let (header_digest_sha1, header_and_content_digest_md5) = + Self::derive_hashes(header.as_slice(), content.as_slice())?; + + let header_and_content_len = header.len() + content.len(); + + let builder = Header::::builder().add_digest( + header_digest_sha1.as_str(), + header_and_content_digest_md5.as_slice(), + ); + + let signature_header = { + let rsa_sig_header_only = signer.sign(header.as_slice())?; + + let cursor = SeqCursor::new(&[header.as_slice(), content.as_slice()]); + let rsa_sig_header_and_archive = signer.sign(cursor)?; + + builder + .add_signature( + rsa_sig_header_only.as_ref(), + rsa_sig_header_and_archive.as_ref(), + ) + .build( + header_and_content_len + .try_into() + .expect("signature header + signature length must be <4gb"), + ) + }; + + let metadata = RPMPackageMetadata { + lead, + signature: signature_header, + header: header_idx_tag, + }; + let pkg = RPMPackage { metadata, content }; + Ok(pkg) + } + + /// use prepared data but make sure the signatures are + #[cfg(feature = "signature-meta")] + fn 
derive_hashes(header: &[u8], content: &[u8]) -> Result<(String, Vec), RPMError> { + let digest_md5 = { + use md5::Digest; + + // across header index and content (compressed or uncompressed, depends on configuration) + let mut hasher = md5::Md5::default(); + hasher.update(header); + hasher.update(content); + let digest_md5 = hasher.finalize(); + digest_md5.to_vec() + }; + + // header only, not the lead, just the header index + let digest_sha1 = { + use sha1::Digest; + + let mut hasher = sha1::Sha1::default(); + hasher.update(header); + let digest_sha1 = hasher.finalize(); + hex::encode(digest_sha1) + }; + + Ok((digest_sha1, digest_md5)) + } + + /// prepare all rpm headers including content + /// + /// @todo split this into multiple `fn`s, one per `IndexTag`-group. + fn prepare_data(mut self) -> Result<(Lead, Header, Vec), RPMError> { + // signature depends on header and payload. So we build these two first. + // then the signature. Then we stitch all together. + // Lead is not important. just build it here + + let lead = Lead::new(&self.name); + + let mut ino_index = 1; + + let mut file_sizes = Vec::new(); + let mut file_modes = Vec::new(); + let mut file_rdevs = Vec::new(); + let mut file_mtimes = Vec::new(); + let mut file_hashes = Vec::new(); + let mut file_linktos = Vec::new(); + let mut file_flags = Vec::new(); + let mut file_usernames = Vec::new(); + let mut file_groupnames = Vec::new(); + let mut file_devices = Vec::new(); + let mut file_inodes = Vec::new(); + let mut file_langs = Vec::new(); + let mut file_verify_flags = Vec::new(); + let mut dir_indixes = Vec::new(); + let mut base_names = Vec::new(); + + let mut combined_file_sizes = 0; + + for (cpio_path, entry) in self.files.iter() { + combined_file_sizes += entry.size; + file_sizes.push(entry.size); + file_modes.push(entry.mode.into()); + // I really do not know the difference. It seems like file_rdevice is always 0 and file_device number always 1. + // Who knows, who cares. 
+ file_rdevs.push(0); + file_devices.push(1); + file_mtimes.push(entry.modified_at); + file_hashes.push(entry.sha_checksum.to_owned()); + file_linktos.push(entry.link.to_owned()); + file_flags.push(entry.flag); + file_usernames.push(entry.user.to_owned()); + file_groupnames.push(entry.group.to_owned()); + file_inodes.push(ino_index); + file_langs.push("".to_string()); + // safe because indexes cannot change after this as the RpmBuilder is consumed + // the dir is guaranteed to be there - or else there is a logic error + let index = self + .directories + .iter() + .position(|d| d == &entry.dir) + .unwrap(); + dir_indixes.push(index as u32); + base_names.push(entry.base_name.to_owned()); + file_verify_flags.push(u32::MAX); // @todo: + let content = entry.content.to_owned(); + let mut writer = cpio::newc::Builder::new(cpio_path) + .mode(entry.mode.into()) + .ino(ino_index) + .uid(self.uid.unwrap_or(0)) + .gid(self.gid.unwrap_or(0)) + .write(&mut self.compressor, content.len() as u32); + + writer.write_all(&content)?; + writer.finish()?; + + ino_index += 1; + } + + self.provides + .push(Dependency::eq(self.name.clone(), self.version.clone())); + self.provides.push(Dependency::eq( + format!("{}({})", self.name.clone(), self.arch.clone()), + self.version.clone(), + )); + + self.requires.push(Dependency::rpmlib( + "rpmlib(PayloadFilesHavePrefix)".to_string(), + "4.0-1".to_string(), + )); + + self.requires.push(Dependency::rpmlib( + "rpmlib(CompressedFileNames)".to_string(), + "3.0.4-1".to_string(), + )); + + self.requires.push(Dependency::rpmlib( + "rpmlib(FileDigests)".to_string(), + "4.6.0-1".to_string(), + )); + + if matches!(self.compressor, Compressor::Zstd(_)) { + self.requires.push(Dependency::rpmlib( + "rpmlib(PayloadIsZstd)".to_string(), + "5.4.18-1".to_string(), + )); + } + + let mut provide_names = Vec::new(); + let mut provide_flags = Vec::new(); + let mut provide_versions = Vec::new(); + + for d in self.provides.into_iter() { + provide_names.push(d.dep_name); 
+ provide_flags.push(d.sense); + provide_versions.push(d.version); + } + + let mut obsolete_names = Vec::new(); + let mut obsolete_flags = Vec::new(); + let mut obsolete_versions = Vec::new(); + + for d in self.obsoletes.into_iter() { + obsolete_names.push(d.dep_name); + obsolete_flags.push(d.sense); + obsolete_versions.push(d.version); + } + + let mut require_names = Vec::new(); + let mut require_flags = Vec::new(); + let mut require_versions = Vec::new(); + + for d in self.requires.into_iter() { + require_names.push(d.dep_name); + require_flags.push(d.sense); + require_versions.push(d.version); + } + + let mut conflicts_names = Vec::new(); + let mut conflicts_flags = Vec::new(); + let mut conflicts_versions = Vec::new(); + + for d in self.conflicts.into_iter() { + conflicts_names.push(d.dep_name); + conflicts_flags.push(d.sense); + conflicts_versions.push(d.version); + } + + let offset = 0; + + let mut actual_records = vec![ + IndexEntry::new( + IndexTag::RPMTAG_HEADERI18NTABLE, + offset, + IndexData::StringTag("C".to_string()), + ), + IndexEntry::new( + IndexTag::RPMTAG_NAME, + offset, + IndexData::StringTag(self.name), + ), + IndexEntry::new( + IndexTag::RPMTAG_EPOCH, + offset, + IndexData::Int32(vec![self.epoch]), + ), + IndexEntry::new( + IndexTag::RPMTAG_BUILDTIME, + offset, + IndexData::Int32(vec![self.build_time]), + ), + IndexEntry::new( + IndexTag::RPMTAG_VERSION, + offset, + IndexData::StringTag(self.version), + ), + IndexEntry::new( + IndexTag::RPMTAG_RELEASE, + offset, + IndexData::StringTag(self.release), + ), + IndexEntry::new( + IndexTag::RPMTAG_DESCRIPTION, + offset, + IndexData::StringTag(self.desc.clone()), + ), + IndexEntry::new( + IndexTag::RPMTAG_SUMMARY, + offset, + IndexData::StringTag(self.desc), + ), + IndexEntry::new( + IndexTag::RPMTAG_SIZE, + offset, + IndexData::Int32(vec![combined_file_sizes]), + ), + IndexEntry::new( + IndexTag::RPMTAG_LICENSE, + offset, + IndexData::StringTag(self.license), + ), + // + // 
IndexEntry::new(IndexTag::RPMTAG_GROUP, offset, IndexData::I18NString(group)), + IndexEntry::new( + IndexTag::RPMTAG_OS, + offset, + IndexData::StringTag("linux".to_string()), + ), + IndexEntry::new( + IndexTag::RPMTAG_GROUP, + offset, + IndexData::I18NString(vec!["Unspecified".to_string()]), + ), + IndexEntry::new( + IndexTag::RPMTAG_ARCH, + offset, + IndexData::StringTag(self.arch), + ), + IndexEntry::new( + IndexTag::RPMTAG_ENCODING, + offset, + IndexData::StringTag("utf-8".to_string()), + ), + IndexEntry::new( + IndexTag::RPMTAG_PAYLOADFORMAT, + offset, + IndexData::StringTag("cpio".to_string()), + ), + ]; + + // if we have an empty RPM, we have to leave out all file related index entries. + if !self.files.is_empty() { + actual_records.extend([ + IndexEntry::new( + IndexTag::RPMTAG_FILESIZES, + offset, + IndexData::Int32(file_sizes), + ), + IndexEntry::new( + IndexTag::RPMTAG_FILEMODES, + offset, + IndexData::Int16(file_modes), + ), + IndexEntry::new( + IndexTag::RPMTAG_FILERDEVS, + offset, + IndexData::Int16(file_rdevs), + ), + IndexEntry::new( + IndexTag::RPMTAG_FILEMTIMES, + offset, + IndexData::Int32(file_mtimes), + ), + IndexEntry::new( + IndexTag::RPMTAG_FILEDIGESTS, + offset, + IndexData::StringArray(file_hashes), + ), + IndexEntry::new( + IndexTag::RPMTAG_FILELINKTOS, + offset, + IndexData::StringArray(file_linktos), + ), + IndexEntry::new( + IndexTag::RPMTAG_FILEFLAGS, + offset, + IndexData::Int32(file_flags), + ), + IndexEntry::new( + IndexTag::RPMTAG_FILEUSERNAME, + offset, + IndexData::StringArray(file_usernames), + ), + IndexEntry::new( + IndexTag::RPMTAG_FILEGROUPNAME, + offset, + IndexData::StringArray(file_groupnames), + ), + IndexEntry::new( + IndexTag::RPMTAG_FILEDEVICES, + offset, + IndexData::Int32(file_devices), + ), + IndexEntry::new( + IndexTag::RPMTAG_FILEINODES, + offset, + IndexData::Int32(file_inodes), + ), + IndexEntry::new( + IndexTag::RPMTAG_DIRINDEXES, + offset, + IndexData::Int32(dir_indixes), + ), + IndexEntry::new( + 
IndexTag::RPMTAG_FILELANGS, + offset, + IndexData::StringArray(file_langs), + ), + IndexEntry::new( + IndexTag::RPMTAG_FILEDIGESTALGO, + offset, + IndexData::Int32(vec![FileDigestAlgorithm::Sha2_256 as u32]), + ), + IndexEntry::new( + IndexTag::RPMTAG_FILEVERIFYFLAGS, + offset, + IndexData::Int32(file_verify_flags), + ), + IndexEntry::new( + IndexTag::RPMTAG_BASENAMES, + offset, + IndexData::StringArray(base_names), + ), + IndexEntry::new( + IndexTag::RPMTAG_DIRNAMES, + offset, + IndexData::StringArray(self.directories.into_iter().collect()), + ), + IndexEntry::new( + IndexTag::RPMTAG_PROVIDENAME, + offset, + IndexData::StringArray(provide_names), + ), + ]); + } + + actual_records.extend([ + IndexEntry::new( + IndexTag::RPMTAG_PROVIDEVERSION, + offset, + IndexData::StringArray(provide_versions), + ), + IndexEntry::new( + IndexTag::RPMTAG_PROVIDEFLAGS, + offset, + IndexData::Int32(provide_flags), + ), + ]); + + let possible_compression_details = self.compressor.get_details(); + + if let Some(details) = possible_compression_details { + actual_records.push(IndexEntry::new( + IndexTag::RPMTAG_PAYLOADCOMPRESSOR, + offset, + IndexData::StringTag(details.compression_name.to_string()), + )); + actual_records.push(IndexEntry::new( + IndexTag::RPMTAG_PAYLOADFLAGS, + offset, + IndexData::StringTag(details.compression_level.to_string()), + )); + } + + if !self.changelog_authors.is_empty() { + actual_records.push(IndexEntry::new( + IndexTag::RPMTAG_CHANGELOGNAME, + offset, + IndexData::StringArray(self.changelog_authors), + )); + actual_records.push(IndexEntry::new( + IndexTag::RPMTAG_CHANGELOGTEXT, + offset, + IndexData::StringArray(self.changelog_entries), + )); + actual_records.push(IndexEntry::new( + IndexTag::RPMTAG_CHANGELOGTIME, + offset, + IndexData::Int32(self.changelog_times), + )); + } + + if !obsolete_flags.is_empty() { + actual_records.push(IndexEntry::new( + IndexTag::RPMTAG_OBSOLETENAME, + offset, + IndexData::StringArray(obsolete_names), + )); + 
actual_records.push(IndexEntry::new( + IndexTag::RPMTAG_OBSOLETEVERSION, + offset, + IndexData::StringArray(obsolete_versions), + )); + actual_records.push(IndexEntry::new( + IndexTag::RPMTAG_OBSOLETEFLAGS, + offset, + IndexData::Int32(obsolete_flags), + )); + } + + if !require_flags.is_empty() { + actual_records.push(IndexEntry::new( + IndexTag::RPMTAG_REQUIRENAME, + offset, + IndexData::StringArray(require_names), + )); + actual_records.push(IndexEntry::new( + IndexTag::RPMTAG_REQUIREVERSION, + offset, + IndexData::StringArray(require_versions), + )); + actual_records.push(IndexEntry::new( + IndexTag::RPMTAG_REQUIREFLAGS, + offset, + IndexData::Int32(require_flags), + )); + } + + if !conflicts_flags.is_empty() { + actual_records.push(IndexEntry::new( + IndexTag::RPMTAG_CONFLICTNAME, + offset, + IndexData::StringArray(conflicts_names), + )); + actual_records.push(IndexEntry::new( + IndexTag::RPMTAG_CONFLICTVERSION, + offset, + IndexData::StringArray(conflicts_versions), + )); + actual_records.push(IndexEntry::new( + IndexTag::RPMTAG_CONFLICTFLAGS, + offset, + IndexData::Int32(conflicts_flags), + )); + } + + if let Some(pre_inst_script) = self.pre_inst_script { + actual_records.push(IndexEntry::new( + IndexTag::RPMTAG_PREIN, + offset, + IndexData::StringTag(pre_inst_script), + )); + } + + if let Some(post_inst_script) = self.post_inst_script { + actual_records.push(IndexEntry::new( + IndexTag::RPMTAG_POSTIN, + offset, + IndexData::StringTag(post_inst_script), + )); + } + + if let Some(pre_uninst_script) = self.pre_uninst_script { + actual_records.push(IndexEntry::new( + IndexTag::RPMTAG_PREUN, + offset, + IndexData::StringTag(pre_uninst_script), + )); + } + + if let Some(post_uninst_script) = self.post_uninst_script { + actual_records.push(IndexEntry::new( + IndexTag::RPMTAG_POSTUN, + offset, + IndexData::StringTag(post_uninst_script), + )); + } + + if let Some(vendor) = self.vendor { + actual_records.push(IndexEntry::new( + IndexTag::RPMTAG_VENDOR, + offset, + 
IndexData::StringTag(vendor), + )); + } + + if let Some(url) = self.url { + actual_records.push(IndexEntry::new( + IndexTag::RPMTAG_URL, + offset, + IndexData::StringTag(url), + )); + } + + if let Some(vcs) = self.vcs { + actual_records.push(IndexEntry::new( + IndexTag::RPMTAG_VCS, + offset, + IndexData::StringTag(vcs), + )); + } + + let header = Header::from_entries(actual_records, IndexTag::RPMTAG_HEADERIMMUTABLE); + self.compressor = cpio::newc::trailer(self.compressor)?; + let content = self.compressor.finish_compression()?; + + Ok((lead, header, content)) + } +} diff --git a/rdnf/rpm/src/rpm/compressor.rs b/rdnf/rpm/src/rpm/compressor.rs new file mode 100644 index 0000000000000000000000000000000000000000..5a2eb11b3c1b64fa8b236f1cb43db487270d3558 --- /dev/null +++ b/rdnf/rpm/src/rpm/compressor.rs @@ -0,0 +1,69 @@ +use crate::errors::*; +use std::io::Write; + +pub enum Compressor { + None(Vec), + Gzip(libflate::gzip::Encoder>), + Zstd(zstd::stream::Encoder<'static, Vec>), +} + +impl Write for Compressor { + fn write(&mut self, content: &[u8]) -> Result { + match self { + Compressor::None(data) => data.write(content), + Compressor::Gzip(encoder) => encoder.write(content), + Compressor::Zstd(encoder) => encoder.write(content), + } + } + fn flush(&mut self) -> Result<(), std::io::Error> { + match self { + Compressor::None(data) => data.flush(), + Compressor::Gzip(encoder) => encoder.flush(), + Compressor::Zstd(encoder) => encoder.flush(), + } + } +} +// 19 is used here as its 19 for fedora +impl std::str::FromStr for Compressor { + type Err = RPMError; + fn from_str(raw: &str) -> Result { + match raw { + "none" => Ok(Compressor::None(Vec::new())), + "gzip" => Ok(Compressor::Gzip(libflate::gzip::Encoder::new(Vec::new())?)), + "zstd" => Ok(Compressor::Zstd(zstd::stream::Encoder::new( + Vec::new(), + 19, + )?)), + _ => Err(RPMError::UnknownCompressorType(raw.to_string())), + } + } +} + +impl Compressor { + pub(crate) fn finish_compression(self) -> Result, RPMError> { 
+ match self { + Compressor::None(data) => Ok(data), + Compressor::Gzip(encoder) => Ok(encoder.finish().into_result()?), + Compressor::Zstd(encoder) => Ok(encoder.finish()?), + } + } + + pub(crate) fn get_details(&self) -> Option { + match self { + Compressor::None(_) => None, + Compressor::Gzip(_) => Some(CompressionDetails { + compression_level: "9", + compression_name: "gzip", + }), + Compressor::Zstd(_) => Some(CompressionDetails { + compression_level: "19", + compression_name: "zstd", + }), + } + } +} + +pub(crate) struct CompressionDetails { + pub(crate) compression_level: &'static str, + pub(crate) compression_name: &'static str, +} diff --git a/rdnf/rpm/src/rpm/headers/header.rs b/rdnf/rpm/src/rpm/headers/header.rs new file mode 100644 index 0000000000000000000000000000000000000000..ee98f63c4133f4dd341e2701a08ca53523bcceef --- /dev/null +++ b/rdnf/rpm/src/rpm/headers/header.rs @@ -0,0 +1,983 @@ +use nom::bytes::complete; +use nom::number::complete::{be_i32, be_u16, be_u32, be_u64, be_u8}; + +#[cfg(feature = "async-futures")] +use futures::io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt}; + +use crate::constants::{self, *}; +use std::fmt; +use std::path::PathBuf; + +use super::*; +use crate::errors::*; + +/// Header tag. +/// +/// Each and every header has a particular header tag that identifies the type of +/// the header the format / information contained in that header. 
+pub trait Tag: + num::FromPrimitive + num::ToPrimitive + PartialEq + fmt::Display + fmt::Debug + Copy + TypeName +{ +} + +impl Tag for T where + T: num::FromPrimitive + + num::ToPrimitive + + PartialEq + + fmt::Display + + fmt::Debug + + Copy + + TypeName +{ +} + +#[derive(Debug, PartialEq)] +pub struct Header { + pub(crate) index_header: IndexHeader, + pub(crate) index_entries: Vec>, + pub(crate) store: Vec, +} +impl AsRef<[u8]> for Header +where + T: Tag, +{ + fn as_ref(&self) -> &[u8] { + &self.store + } +} +impl Header +where + T: Tag, +{ + #[cfg(feature = "async-futures")] + pub(crate) async fn parse_async( + input: &mut I, + ) -> Result, RPMError> { + let mut buf: [u8; 16] = [0; 16]; + input.read_exact(&mut buf).await?; + let index_header = IndexHeader::parse(&buf)?; + // read rest of header => each index consists of 16 bytes. The index header knows how large the store is. + let mut buf = vec![0; (index_header.header_size + index_header.num_entries * 16) as usize]; + input.read_exact(&mut buf).await?; + Self::parse_header(index_header, &buf[..]) + } + + fn parse_header(index_header: IndexHeader, mut bytes: &[u8]) -> Result, RPMError> { + // parse all entries + let mut entries: Vec> = Vec::new(); + let mut buf_len = bytes.len(); + for _ in 0..index_header.num_entries { + let (rest, entry) = IndexEntry::parse(bytes)?; + entries.push(entry); + bytes = rest; + assert_eq!(16, buf_len - bytes.len()); + buf_len = bytes.len(); + } + + assert_eq!(bytes.len(), index_header.header_size as usize); + + let store = Vec::from(bytes); + // add data to entries + for entry in &mut entries { + let mut remaining = &bytes[entry.offset as usize..]; + match &mut entry.data { + IndexData::Null => {} + IndexData::Char(ref mut chars) => { + parse_entry_data_number(remaining, entry.num_items, chars, be_u8)?; + } + IndexData::Int8(ref mut ints) => { + parse_entry_data_number(remaining, entry.num_items, ints, be_u8)?; + } + IndexData::Int16(ref mut ints) => { + 
parse_entry_data_number(remaining, entry.num_items, ints, be_u16)?; + } + IndexData::Int32(ref mut ints) => { + parse_entry_data_number(remaining, entry.num_items, ints, be_u32)?; + } + IndexData::Int64(ref mut ints) => { + parse_entry_data_number(remaining, entry.num_items, ints, be_u64)?; + } + IndexData::StringTag(ref mut string) => { + let (_rest, raw_string) = complete::take_till(|item| item == 0)(remaining)?; + string.push_str(String::from_utf8_lossy(raw_string).as_ref()); + } + IndexData::Bin(ref mut bin) => { + parse_entry_data_number(remaining, entry.num_items, bin, be_u8)?; + } + IndexData::StringArray(ref mut strings) => { + for _ in 0..entry.num_items { + let (rest, raw_string) = complete::take_till(|item| item == 0)(remaining)?; + // the null byte is still in there.. we need to cut it out. + remaining = &rest[1..]; + let string = String::from_utf8_lossy(raw_string).to_string(); + strings.push(string); + } + } + IndexData::I18NString(ref mut strings) => { + for _ in 0..entry.num_items { + let (rest, raw_string) = complete::take_till(|item| item == 0)(remaining)?; + remaining = rest; + let string = String::from_utf8_lossy(raw_string).to_string(); + strings.push(string); + } + } + } + } + + Ok(Header { + index_header, + index_entries: entries, + store, + }) + } + + pub(crate) fn parse(input: &mut I) -> Result, RPMError> { + let mut buf: [u8; 16] = [0; 16]; + input.read_exact(&mut buf)?; + let index_header = IndexHeader::parse(&buf)?; + // read rest of header => each index consists of 16 bytes. The index header knows how large the store is. 
+ let mut buf = vec![0; (index_header.header_size + index_header.num_entries * 16) as usize]; + input.read_exact(&mut buf)?; + Self::parse_header(index_header, &buf[..]) + } + + #[cfg(feature = "async-futures")] + pub(crate) async fn write_async( + &self, + out: &mut W, + ) -> Result<(), RPMError> { + self.index_header.write_async(out).await?; + for entry in &self.index_entries { + entry.write_index_async(out).await?; + } + out.write_all(&self.store).await?; + Ok(()) + } + + pub(crate) fn write(&self, out: &mut W) -> Result<(), RPMError> { + self.index_header.write(out)?; + for entry in &self.index_entries { + entry.write_index(out)?; + } + out.write_all(&self.store)?; + Ok(()) + } + + pub(crate) fn find_entry_or_err(&self, tag: &T) -> Result<&IndexEntry, RPMError> { + self.index_entries + .iter() + .find(|entry| &entry.tag == tag) + .ok_or_else(|| RPMError::TagNotFound(tag.to_string())) + } + + #[cfg(feature = "signature-meta")] + pub(crate) fn get_entry_data_as_binary(&self, tag: T) -> Result<&[u8], RPMError> { + let entry = self.find_entry_or_err(&tag)?; + entry + .data + .as_binary() + .ok_or_else(|| RPMError::UnexpectedTagDataType { + expected_data_type: "binary", + actual_data_type: entry.data.to_string(), + tag: entry.tag.to_string(), + }) + } + + pub(crate) fn get_entry_data_as_string(&self, tag: T) -> Result<&str, RPMError> { + let entry = self.find_entry_or_err(&tag)?; + entry + .data + .as_str() + .ok_or_else(|| RPMError::UnexpectedTagDataType { + expected_data_type: "string", + actual_data_type: entry.data.to_string(), + tag: entry.tag.to_string(), + }) + } + + pub(crate) fn get_entry_data_as_u16_array(&self, tag: T) -> Result, RPMError> { + let entry = self.find_entry_or_err(&tag)?; + entry + .data + .as_u16_array() + .ok_or_else(|| RPMError::UnexpectedTagDataType { + expected_data_type: "uint16 array", + actual_data_type: entry.data.to_string(), + tag: entry.tag.to_string(), + }) + } + + pub(crate) fn get_entry_data_as_u32(&self, tag: T) -> Result { + 
let entry = self.find_entry_or_err(&tag)?; + entry + .data + .as_u32() + .ok_or_else(|| RPMError::UnexpectedTagDataType { + expected_data_type: "uint32", + actual_data_type: entry.data.to_string(), + tag: entry.tag.to_string(), + }) + } + + pub(crate) fn get_entry_data_as_u32_array(&self, tag: T) -> Result, RPMError> { + let entry = self.find_entry_or_err(&tag)?; + entry + .data + .as_u32_array() + .ok_or_else(|| RPMError::UnexpectedTagDataType { + expected_data_type: "uint32 array", + actual_data_type: entry.data.to_string(), + tag: entry.tag.to_string(), + }) + } + + #[allow(unused)] + pub(crate) fn get_entry_data_as_u64(&self, tag: T) -> Result { + let entry = self.find_entry_or_err(&tag)?; + entry + .data + .as_u64() + .ok_or_else(|| RPMError::UnexpectedTagDataType { + expected_data_type: "uint64", + actual_data_type: entry.data.to_string(), + tag: entry.tag.to_string(), + }) + } + + pub(crate) fn get_entry_data_as_u64_array(&self, tag: T) -> Result, RPMError> { + let entry = self.find_entry_or_err(&tag)?; + entry + .data + .as_u64_array() + .ok_or_else(|| RPMError::UnexpectedTagDataType { + expected_data_type: "uint64 array", + actual_data_type: entry.data.to_string(), + tag: entry.tag.to_string(), + }) + } + + pub(crate) fn get_entry_data_as_string_array(&self, tag: T) -> Result<&[String], RPMError> { + let entry = self.find_entry_or_err(&tag)?; + entry + .data + .as_string_array() + .ok_or_else(|| RPMError::UnexpectedTagDataType { + expected_data_type: "string array", + actual_data_type: entry.data.to_string(), + tag: entry.tag.to_string(), + }) + } + + pub(crate) fn create_region_tag(tag: T, records_count: i32, offset: i32) -> IndexEntry { + let mut header_immutable_index_data = vec![]; + let mut hie = IndexEntry::new(tag, (records_count + 1) * -16, IndexData::Bin(Vec::new())); + hie.num_items = 16; + hie.write_index(&mut header_immutable_index_data) + .expect("unable to write to memory buffer"); + IndexEntry::new(tag, offset, 
IndexData::Bin(header_immutable_index_data)) + } + + pub(crate) fn from_entries(mut actual_records: Vec>, region_tag: T) -> Self { + let mut store = Vec::new(); + for record in &mut actual_records { + record.offset = store.len() as i32; + let alignment = record.data.append(&mut store); + record.offset += alignment as i32; + } + + let region_tag = + Self::create_region_tag(region_tag, actual_records.len() as i32, store.len() as i32); + region_tag.data.append(&mut store); + + let mut all_records = vec![region_tag]; + + all_records.append(&mut actual_records); + let store_size = store.len(); + + // TODO dunno if this is necessary yet. + // if store_size % 8 > 0 { + // store_size += 8 - (store_size % 8); + // } + let index_header = IndexHeader::new(all_records.len() as u32, store_size as u32); + Header { + index_entries: all_records, + index_header, + store, + } + } +} + +impl Header { + /// Create a new full signature header. + /// + /// `size` is combined size of header, header store and the payload + /// + /// PGP and RSA tags expect signatures according to [RFC2440](https://tools.ietf.org/html/rfc2440) + /// + /// Please use the [`builder`](Self::builder()) which has modular and safe API. 
+ #[cfg(feature = "signature-meta")] + pub(crate) fn new_signature_header( + headers_plus_payload_size: u32, + md5sum: &[u8], + sha1: String, + rsa_spanning_header: &[u8], + rsa_spanning_header_and_archive: &[u8], + ) -> Self { + SignatureHeaderBuilder::new() + .add_digest(sha1.as_str(), md5sum) + .add_signature(rsa_spanning_header, rsa_spanning_header_and_archive) + .build(headers_plus_payload_size) + } + + #[cfg(feature = "signature-meta")] + pub fn builder() -> SignatureHeaderBuilder { + SignatureHeaderBuilder::::new() + } + + #[cfg(feature = "async-futures")] + pub(crate) async fn parse_signature_async( + input: &mut I, + ) -> Result, RPMError> { + let result = Self::parse_async(input).await?; + + let modulo = result.index_header.header_size % 8; + if modulo > 0 { + let align_size = 8 - modulo; + let mut discard = vec![0; align_size as usize]; + input.read_exact(&mut discard).await?; + } + Ok(result) + } + + pub(crate) fn parse_signature( + input: &mut I, + ) -> Result, RPMError> { + let result = Self::parse(input)?; + // this structure is aligned to 8 bytes - rest is filled up with zeros. + // if the size of our store is not a modulo of 8, we discard bytes to align to the 8 byte boundary. 
+ let modulo = result.index_header.header_size % 8; + if modulo > 0 { + let align_size = 8 - modulo; + let mut discard = vec![0; align_size as usize]; + input.read_exact(&mut discard)?; + } + Ok(result) + } + + #[cfg(feature = "async-futures")] + pub(crate) async fn write_signature_async( + &self, + out: &mut W, + ) -> Result<(), RPMError> { + self.write_async(out).await?; + let modulo = self.index_header.header_size % 8; + if modulo > 0 { + let expansion = vec![0; 8 - modulo as usize]; + out.write_all(&expansion).await?; + } + Ok(()) + } + + pub(crate) fn write_signature(&self, out: &mut W) -> Result<(), RPMError> { + self.write(out)?; + let modulo = self.index_header.header_size % 8; + if modulo > 0 { + let expansion = vec![0; 8 - modulo as usize]; + out.write_all(&expansion)?; + } + Ok(()) + } + + pub fn new_empty() -> Self { + Self { + index_header: IndexHeader::new(0, 0), + index_entries: vec![], + store: vec![], + } + } + + pub fn clear(&mut self) { + self.index_entries.clear(); + self.index_header.header_size = 0; + self.index_header.num_entries = 0; + self.store.clear() + } +} + +/// User facing accessor type representing ownership of a file +#[derive(Debug, Clone, Hash, Eq, PartialEq)] +pub struct FileOwnership { + pub user: String, + pub group: String, +} + +/// Declaration what category this file belongs to +#[repr(u32)] +#[derive(Debug, Clone, Copy, Hash, Eq, PartialEq, enum_primitive_derive::Primitive)] +pub enum FileCategory { + None = 0, + Config = constants::RPMFILE_CONFIG, + Doc = constants::RPMFILE_DOC, +} + +impl Default for FileCategory { + fn default() -> Self { + Self::None + } +} + +#[repr(u32)] +#[derive(Debug, Clone, Copy, enum_primitive_derive::Primitive)] +pub enum FileDigestAlgorithm { + // broken and very broken + Md5 = constants::PGPHASHALGO_MD5, + Sha1 = constants::PGPHASHALGO_SHA1, + Md2 = constants::PGPHASHALGO_MD2, + + // not proven to be broken, weaker variants broken + #[allow(non_camel_case_types)] + Haval_5_160 = 
constants::PGPHASHALGO_HAVAL_5_160, // not part of PGP + Ripemd160 = constants::PGPHASHALGO_RIPEMD160, + + Tiger192 = constants::PGPHASHALGO_TIGER192, // not part of PGP + Sha2_256 = constants::PGPHASHALGO_SHA256, + Sha2_384 = constants::PGPHASHALGO_SHA384, + Sha2_512 = constants::PGPHASHALGO_SHA512, + Sha2_224 = constants::PGPHASHALGO_SHA224, +} + +impl Default for FileDigestAlgorithm { + fn default() -> Self { + // if the entry is missing, this is the default fallback + Self::Md5 + } +} + +#[derive(Debug, Clone, Hash, Eq, PartialEq)] +pub enum FileDigest { + Md5(Vec), + Sha2_256(Vec), + Sha2_384(Vec), + Sha2_512(Vec), + Sha2_224(Vec), + // @todo unsupported other types for now +} + +impl FileDigest { + pub fn load_from_str( + algorithm: FileDigestAlgorithm, + stringly_data: impl AsRef, + ) -> Result { + let hex: Vec = hex::decode(stringly_data.as_ref())?; + Ok(match algorithm { + FileDigestAlgorithm::Md5 if hex.len() == 16 => FileDigest::Md5(hex), + FileDigestAlgorithm::Sha2_256 if hex.len() == 32 => FileDigest::Sha2_256(hex), + FileDigestAlgorithm::Sha2_224 if hex.len() == 30 => FileDigest::Sha2_224(hex), + FileDigestAlgorithm::Sha2_384 if hex.len() == 48 => FileDigest::Sha2_384(hex), + FileDigestAlgorithm::Sha2_512 if hex.len() == 64 => FileDigest::Sha2_512(hex), + // @todo disambiguate mismatch of length from unsupported algorithm + digest_algo => return Err(RPMError::UnsupportedFileDigestAlgorithm(digest_algo)), + }) + } +} + +/// User facing accessor type for a file entry with contextual information +#[derive(Debug, Clone, Hash, Eq, PartialEq)] +pub struct FileEntry { + /// Full path of the file entry and where it will be installed to. + pub path: PathBuf, + /// The file mode of the file. + pub mode: types::FileMode, + /// Defines the owning user and group. + pub ownership: FileOwnership, + /// Clocks the last access time. 
+ pub modified_at: chrono::DateTime, + /// The size of this file, dirs have the inode size (which is insane) + pub size: usize, + /// Categorizes the file or directory into three groups. + pub category: FileCategory, + // @todo SELinux context? how is that done? + pub digest: Option, +} +#[derive(Debug, Clone)] +pub struct RpmEntry { + pub name: String, + pub flags: Option, + pub epoch: Option, + pub version: Option, + pub release: Option, +} +fn parse_entry_data_number<'a, T, E, F>( + mut input: &'a [u8], + num_items: u32, + items: &mut Vec, + parser: F, +) -> nom::IResult<&'a [u8], (), E> +where + E: nom::error::ParseError<&'a [u8]>, + F: Fn(&'a [u8]) -> nom::IResult<&'a [u8], T, E>, +{ + for _ in 0..num_items { + let (rest, data) = parser(input)?; + items.push(data); + input = rest; + } + + Ok((input, ())) +} + +#[cfg(test)] +mod tests2 { + use super::*; + + #[cfg(feature = "signature-meta")] + #[test] + fn signature_header_build() { + let size: u32 = 209_348; + let md5sum: &[u8] = &[22u8; 16]; + let sha1: String = "5A884F0CB41EC3DA6D6E7FC2F6AB9DECA8826E8D".to_owned(); + let rsa_spanning_header: &[u8] = b"111222333444"; + let rsa_spanning_header_and_archive: &[u8] = b"7777888899990000"; + + let truth = { + let offset = 0; + let entries = vec![ + IndexEntry::new( + IndexSignatureTag::RPMSIGTAG_SIZE, + offset, + IndexData::Int32(vec![size]), + ), + // TODO consider dropping md5 in favour of sha256 + IndexEntry::new( + IndexSignatureTag::RPMSIGTAG_MD5, + offset, + IndexData::Bin(md5sum.to_vec()), + ), + IndexEntry::new( + IndexSignatureTag::RPMSIGTAG_SHA1, + offset, + IndexData::StringTag(sha1.clone()), + ), + IndexEntry::new( + IndexSignatureTag::RPMSIGTAG_RSA, + offset, + IndexData::Bin(rsa_spanning_header.to_vec()), + ), + IndexEntry::new( + IndexSignatureTag::RPMSIGTAG_PGP, + offset, + IndexData::Bin(rsa_spanning_header_and_archive.to_vec()), + ), + ]; + Header::::from_entries(entries, IndexSignatureTag::HEADER_SIGNATURES) + }; + + let built = 
Header::::new_signature_header( + size, + md5sum, + sha1, + rsa_spanning_header, + rsa_spanning_header_and_archive, + ); + + assert_eq!(built, truth); + } +} + +/// A header keeping track of all other header records. +#[derive(Debug, PartialEq)] +pub(crate) struct IndexHeader { + /// rpm specific magic header + pub(crate) magic: [u8; 3], + /// rpm version number, always 1 + pub(crate) version: u8, + /// number of header entries + pub(crate) num_entries: u32, + /// total header size excluding the fixed part ( I think ) + pub(crate) header_size: u32, +} + +impl IndexHeader { + // 16 bytes + pub(crate) fn parse(input: &[u8]) -> Result { + // first three bytes are magic + let (rest, magic) = complete::take(3usize)(input)?; + for i in 0..2 { + if HEADER_MAGIC[i] != magic[i] { + return Err(RPMError::InvalidMagic { + expected: HEADER_MAGIC[i], + actual: magic[i], + complete_input: input.to_vec(), + }); + } + } + // then version + let (rest, version) = be_u8(rest)?; + + if version != 1 { + return Err(RPMError::UnsupportedHeaderVersion(version)); + } + // then reserved + let (rest, _) = complete::take(4usize)(rest)?; + // then number of of entries + let (rest, num_entries) = be_u32(rest)?; + // then size of header + let (_rest, header_size) = be_u32(rest)?; + + Ok(IndexHeader { + magic: HEADER_MAGIC, + version: 1, + num_entries, + header_size, + }) + } + + pub(crate) fn write(&self, out: &mut W) -> Result<(), RPMError> { + out.write_all(&self.magic)?; + out.write_all(&self.version.to_be_bytes())?; + out.write_all(&[0; 4])?; + out.write_all(&self.num_entries.to_be_bytes())?; + out.write_all(&self.header_size.to_be_bytes())?; + Ok(()) + } + + #[cfg(feature = "async-futures")] + pub(crate) async fn write_async( + &self, + out: &mut W, + ) -> Result<(), RPMError> { + out.write_all(&self.magic).await?; + out.write_all(&self.version.to_be_bytes()).await?; + out.write_all(&[0; 4]).await?; + out.write_all(&self.num_entries.to_be_bytes()).await?; + 
out.write_all(&self.header_size.to_be_bytes()).await?; + Ok(()) + } + + pub(crate) fn new(num_entries: u32, header_size: u32) -> Self { + IndexHeader { + magic: HEADER_MAGIC, + version: 1, + num_entries, + header_size, + } + } +} + +/// A single entry within the [`IndexHeader`](self::IndexHeader) +#[derive(Debug, PartialEq)] +pub(crate) struct IndexEntry { + pub(crate) tag: T, + pub(crate) data: IndexData, + pub(crate) offset: i32, + pub(crate) num_items: u32, +} + +use crate::constants::TypeName; + +impl IndexEntry { + // 16 bytes + pub(crate) fn parse(input: &[u8]) -> Result<(&[u8], Self), RPMError> { + //first 4 bytes are the tag. + let (input, raw_tag) = be_u32(input)?; + + let tag: T = num::FromPrimitive::from_u32(raw_tag).ok_or_else(|| RPMError::InvalidTag { + raw_tag, + store_type: T::type_name(), + })?; + //next 4 bytes is the tag type + let (input, raw_tag_type) = be_u32(input)?; + + // initialize the datatype. Parsing of the data happens later since the store comes after the index section. + let data = IndexData::from_type_as_u32(raw_tag_type).ok_or_else(|| { + RPMError::InvalidTagDataType { + raw_data_type: raw_tag_type, + store_type: T::type_name(), + } + })?; + + // next 4 bytes is the offset relative to the beginning of the store + let (input, offset) = be_i32(input)?; + + // last 4 bytes are the count that contains the number of data items pointed to by the index entry + let (rest, num_items) = be_u32(input)?; + + Ok(( + rest, + IndexEntry { + tag, + data, + offset, + num_items, + }, + )) + } + + #[cfg(feature = "async-futures")] + pub(crate) async fn write_index_async( + &self, + out: &mut W, + ) -> Result<(), RPMError> { + // unwrap() is safe because tags are predefined and are all within u32 range. 
+ let mut written = out.write(&self.tag.to_u32().unwrap().to_be_bytes()).await?; + written += out.write(&self.data.type_as_u32().to_be_bytes()).await?; + written += out.write(&self.offset.to_be_bytes()).await?; + written += out.write(&self.num_items.to_be_bytes()).await?; + assert_eq!(16, written, "there should be 16 bytes written"); + Ok(()) + } + + pub(crate) fn write_index(&self, out: &mut W) -> Result<(), RPMError> { + // unwrap() is safe because tags are predefined. + let mut written = out.write(&self.tag.to_u32().unwrap().to_be_bytes())?; + written += out.write(&self.data.type_as_u32().to_be_bytes())?; + written += out.write(&self.offset.to_be_bytes())?; + written += out.write(&self.num_items.to_be_bytes())?; + assert_eq!(16, written, "there should be 16 bytes written"); + Ok(()) + } + + pub(crate) fn new(tag: T, offset: i32, data: IndexData) -> IndexEntry { + IndexEntry { + tag, + offset, + num_items: data.num_items(), + data, + } + } +} + +/// Data as present in a [`IndexEntry`](self::IndexEntry) . 
+#[derive(Debug, PartialEq, Eq)] +pub(crate) enum IndexData { + Null, + Char(Vec), + Int8(Vec), + Int16(Vec), + Int32(Vec), + Int64(Vec), + StringTag(String), + Bin(Vec), + StringArray(Vec), + I18NString(Vec), +} + +impl fmt::Display for IndexData { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let rep = match self { + IndexData::Null => "Null", + IndexData::Bin(_) => "Bin", + IndexData::Char(_) => "Char", + IndexData::I18NString(_) => "I18NString", + IndexData::StringTag(_) => "String", + IndexData::StringArray(_) => "StringArray", + IndexData::Int8(_) => "i8", + IndexData::Int16(_) => "i16", + IndexData::Int32(_) => "i32", + IndexData::Int64(_) => "i64", + }; + write!(f, "{}", rep) + } +} + +impl IndexData { + pub(crate) fn append(&self, store: &mut Vec) -> u32 { + match &self { + IndexData::Null => 0, + IndexData::Char(d) => { + store.extend_from_slice(d); + 0 + } + IndexData::Int8(d) => { + for i in d.iter().map(|i| i.to_be_bytes()) { + store.push(i[0]); + } + 0 + } + IndexData::Int16(d) => { + // align to 2 bytes + + let alignment = if store.len() % 2 != 0 { + store.push(0); + 1 + } else { + 0 + }; + let iter = d.iter().flat_map(|item| item.to_be_bytes().to_vec()); + for byte in iter { + store.push(byte); + } + alignment + } + IndexData::Int32(d) => { + // align to 4 bytes + let mut alignment = 0; + while store.len() % 4 > 0 { + store.push(0); + alignment += 1; + } + let iter = d.iter().flat_map(|item| item.to_be_bytes().to_vec()); + for byte in iter { + store.push(byte); + } + alignment + } + IndexData::Int64(d) => { + // align to 8 bytes + let mut alignment = 0; + while store.len() % 8 > 0 { + store.push(0); + alignment += 1; + } + let iter = d.iter().flat_map(|item| item.to_be_bytes().to_vec()); + for byte in iter { + store.push(byte); + } + alignment + } + IndexData::StringTag(d) => { + store.extend_from_slice(d.as_bytes()); + store.push(0); + 0 + } + IndexData::Bin(d) => { + store.extend_from_slice(d); + 0 + } + IndexData::StringArray(d) 
=> { + for item in d { + store.extend_from_slice(item.as_bytes()); + store.push(0); + } + 0 + } + IndexData::I18NString(d) => { + for item in d { + store.extend_from_slice(item.as_bytes()); + store.push(0); + } + 0 + } + } + } + + pub(crate) fn num_items(&self) -> u32 { + match self { + IndexData::Null => 0, + IndexData::Bin(items) => items.len() as u32, + IndexData::Char(items) => items.len() as u32, + IndexData::I18NString(items) => items.len() as u32, + IndexData::StringTag(_) => 1, + IndexData::StringArray(items) => items.len() as u32, + IndexData::Int8(items) => items.len() as u32, + IndexData::Int16(items) => items.len() as u32, + IndexData::Int32(items) => items.len() as u32, + IndexData::Int64(items) => items.len() as u32, + } + } + + pub(crate) fn from_type_as_u32(i: u32) -> Option { + match i { + 0 => Some(IndexData::Null), + 1 => Some(IndexData::Char(Vec::new())), + 2 => Some(IndexData::Int8(Vec::new())), + 3 => Some(IndexData::Int16(Vec::new())), + 4 => Some(IndexData::Int32(Vec::new())), + 5 => Some(IndexData::Int64(Vec::new())), + 6 => Some(IndexData::StringTag(String::new())), + 7 => Some(IndexData::Bin(Vec::new())), + 8 => Some(IndexData::StringArray(Vec::new())), + 9 => Some(IndexData::I18NString(Vec::new())), + _ => None, + } + } + + pub(crate) fn type_as_u32(&self) -> u32 { + match self { + IndexData::Null => 0, + IndexData::Char(_) => 1, + IndexData::Int8(_) => 2, + IndexData::Int16(_) => 3, + IndexData::Int32(_) => 4, + IndexData::Int64(_) => 5, + IndexData::StringTag(_) => 6, + IndexData::Bin(_) => 7, + + IndexData::StringArray(_) => 8, + IndexData::I18NString(_) => 9, + } + } + + pub(crate) fn as_str(&self) -> Option<&str> { + match self { + IndexData::StringTag(s) => Some(s), + _ => None, + } + } + + #[allow(unused)] + pub(crate) fn as_char_array(&self) -> Option> { + match self { + IndexData::Char(s) => Some(s.to_vec()), + _ => None, + } + } + + #[allow(unused)] + pub(crate) fn as_u8_array(&self) -> Option> { + match self { + 
IndexData::Int8(s) => Some(s.to_vec()), + _ => None, + } + } + + pub(crate) fn as_u16_array(&self) -> Option> { + match self { + IndexData::Int16(s) => Some(s.to_vec()), + _ => None, + } + } + + pub(crate) fn as_u32(&self) -> Option { + match self { + IndexData::Int32(s) => s.first().copied(), + _ => None, + } + } + pub(crate) fn as_u32_array(&self) -> Option> { + match self { + IndexData::Int32(s) => Some(s.to_vec()), + _ => None, + } + } + + #[allow(unused)] + pub(crate) fn as_u64(&self) -> Option { + match self { + IndexData::Int64(s) => s.first().copied(), + _ => None, + } + } + + pub(crate) fn as_u64_array(&self) -> Option> { + match self { + IndexData::Int64(s) => Some(s.to_vec()), + _ => None, + } + } + + pub(crate) fn as_string_array(&self) -> Option<&[String]> { + match self { + IndexData::StringArray(d) | IndexData::I18NString(d) => Some(d), + _ => None, + } + } + + #[cfg(feature = "signature-meta")] + pub(crate) fn as_binary(&self) -> Option<&[u8]> { + match self { + IndexData::Bin(d) => Some(d.as_slice()), + _ => None, + } + } +} diff --git a/rdnf/rpm/src/rpm/headers/lead.rs b/rdnf/rpm/src/rpm/headers/lead.rs new file mode 100644 index 0000000000000000000000000000000000000000..76e70ee0dee79284f77715976864c8001742fd4d --- /dev/null +++ b/rdnf/rpm/src/rpm/headers/lead.rs @@ -0,0 +1,147 @@ +use nom::bytes::complete; +use nom::number::complete::{be_u16, be_u8}; +use std::convert::TryInto; + +use crate::constants::*; +use crate::errors::*; + +#[cfg(feature = "async-futures")] +use futures::io::{AsyncWrite, AsyncWriteExt}; + +/// Lead of an rpm header. +/// +/// Used to contain valid data, now only a very limited subset is used +/// and the remaining data is set to fixed values such that compatibility is kept. +/// Only the "magic number" is still relevant as it is used to detect rpm files. 
+#[derive(Eq)] +pub struct Lead { + magic: [u8; 4], + major: u8, + minor: u8, + package_type: u16, + arch: u16, + name: [u8; 66], + os: u16, + signature_type: u16, + reserved: [u8; 16], +} + +impl std::fmt::Debug for Lead { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let name = String::from_utf8_lossy(&self.name); + f.debug_struct("Lead") + .field("magic", &self.magic) + .field("major", &self.major) + .field("minor", &self.minor) + .field("package_type", &self.package_type) + .field("arch", &self.arch) + .field("name", &name) + .field("os", &self.os) + .field("signature_type", &self.signature_type) + .field("reserved", &self.reserved) + .finish() + } +} + +impl Lead { + pub(crate) fn parse(input: &[u8]) -> Result { + let (rest, magic) = complete::take(4usize)(input)?; + for i in 0..magic.len() { + if magic[i] != RPM_MAGIC[i] { + return Err(RPMError::InvalidMagic { + expected: RPM_MAGIC[i], + actual: magic[i], + complete_input: input.to_vec(), + }); + } + } + let (rest, major) = be_u8(rest)?; + let (rest, minor) = be_u8(rest)?; + let (rest, pkg_type) = be_u16(rest)?; + let (rest, arch) = be_u16(rest)?; + let (rest, name) = complete::take(66usize)(rest)?; + let (rest, os) = be_u16(rest)?; + let (rest, sigtype) = be_u16(rest)?; + + let mut name_arr: [u8; 66] = [0; 66]; + name_arr.copy_from_slice(name); + + Ok(Lead { + magic: RPM_MAGIC, + major, + minor, + package_type: pkg_type, + arch, + name: name_arr, + os, + signature_type: sigtype, + reserved: rest.try_into().unwrap(), // safe unwrap here since we've checked length of slices. 
+ }) + } + #[cfg(feature = "async-futures")] + pub(crate) async fn write_async( + &self, + out: &mut W, + ) -> Result<(), RPMError> { + out.write_all(&self.magic).await?; + out.write_all(&self.major.to_be_bytes()).await?; + out.write_all(&self.minor.to_be_bytes()).await?; + out.write_all(&self.package_type.to_be_bytes()).await?; + out.write_all(&self.arch.to_be_bytes()).await?; + out.write_all(&self.name).await?; + out.write_all(&self.os.to_be_bytes()).await?; + out.write_all(&self.signature_type.to_be_bytes()).await?; + out.write_all(&self.reserved).await?; + Ok(()) + } + + pub(crate) fn write(&self, out: &mut W) -> Result<(), RPMError> { + out.write_all(&self.magic)?; + out.write_all(&self.major.to_be_bytes())?; + out.write_all(&self.minor.to_be_bytes())?; + out.write_all(&self.package_type.to_be_bytes())?; + out.write_all(&self.arch.to_be_bytes())?; + out.write_all(&self.name)?; + out.write_all(&self.os.to_be_bytes())?; + out.write_all(&self.signature_type.to_be_bytes())?; + out.write_all(&self.reserved)?; + Ok(()) + } + + pub(crate) fn new(name: &str) -> Self { + let mut name_arr = [0; 66]; + // the last byte needs to be the null terminator + let name_size = std::cmp::min(name_arr.len() - 1, name.len()); + + name_arr[..name_size].clone_from_slice(&name.as_bytes()[..name_size]); + Lead { + magic: RPM_MAGIC, + major: 3, + minor: 0, + package_type: 0, + arch: 0, + name: name_arr, + os: 1, + signature_type: 5, + reserved: [0; 16], + } + } +} + +impl PartialEq for Lead { + fn eq(&self, other: &Lead) -> bool { + for i in 0..self.name.len() { + if other.name[i] != self.name[i] { + return false; + } + } + self.magic == other.magic + && self.major == other.major + && self.minor == other.minor + && self.package_type == other.package_type + && self.arch == other.arch + && self.os == other.os + && self.signature_type == other.signature_type + && self.reserved == other.reserved + } +} diff --git a/rdnf/rpm/src/rpm/headers/mod.rs b/rdnf/rpm/src/rpm/headers/mod.rs new file 
mode 100644 index 0000000000000000000000000000000000000000..2befd82c89c050e93d9bf616e296da4344494bcf --- /dev/null +++ b/rdnf/rpm/src/rpm/headers/mod.rs @@ -0,0 +1,13 @@ +mod header; +mod lead; +mod types; + +pub use header::*; +pub(crate) use lead::*; +pub use types::*; + +#[cfg(feature = "signature-meta")] +mod signature_builder; + +#[cfg(feature = "signature-meta")] +pub use signature_builder::*; diff --git a/rdnf/rpm/src/rpm/headers/signature_builder.rs b/rdnf/rpm/src/rpm/headers/signature_builder.rs new file mode 100644 index 0000000000000000000000000000000000000000..0232f47ff91b57e08b23a1f96e8c8709d6ce224c --- /dev/null +++ b/rdnf/rpm/src/rpm/headers/signature_builder.rs @@ -0,0 +1,156 @@ +//! signature index construction as builder pattern + +use super::*; + +use super::IndexEntry; +use crate::constants::*; +use std::default::Default; + +/// A marker trait for builder stages +pub trait ConstructionStage {} + +/// Initial empty builder. +pub struct Empty; +/// Builder beyond the empty stage, already containing a digest. +/// +/// Implies that headers and content are complete. +pub struct WithDigest; + +/// Builder already has a hash and is ready for completion. +pub struct WithSignature; + +impl ConstructionStage for Empty {} + +impl ConstructionStage for WithDigest {} + +impl ConstructionStage for WithSignature {} + +/// base signature header builder +/// +/// T describes the stage and can be one of `Empty`, `WithDigest`, `WithSignature` +pub struct SignatureHeaderBuilder +where + T: ConstructionStage, +{ + entries: Vec>, + phantom: std::marker::PhantomData, +} + +impl SignatureHeaderBuilder { + pub fn new() -> Self { + Self { + entries: Vec::with_capacity(10), + phantom: Default::default(), + } + } +} + +impl Default for SignatureHeaderBuilder { + fn default() -> Self { + Self::new() + } +} + +impl SignatureHeaderBuilder +where + T: ConstructionStage, +{ + /// Construct the complete signature header. 
+ pub fn build(mut self, headers_plus_payload_size: u32) -> Header { + self.entries.insert( + 0, + IndexEntry::new( + IndexSignatureTag::RPMSIGTAG_SIZE, + 0i32, // externally filled + IndexData::Int32(vec![headers_plus_payload_size]), + ), + ); + + Header::::from_entries( + self.entries, + IndexSignatureTag::HEADER_SIGNATURES, + ) + } +} + +impl SignatureHeaderBuilder { + /// add a digest over the header and a signature across header and source excluding the static lead + pub fn add_digest( + mut self, + digest_header_only: &str, + digest_header_and_archive: &[u8], + ) -> SignatureHeaderBuilder { + let offset = 0i32; // filled externally later on + self.entries.push(IndexEntry::new( + IndexSignatureTag::RPMSIGTAG_MD5, + offset, + IndexData::Bin(digest_header_and_archive.to_vec()), + )); + self.entries.push(IndexEntry::new( + IndexSignatureTag::RPMSIGTAG_SHA1, + offset, + IndexData::StringTag(digest_header_only.to_string()), + )); + SignatureHeaderBuilder:: { + entries: self.entries, + phantom: Default::default(), + } + } +} + +impl SignatureHeaderBuilder { + /// add a signature over the header and a signature across header and source excluding the static lead + pub fn add_signature( + mut self, + rsa_sig_header_only: &[u8], + rsa_sig_header_and_archive: &[u8], + ) -> SignatureHeaderBuilder { + let offset = 0i32; // filled externally later on + self.entries.push(IndexEntry::new( + IndexSignatureTag::RPMSIGTAG_RSA, + offset, + IndexData::Bin(rsa_sig_header_only.to_vec()), + )); + self.entries.push(IndexEntry::new( + IndexSignatureTag::RPMSIGTAG_PGP, + offset, + IndexData::Bin(rsa_sig_header_and_archive.to_vec()), + )); + SignatureHeaderBuilder:: { + entries: self.entries, + phantom: Default::default(), + } + } +} + +#[cfg(test)] +mod test { + use super::*; + #[test] + fn signature_builder() { + let builder = SignatureHeaderBuilder::::new(); + + let rsa_sig_header_only = [0u8; 32]; + let rsa_sig_header_and_archive = [0u8; 32]; + let _digest_header_only = [0u8; 64]; + 
let digest_header_and_archive = [0u8; 64]; + + let header = builder + .add_digest("", &digest_header_and_archive[..]) + .add_signature(&rsa_sig_header_only[..], &rsa_sig_header_and_archive[..]) + .build(32); + + assert!(header + .find_entry_or_err(&IndexSignatureTag::RPMSIGTAG_RSA) + .is_ok()); + assert!(header + .find_entry_or_err(&IndexSignatureTag::RPMSIGTAG_PGP) + .is_ok()); + assert!(header + .find_entry_or_err(&IndexSignatureTag::RPMSIGTAG_MD5) + .is_ok()); + assert!(header + .find_entry_or_err(&IndexSignatureTag::RPMSIGTAG_SHA1) + .is_ok()); + } +} diff --git a/rdnf/rpm/src/rpm/headers/types.rs b/rdnf/rpm/src/rpm/headers/types.rs new file mode 100644 index 0000000000000000000000000000000000000000..b75534bb17051101c5477dd7abdcb401d74db72d --- /dev/null +++ b/rdnf/rpm/src/rpm/headers/types.rs @@ -0,0 +1,414 @@ +//! A collection of types used in various header records. +use crate::{constants::*, errors}; + +/// Describes a file present in the rpm file. +pub struct RPMFileEntry { + pub(crate) size: u32, + pub(crate) mode: FileMode, + pub(crate) modified_at: u32, + pub(crate) sha_checksum: String, + pub(crate) link: String, + pub(crate) flag: u32, // @todo: + pub(crate) user: String, + pub(crate) group: String, + pub(crate) base_name: String, + #[allow(unused)] + pub(crate) dir: String, + pub(crate) content: Vec, +} + +#[non_exhaustive] +#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash)] +pub enum FileMode { + // It does not really matter if we use u16 or i16 since all we care about + // is the bit representation which is the same for both. + Dir { permissions: u16 }, + Regular { permissions: u16 }, + // For "Invalid" we use a larger integer since it is possible to create an invalid + // FileMode by providing an overflowing integer. + Invalid { raw_mode: i32, reason: &'static str }, +} + +// there are more file types but in the context of RPM, only regular and directory should be relevant. 
+// See section "The file type and mode" +const FILE_TYPE_BIT_MASK: u16 = 0o170000; // bit representation = "1111000000000000" +const PERMISSIONS_BIT_MASK: u16 = 0o7777; // bit representation = "0000111111111111" +const REGULAR_FILE_TYPE: u16 = 0o100000; // bit representation = "1000000000000000" +const DIR_FILE_TYPE: u16 = 0o040000; // bit representation = "0100000000000000" + +// @todo: +impl From for FileMode { + fn from(raw_mode: u16) -> Self { + // example + // 1111000000000000 (0o170000) <- file type bit mask + // &1000000111101101 (0o100755) <- regular executable file + // ----------------------------- + // 1000000000000000 (0o100000) <- type for regular files + // + // we effectively extract the file type bits with an AND operation. + // Here are two links for a quick refresh: + // + // + let file_type = raw_mode & FILE_TYPE_BIT_MASK; + let permissions = raw_mode & PERMISSIONS_BIT_MASK; + match file_type { + DIR_FILE_TYPE => FileMode::Dir { permissions }, + REGULAR_FILE_TYPE => FileMode::Regular { permissions }, + _ => FileMode::Invalid { + raw_mode: raw_mode as i32, + reason: "unknown file type", + }, + } + } +} + +impl From for FileMode { + fn from(raw_mode: i32) -> Self { + // since we ultimately only deal with 16bit integers + // we need to check if a safe conversion to 16bit is doable. + if raw_mode > u16::MAX.into() || raw_mode < i16::MIN.into() { + FileMode::Invalid { + raw_mode, + reason: "provided integer is out of 16bit bounds", + } + } else { + FileMode::from(raw_mode as u16) + } + } +} + +impl FileMode { + /// Create a new Regular instance. `permissions` can be between 0 and 0o7777. Values greater will be set to 0o7777. + pub fn regular(permissions: u16) -> Self { + FileMode::Regular { + permissions: permissions & PERMISSIONS_BIT_MASK, + } + } + + /// Create a new Dir instance. `permissions` can be between 0 and 0o7777. Values greater will be set to 0o7777. 
+ pub fn dir(permissions: u16) -> Self { + FileMode::Dir { + permissions: permissions & PERMISSIONS_BIT_MASK, + } + } + + /// Usually this should be done with TryFrom, but since we already have a `From` implementation, + /// we run into this issue: + pub fn try_from_raw(raw: i32) -> Result { + let mode: FileMode = raw.into(); + mode.to_result() + } + + /// Turns this FileMode into a result. If the mode is Invalid, it will be converted into + /// RPMError::InvalidFileMode. Otherwise it is Ok(self). + pub fn to_result(self) -> Result { + match self { + Self::Invalid { raw_mode, reason } => { + Err(errors::RPMError::InvalidFileMode { raw_mode, reason }) + } + _ => Ok(self), + } + } + + /// Returns the complete file mode (type and permissions) + pub fn raw_mode(&self) -> u16 { + match self { + Self::Dir { permissions } | Self::Regular { permissions } => { + *permissions | self.file_type() + } + Self::Invalid { + raw_mode, + reason: _, + } => *raw_mode as u16, + } + } + + pub fn file_type(&self) -> u16 { + match self { + Self::Dir { permissions: _ } => DIR_FILE_TYPE, + Self::Regular { permissions: _ } => REGULAR_FILE_TYPE, + Self::Invalid { + raw_mode, + reason: _, + } => *raw_mode as u16 & FILE_TYPE_BIT_MASK, + } + } + + pub fn permissions(&self) -> u16 { + match self { + Self::Dir { permissions } | Self::Regular { permissions } => *permissions, + Self::Invalid { + raw_mode, + reason: _, + } => *raw_mode as u16 & PERMISSIONS_BIT_MASK, + } + } +} + +impl From for u32 { + fn from(mode: FileMode) -> Self { + mode.raw_mode() as u32 + } +} + +impl From for u16 { + fn from(mode: FileMode) -> Self { + mode.raw_mode() + } +} + +/// Description of file modes. 
+/// +/// A subset +pub struct RPMFileOptions { + pub(crate) destination: String, + pub(crate) user: String, + pub(crate) group: String, + pub(crate) symlink: String, + pub(crate) mode: FileMode, + pub(crate) flag: u32, + pub(crate) inherit_permissions: bool, +} + +impl RPMFileOptions { + #[allow(clippy::new_ret_no_self)] + pub fn new>(dest: T) -> RPMFileOptionsBuilder { + RPMFileOptionsBuilder { + inner: RPMFileOptions { + destination: dest.into(), + user: "root".to_string(), + group: "root".to_string(), + symlink: "".to_string(), + mode: FileMode::regular(0o664), + flag: 0, + inherit_permissions: true, + }, + } + } +} + +pub struct RPMFileOptionsBuilder { + inner: RPMFileOptions, +} + +impl RPMFileOptionsBuilder { + pub fn user>(mut self, user: T) -> Self { + self.inner.user = user.into(); + self + } + + pub fn group>(mut self, group: T) -> Self { + self.inner.group = group.into(); + self + } + + pub fn symlink>(mut self, symlink: T) -> Self { + self.inner.symlink = symlink.into(); + self + } + + pub fn mode>(mut self, mode: T) -> Self { + self.inner.mode = mode.into(); + self.inner.inherit_permissions = false; + self + } + + pub fn is_doc(mut self) -> Self { + self.inner.flag = RPMFILE_DOC; + self + } + + pub fn is_config(mut self) -> Self { + self.inner.flag = RPMFILE_CONFIG; + self + } +} + +impl From for RPMFileOptions { + fn from(builder: RPMFileOptionsBuilder) -> Self { + builder.inner + } +} + +/// Description of a dependency as present in a RPM header record. 
+pub struct Dependency { + pub(crate) dep_name: String, + pub(crate) sense: u32, + pub(crate) version: String, +} + +impl Dependency { + pub fn less(dep_name: T, version: E) -> Self + where + T: Into, + E: Into, + { + Self::new(dep_name.into(), RPMSENSE_LESS, version.into()) + } + + pub fn less_eq(dep_name: T, version: E) -> Self + where + T: Into, + E: Into, + { + Self::new( + dep_name.into(), + RPMSENSE_LESS | RPMSENSE_EQUAL, + version.into(), + ) + } + + pub fn eq(dep_name: T, version: E) -> Self + where + T: Into, + E: Into, + { + Self::new(dep_name.into(), RPMSENSE_EQUAL, version.into()) + } + + pub fn greater(dep_name: T, version: E) -> Self + where + T: Into, + E: Into, + { + Self::new(dep_name.into(), RPMSENSE_GREATER, version.into()) + } + + pub fn greater_eq(dep_name: T, version: E) -> Self + where + T: Into, + E: Into, + { + Self::new( + dep_name.into(), + RPMSENSE_GREATER | RPMSENSE_EQUAL, + version.into(), + ) + } + + pub fn any(dep_name: T) -> Self + where + T: Into, + { + Self::new(dep_name.into(), RPMSENSE_ANY, "".to_string()) + } + + pub fn rpmlib(dep_name: T, version: E) -> Self + where + T: Into, + E: Into, + { + Self::new( + dep_name.into(), + RPMSENSE_RPMLIB | RPMSENSE_EQUAL, + version.into(), + ) + } + + fn new(dep_name: String, sense: u32, version: String) -> Self { + Dependency { + dep_name, + sense, + version, + } + } +} + +mod test { + + #[test] + fn test_file_mode() -> Result<(), Box> { + use super::*; + + // test constructor functions + let test_table = vec![(0, 0), (0o7777, 0o7777), (0o17777, 0o7777)]; + for (permissions, expected) in test_table { + let result = FileMode::dir(permissions); + assert_eq!(expected, result.permissions()); + let result = FileMode::regular(permissions); + assert_eq!(expected, result.permissions()); + } + + let test_table = vec![ + (0o10_0664, Ok(FileMode::regular(0o664))), + (0o04_0665, Ok(FileMode::dir(0o665))), + // test sticky bit + (0o10_1664, Ok(FileMode::regular(0o1664))), + ( + 0o664, + 
Err(errors::RPMError::InvalidFileMode { + raw_mode: 0o664, + reason: "unknown file type", + }), + ), + ( + 0o27_1664, + Err(errors::RPMError::InvalidFileMode { + raw_mode: 0o27_1664, + reason: "provided integer is out of 16bit bounds", + }), + ), + ]; + + // test try_from_raw + for (testant, expected) in test_table { + let result = FileMode::try_from_raw(testant); + match (&expected, &result) { + (Ok(expected), Ok(actual)) => { + assert_eq!(expected, actual); + } + (Err(expected), Err(actual)) => { + if let errors::RPMError::InvalidFileMode { + raw_mode: actual_raw_mode, + reason: actual_reason, + } = actual + { + if let errors::RPMError::InvalidFileMode { + raw_mode: expected_raw_mode, + reason: expected_reason, + } = expected + { + assert_eq!(expected_raw_mode, actual_raw_mode); + assert_eq!(expected_reason, actual_reason); + } else { + unreachable!(); + } + } else { + panic!("invalid error type"); + } + } + _ => panic!("a and b not equal,{:?} vs {:?}", expected, result), + } + } + + // test into methods + let test_table = vec![ + (0o10_0755, FileMode::regular(0o0755), REGULAR_FILE_TYPE), + (0o10_1755, FileMode::regular(0o1755), REGULAR_FILE_TYPE), + (0o04_0755, FileMode::dir(0o0755), DIR_FILE_TYPE), + ( + 0o20_0755, + FileMode::Invalid { + raw_mode: 0o20_0755, + reason: "provided integer is out of 16bit bounds", + }, + 0, + ), + ( + 0o0755, + FileMode::Invalid { + raw_mode: 0o0755, + reason: "unknown file type", + }, + 0, + ), + ]; + for (raw_mode, expected_mode, expected_type) in test_table { + let mode = FileMode::from(raw_mode); + assert_eq!(expected_mode, mode); + assert_eq!(raw_mode as u16, mode.raw_mode()); + assert_eq!(expected_type, mode.file_type()); + } + Ok(()) + } +} diff --git a/rdnf/rpm/src/rpm/mod.rs b/rdnf/rpm/src/rpm/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..73dc25d56a5f4810656730b7def339d4277a5938 --- /dev/null +++ b/rdnf/rpm/src/rpm/mod.rs @@ -0,0 +1,15 @@ +mod builder; +mod compressor; +mod headers; +mod 
package; + +#[cfg(feature = "signature-meta")] +pub mod signature; + +pub use headers::*; + +pub use compressor::*; + +pub use package::*; + +pub use builder::*; diff --git a/rdnf/rpm/src/rpm/package.rs b/rdnf/rpm/src/rpm/package.rs new file mode 100644 index 0000000000000000000000000000000000000000..bb1ca02bd0e1596a7ab5943f06f5b96a6d80d2eb --- /dev/null +++ b/rdnf/rpm/src/rpm/package.rs @@ -0,0 +1,632 @@ +use std::io::BufReader; +use std::ops::Deref; +use std::path::{Path, PathBuf}; + +use chrono::offset::TimeZone; +#[cfg(feature = "async-futures")] +use futures::io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt}; +use num_traits::FromPrimitive; + +use super::headers::*; +use super::Lead; + +use crate::constants::*; +use crate::errors::*; + +#[cfg(feature = "signature-meta")] +use crate::sequential_cursor::SeqCursor; +#[cfg(feature = "signature-meta")] +use crate::signature; + +#[cfg(feature = "signature-meta")] +use std::io::Seek; + +/// A complete rpm file. +/// +/// Can either be created using the [`RPMPackageBuilder`](super::builder::RPMPackageBuilder) +/// or used with [`parse`](`self::RPMPackage::parse`) to obtain from a file. +#[derive(Debug)] +pub struct RPMPackage { + /// Header and metadata structures. + /// + /// Contains the constant lead as well as the metadata store. + pub metadata: RPMPackageMetadata, + /// The compressed or uncompressed files. + pub content: Vec, +} + +impl RPMPackage { + /// Open and parse a file at the provided path as an RPM package. 
+ pub fn open>(path: P) -> Result { + let rpm_file = std::fs::File::open(path.as_ref())?; + let mut buf_reader = BufReader::new(rpm_file); + Self::parse(&mut buf_reader) + } + + #[cfg(feature = "async-futures")] + pub async fn parse_async(input: &mut I) -> Result { + let metadata = RPMPackageMetadata::parse_async(input).await?; + let mut content = Vec::new(); + input.read_to_end(&mut content).await?; + Ok(RPMPackage { metadata, content }) + } + + pub fn parse(input: &mut T) -> Result { + let metadata = RPMPackageMetadata::parse(input)?; + let mut content = Vec::new(); + input.read_to_end(&mut content)?; + Ok(RPMPackage { metadata, content }) + } + + pub fn write(&self, out: &mut W) -> Result<(), RPMError> { + self.metadata.write(out)?; + out.write_all(&self.content)?; + Ok(()) + } + + #[cfg(feature = "async-futures")] + pub async fn write_async(&self, out: &mut W) -> Result<(), RPMError> { + self.metadata.write_async(out).await?; + out.write_all(&self.content).await?; + Ok(()) + } + + // TODO allow passing an external signer/verifier + + /// sign all headers (except for the lead) using an external key and store it as the initial header + #[cfg(feature = "signature-meta")] + pub fn sign(&mut self, signer: S) -> Result<(), RPMError> + where + S: signature::Signing>, + { + use std::io::Read; + + // create a temporary byte repr of the header + // and re-create all hashes + + let mut header_bytes = Vec::::with_capacity(1024); + // make sure to not hash any previous signatures in the header + self.metadata.header.write(&mut header_bytes)?; + + let mut header_and_content_cursor = + SeqCursor::new(&[header_bytes.as_slice(), self.content.as_slice()]); + + let digest_md5 = { + use md5::Digest; + let mut hasher = md5::Md5::default(); + { + // avoid loading it into memory all at once + // since the content could be multiple 100s of MBs + let mut buf = [0u8; 256]; + while let Ok(n) = header_and_content_cursor.read(&mut buf[..]) { + if n == 0 { + break; + } + 
hasher.update(&buf[0..n]); + } + } + let hash_result = hasher.finalize(); + hash_result.to_vec() + }; + + header_and_content_cursor.rewind()?; + + let digest_sha1 = { + use sha1::Digest; + let mut hasher = sha1::Sha1::default(); + hasher.update(&header_bytes); + let digest = hasher.finalize(); + hex::encode(digest) + }; + + let rsa_signature_spanning_header_only = signer.sign(header_bytes.as_slice())?; + + let rsa_signature_spanning_header_and_archive = + signer.sign(&mut header_and_content_cursor)?; + + // NOTE: size stands for the combined size of header and payload. + self.metadata.signature = Header::::new_signature_header( + header_and_content_cursor + .len() + .try_into() + .expect("headers + payload can't be larger than 4gb"), + &digest_md5, + digest_sha1, + rsa_signature_spanning_header_only.as_slice(), + rsa_signature_spanning_header_and_archive.as_slice(), + ); + + Ok(()) + } + + /// Verify the signature as present within the RPM package. + /// + /// + #[cfg(feature = "signature-meta")] + pub fn verify_signature(&self, verifier: V) -> Result<(), RPMError> + where + V: signature::Verifying>, + { + // TODO retval should be SIGNATURE_VERIFIED or MISMATCH, not just an error + + let mut header_bytes = Vec::::with_capacity(1024); + self.metadata.header.write(&mut header_bytes)?; + + let signature_header_only = self + .metadata + .signature + .get_entry_data_as_binary(IndexSignatureTag::RPMSIGTAG_RSA)?; + + crate::signature::echo_signature("signature_header(header only)", signature_header_only); + + let signature_header_and_content = self + .metadata + .signature + .get_entry_data_as_binary(IndexSignatureTag::RPMSIGTAG_PGP)?; + + crate::signature::echo_signature( + "signature_header(header and content)", + signature_header_and_content, + ); + + verifier.verify(header_bytes.as_slice(), signature_header_only)?; + + let header_and_content_cursor = + SeqCursor::new(&[header_bytes.as_slice(), self.content.as_slice()]); + + verifier.verify(header_and_content_cursor, 
signature_header_and_content)?; + + Ok(()) + } +} + +#[derive(PartialEq, Debug)] +pub struct RPMPackageMetadata { + pub lead: Lead, + pub signature: Header, + pub header: Header, +} +impl Deref for RPMPackageMetadata { + type Target = Header; + + fn deref(&self) -> &Self::Target { + &self.header + } +} +impl Header { + #[inline] + pub fn is_source_package(&self) -> bool { + self.get_entry_data_as_u32(IndexTag::RPMTAG_SOURCEPACKAGE) + .is_ok() + } + + #[inline] + pub fn get_name(&self) -> Result<&str, RPMError> { + self.get_entry_data_as_string(IndexTag::RPMTAG_NAME) + } + + #[inline] + pub fn get_epoch(&self) -> Result { + self.get_entry_data_as_u32(IndexTag::RPMTAG_EPOCH) + } + + #[inline] + pub fn get_version(&self) -> Result<&str, RPMError> { + self.get_entry_data_as_string(IndexTag::RPMTAG_VERSION) + } + + #[inline] + pub fn get_release(&self) -> Result<&str, RPMError> { + self.get_entry_data_as_string(IndexTag::RPMTAG_RELEASE) + } + #[inline] + pub fn get_sig_md5(&self) -> Result<&[u8], RPMError> { + self.get_entry_data_as_binary(IndexTag::RPMTAG_SIGMD5) + } + #[inline] + pub fn get_arch(&self) -> Result<&str, RPMError> { + self.get_entry_data_as_string(IndexTag::RPMTAG_ARCH) + } + + #[inline] + pub fn get_vendor(&self) -> Result<&str, RPMError> { + self.get_entry_data_as_string(IndexTag::RPMTAG_VENDOR) + } + #[inline] + pub fn get_group(&self) -> Result<&[String], RPMError> { + self.get_entry_data_as_string_array(IndexTag::RPMTAG_GROUP) + } + #[inline] + pub fn get_url(&self) -> Result<&str, RPMError> { + self.get_entry_data_as_string(IndexTag::RPMTAG_URL) + } + + #[inline] + pub fn get_vcs(&self) -> Result<&str, RPMError> { + self.get_entry_data_as_string(IndexTag::RPMTAG_VCS) + } + + #[inline] + pub fn get_license(&self) -> Result<&str, RPMError> { + self.get_entry_data_as_string(IndexTag::RPMTAG_LICENSE) + } + + // TODO: internationalized strings + // get_summary, get_description, get_group + + #[inline] + pub fn get_packager(&self) -> Result<&str, 
RPMError> { + self.get_entry_data_as_string(IndexTag::RPMTAG_PACKAGER) + } + + #[inline] + pub fn get_build_time(&self) -> Result { + self.get_entry_data_as_u32(IndexTag::RPMTAG_BUILDTIME) + .map(|x| x as u64) + } + #[inline] + pub fn get_installed_size(&self) -> Result { + self.get_entry_data_as_u32(IndexTag::RPMTAG_SIZE) + .map(|x| x as u64) + } + #[inline] + pub fn get_archive_size(&self) -> Result { + self.get_entry_data_as_u32(IndexTag::RPMTAG_ARCHIVESIZE) + .map(|x| x as u64) + } + #[inline] + pub fn get_file_times(&self) -> Result, RPMError> { + self.get_entry_data_as_u32_array(IndexTag::RPMTAG_FILEMTIMES) + .map(|x| x.iter().map(|x| *x as u64).collect::>()) + } + #[inline] + pub fn get_build_host(&self) -> Result<&str, RPMError> { + self.get_entry_data_as_string(IndexTag::RPMTAG_BUILDHOST) + } + + #[inline] + pub fn get_source_rpm(&self) -> Result<&str, RPMError> { + self.get_entry_data_as_string(IndexTag::RPMTAG_SOURCERPM) + } + + #[inline] + pub fn get_summary(&self) -> Result<&[String], RPMError> { + self.get_entry_data_as_string_array(IndexTag::RPMTAG_SUMMARY) + } + + #[inline] + pub fn get_description(&self) -> Result<&[String], RPMError> { + self.get_entry_data_as_string_array(IndexTag::RPMTAG_DESCRIPTION) + } + // TODO: get_provides, get_requires, etc. 
+ // TODO: get_header_byte_range + // TODO: get_archive_size, get_installed_size + + #[inline] + pub fn get_provides(&self) -> Result, RPMError> { + let names: &[String] = self.get_entry_data_as_string_array(IndexTag::RPMTAG_PROVIDENAME)?; + let flags = self.get_entry_data_as_u32_array(IndexTag::RPMTAG_PROVIDEFLAGS)?; + let versions = self.get_entry_data_as_string_array(IndexTag::RPMTAG_PROVIDEVERSION)?; + itertools::multizip((names, flags, versions)) + .try_fold::, _, Result<_, RPMError>>( + Vec::with_capacity(names.len()), + |mut acc, (name, flag_num, ver)| { + let flags = parse_flag(flag_num); + let (epoch, version, release) = parse_vers(ver); + acc.push(RpmEntry { + name: name.to_owned(), + flags, + epoch, + version, + release, + }); + Ok(acc) + }, + ) + } + + #[inline] + pub fn get_requires(&self) -> Result, RPMError> { + let names: &[String] = self.get_entry_data_as_string_array(IndexTag::RPMTAG_REQUIRENAME)?; + let flags: Vec = self.get_entry_data_as_u32_array(IndexTag::RPMTAG_REQUIREFLAGS)?; + let versions: &[String] = + self.get_entry_data_as_string_array(IndexTag::RPMTAG_REQUIREVERSION)?; + let provides = self.get_provides()?; + itertools::multizip((names, flags, versions)) + .try_fold::, _, Result<_, RPMError>>( + Vec::with_capacity(names.len()), + |mut acc, (name, flag_num, ver)| { + if flag_num & RPMSENSE_RPMLIB != RPMSENSE_RPMLIB { + match provides.iter().find(|s| s.name == *name) { + Some(_) => {} + None => { + let flags = parse_flag(flag_num); + let (epoch, version, release) = parse_vers(ver); + acc.push(RpmEntry { + name: name.to_owned(), + flags, + epoch, + version, + release, + }); + } + }; + } + Ok(acc) + }, + ) + } + #[inline] + pub fn get_conflicts(&self) -> Result, RPMError> { + let names: &[String] = + self.get_entry_data_as_string_array(IndexTag::RPMTAG_CONFLICTNAME)?; + let flags = self.get_entry_data_as_u32_array(IndexTag::RPMTAG_CONFLICTFLAGS)?; + let versions = self.get_entry_data_as_string_array(IndexTag::RPMTAG_CONFLICTVERSION)?; + 
itertools::multizip((names, flags, versions)) + .try_fold::, _, Result<_, RPMError>>( + Vec::with_capacity(names.len()), + |mut acc, (name, flag_num, ver)| { + let flags = parse_flag(flag_num); + let (epoch, version, release) = parse_vers(ver); + acc.push(RpmEntry { + name: name.to_owned(), + flags, + epoch, + version, + release, + }); + Ok(acc) + }, + ) + } + #[inline] + pub fn get_obsoletes(&self) -> Result, RPMError> { + let names: &[String] = + self.get_entry_data_as_string_array(IndexTag::RPMTAG_OBSOLETENAME)?; + let flags = self.get_entry_data_as_u32_array(IndexTag::RPMTAG_OBSOLETEFLAGS)?; + let versions = self.get_entry_data_as_string_array(IndexTag::RPMTAG_OBSOLETEVERSION)?; + itertools::multizip((names, flags, versions)) + .try_fold::, _, Result<_, RPMError>>( + Vec::with_capacity(names.len()), + |mut acc, (name, flag_num, ver)| { + let flags = parse_flag(flag_num); + let (epoch, version, release) = parse_vers(ver); + acc.push(RpmEntry { + name: name.to_owned(), + flags, + epoch, + version, + release, + }); + Ok(acc) + }, + ) + } + #[inline] + pub fn get_payload_format(&self) -> Result<&str, RPMError> { + self.get_entry_data_as_string(IndexTag::RPMTAG_PAYLOADFORMAT) + } + + #[inline] + pub fn get_payload_compressor(&self) -> Result<&str, RPMError> { + self.get_entry_data_as_string(IndexTag::RPMTAG_PAYLOADCOMPRESSOR) + } + + #[inline] + pub fn get_file_checksums(&self) -> Result<&[String], RPMError> { + self.get_entry_data_as_string_array(IndexTag::RPMTAG_FILEDIGESTS) + } + + /// Extract a the set of contained file names. 
+ pub fn get_file_paths(&self) -> Result, RPMError> { + // reconstruct the messy de-constructed paths + let base = self.get_entry_data_as_string_array(IndexTag::RPMTAG_BASENAMES)?; + let biject = self.get_entry_data_as_u32_array(IndexTag::RPMTAG_DIRINDEXES)?; + let dirs = self.get_entry_data_as_string_array(IndexTag::RPMTAG_DIRNAMES)?; + + let n = dirs.len(); + let v = base + .iter() + .zip(biject.into_iter()) + .try_fold::, _, _>( + Vec::::with_capacity(base.len()), + |mut acc, item| { + let (base, dir_index) = item; + if let Some(dir) = dirs.get(dir_index as usize) { + acc.push(PathBuf::from(dir).join(base)); + Ok(acc) + } else { + Err(RPMError::InvalidTagIndex { + tag: IndexTag::RPMTAG_DIRINDEXES.to_string(), + index: dir_index, + bound: n as u32, + }) + } + }, + )?; + Ok(v) + } + + /// The digest algorithm used per file. + /// + /// Note that this is not necessarily the same as the digest + /// used for headers. + pub fn get_file_digest_algorithm(&self) -> Result { + self.get_entry_data_as_u32(IndexTag::RPMTAG_FILEDIGESTALGO) + .and_then(|x| { + FileDigestAlgorithm::from_u32(x).ok_or_else(|| { + RPMError::InvalidTagValueEnumVariant { + tag: IndexTag::RPMTAG_FILEDIGESTALGO.to_string(), + variant: x, + } + }) + }) + } + + /// Extract a the set of contained file names including the additional metadata. 
+ pub fn get_file_entries(&self) -> Result, RPMError> { + // rpm does not encode it, if it is the default md5 + let algorithm = self.get_file_digest_algorithm().unwrap_or_default(); + // + let modes = self.get_entry_data_as_u16_array(IndexTag::RPMTAG_FILEMODES)?; + let users = self.get_entry_data_as_string_array(IndexTag::RPMTAG_FILEUSERNAME)?; + let groups = self.get_entry_data_as_string_array(IndexTag::RPMTAG_FILEGROUPNAME)?; + let digests = self.get_entry_data_as_string_array(IndexTag::RPMTAG_FILEDIGESTS)?; + let mtimes = self.get_entry_data_as_u32_array(IndexTag::RPMTAG_FILEMTIMES)?; + let sizes = self + .get_entry_data_as_u64_array(IndexTag::RPMTAG_LONGFILESIZES) + .or_else(|_e| { + self.get_entry_data_as_u32_array(IndexTag::RPMTAG_FILESIZES) + .map(|file_sizes| { + file_sizes + .into_iter() + .map(|file_size| file_size as _) + .collect::>() + }) + })?; + let flags = self.get_entry_data_as_u32_array(IndexTag::RPMTAG_FILEFLAGS)?; + // @todo + // let caps = self.get_entry_i32_array_data(IndexTag::RPMTAG_FILECAPS)?; + + let paths = self.get_file_paths()?; + let n = paths.len(); + + let v = itertools::multizip(( + paths.into_iter(), + users, + groups, + modes, + digests, + mtimes, + sizes, + flags, + )) + .try_fold::, _, Result<_, RPMError>>( + Vec::with_capacity(n), + |mut acc, (path, user, group, mode, digest, mtime, size, flags)| { + let digest = if digest.is_empty() { + None + } else { + Some(FileDigest::load_from_str(algorithm, digest)?) 
+ }; + let utc = chrono::Utc; + acc.push(FileEntry { + path, + ownership: FileOwnership { + user: user.to_owned(), + group: group.to_owned(), + }, + mode: mode.into(), + modified_at: utc.timestamp_opt(mtime as i64, 0u32).unwrap(), // shouldn't fail as we are using 0 nanoseconds + digest, + category: FileCategory::from_u32(flags).unwrap_or_default(), + size: size as usize, + }); + Ok(acc) + }, + )?; + Ok(v) + } +} +impl RPMPackageMetadata { + #[cfg(feature = "async-futures")] + pub async fn parse_async(input: &mut T) -> Result { + let mut lead_buffer = [0; LEAD_SIZE]; + input.read_exact(&mut lead_buffer).await?; + let lead = Lead::parse(&lead_buffer)?; + let signature_header = Header::parse_signature_async(input).await?; + let header = Header::parse_async(input).await?; + Ok(RPMPackageMetadata { + lead, + signature: signature_header, + header, + }) + } + + pub(crate) fn parse(input: &mut T) -> Result { + let mut lead_buffer = [0; LEAD_SIZE]; + input.read_exact(&mut lead_buffer)?; + let lead = Lead::parse(&lead_buffer)?; + let signature_header = Header::parse_signature(input)?; + let header = Header::parse(input)?; + Ok(RPMPackageMetadata { + lead, + signature: signature_header, + header, + }) + } + + pub(crate) fn write(&self, out: &mut W) -> Result<(), RPMError> { + self.lead.write(out)?; + self.signature.write_signature(out)?; + self.header.write(out)?; + Ok(()) + } + + #[cfg(feature = "async-futures")] + pub async fn write_async(&self, out: &mut W) -> Result<(), RPMError> { + self.lead.write_async(out).await?; + self.signature.write_signature_async(out).await?; + self.header.write_async(out).await?; + Ok(()) + } + #[inline] + pub fn get_file_ima_signatures(&self) -> Result<&[String], RPMError> { + self.signature + .get_entry_data_as_string_array(IndexSignatureTag::RPMSIGTAG_FILESIGNATURES) + } + #[inline] + pub fn get_md5(&self) -> Result<&[u8], RPMError> { + self.signature + .get_entry_data_as_binary(IndexSignatureTag::RPMSIGTAG_MD5) + } +} +#[inline] +pub fn 
parse_flag(flag_num: u32) -> Option { + let flag_str = if flag_num & RPMSENSE_LESS == RPMSENSE_LESS { + if flag_num & RPMSENSE_EQUAL == RPMSENSE_EQUAL { + "LE" + } else { + "LT" + } + } else if flag_num & RPMSENSE_GREATER == RPMSENSE_GREATER { + if flag_num & RPMSENSE_EQUAL == RPMSENSE_EQUAL { + "GE" + } else { + "GT" + } + } else if flag_num & RPMSENSE_EQUAL == RPMSENSE_EQUAL { + "EQ" + } else { + return None; + }; + return Some(flag_str.to_string()); +} +#[inline] +pub fn parse_vers(ver: &str) -> (Option, Option, Option) { + let mut epoch = None; + let mut version = None; + let mut release = None; + if ver != "" { + let rest = match ver.split_once(":") { + Some((e, rest)) => { + epoch = Some(e.to_string()); + rest + } + None => { + epoch = Some("0".to_string()); + ver + } + }; + match rest.split_once("-") { + Some((v, r)) => { + version = Some(v.to_string()); + release = Some(r.to_string()); + } + None => { + version = Some(rest.to_string()); + } + } + } + (epoch, version, release) +} diff --git a/rdnf/rpm/src/rpm/signature/mod.rs b/rdnf/rpm/src/rpm/signature/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..8be6e7a9495d0df2262a0d3124c5def35060a459 --- /dev/null +++ b/rdnf/rpm/src/rpm/signature/mod.rs @@ -0,0 +1,19 @@ +mod traits; +pub use self::traits::*; + +#[cfg(feature = "signature-pgp")] +pub mod pgp; + +/// test helper to print signatures +pub fn echo_signature(scope: &str, signature: &[u8]) { + log::debug!( + "{}: [len={}] [{:#04X?}, {:#04X?}, {:#04X?}, {:#04X?}, {:#04X?}, ...]", + scope, + signature.len(), + signature[0], + signature[1], + signature[2], + signature[3], + signature[4] + ); +} diff --git a/rdnf/rpm/src/rpm/signature/pgp.rs b/rdnf/rpm/src/rpm/signature/pgp.rs new file mode 100644 index 0000000000000000000000000000000000000000..dae1659e2eddecd439b94ebafa5c5201a1f78008 --- /dev/null +++ b/rdnf/rpm/src/rpm/signature/pgp.rs @@ -0,0 +1,449 @@ +use super::traits; +use crate::errors::RPMError; + +use std::io::{Cursor, 
Read}; + +use ::pgp::{composed::Deserializable, types::KeyTrait}; + +use ::pgp::packet::*; + +fn now() -> ::chrono::DateTime<::chrono::Utc> { + // accuracy of serialized format is only down to seconds + use ::chrono::offset::TimeZone; + let now = ::chrono::offset::Utc::now(); + ::chrono::offset::Utc + .timestamp_opt(now.timestamp(), 0u32) + .unwrap() // shouldn't fail as we are using 0 nanoseconds. +} + +/// Signer implementation using the `pgp` crate. +/// +/// Note that this only supports ascii armored key files +/// commonly with the file extension `.asc` as generated +/// by i.e. `gpg`. +#[derive(Clone, Debug)] +pub struct Signer { + secret_key: ::pgp::composed::signed_key::SignedSecretKey, +} + +impl traits::Signing for Signer { + type Signature = Vec; + + /// Despite the fact the API suggest zero copy pattern, + /// it internally creates a copy until crate `pgp` provides + /// a `Read` based implementation. + fn sign(&self, data: R) -> Result { + let passwd_fn = String::new; + + let now = now(); + + let sig_cfg = SignatureConfig { + version: SignatureVersion::V4, + typ: SignatureType::Binary, + pub_alg: ::pgp::crypto::public_key::PublicKeyAlgorithm::RSA, + hash_alg: ::pgp::crypto::hash::HashAlgorithm::SHA2_256, + issuer: Some(self.secret_key.key_id()), + created: Some(now), + unhashed_subpackets: vec![], + hashed_subpackets: vec![ + Subpacket::critical(SubpacketData::SignatureCreationTime(now)), + Subpacket::critical(SubpacketData::Issuer(self.secret_key.key_id())), + //::pgp::packet::Subpacket::SignersUserID("rpm"), TODO this would be a nice addition + ], + }; + + let signature_packet = sig_cfg + .sign(&self.secret_key, passwd_fn, data) + .map_err(|e| RPMError::SignError(Box::new(e)))?; + + let mut signature_bytes = Vec::with_capacity(1024); + let mut cursor = Cursor::new(&mut signature_bytes); + ::pgp::packet::write_packet(&mut cursor, &signature_packet) + .map_err(|e| RPMError::SignError(Box::new(e)))?; + + Ok(signature_bytes) + } +} + +impl Signer { + /// 
load the private key for signing + pub fn load_from_asc_bytes(input: &[u8]) -> Result { + // only asc loading is supported right now + let input = ::std::str::from_utf8(input).map_err(|e| RPMError::KeyLoadError { + source: Box::new(e), + details: "Failed to parse bytes as utf8 for ascii armored parsing", + })?; + Self::load_from_asc(input) + } + + pub fn load_from_asc(input: &str) -> Result { + let (secret_key, _) = ::pgp::composed::signed_key::SignedSecretKey::from_string(input) + .map_err(|e| RPMError::KeyLoadError { + source: Box::new(e), + details: "Failed to parse bytes as ascii armored key", + })?; + Ok(Self { secret_key }) + } +} + +/// Verifier implementation using the `pgp` crate. +/// +/// Note that this only supports ascii armored key files +/// commonly with the file extension `.asc` as generated +/// by i.e. `gpg`. +#[derive(Clone, Debug)] +pub struct Verifier { + public_key: ::pgp::composed::signed_key::SignedPublicKey, +} + +impl Verifier { + fn parse_signature(signature: &[u8]) -> Result<::pgp::packet::Signature, RPMError> { + let mut cursor = Cursor::new(signature); + let parser = ::pgp::packet::PacketParser::new(&mut cursor); + let signature = parser + .filter_map(|res| match res { + Ok(::pgp::packet::Packet::Signature(sig_packet)) => Some(sig_packet), + _ => None, + }) + .next() + .ok_or(RPMError::NoSignatureFound)?; + Ok(signature) + } +} + +impl traits::Verifying for Verifier { + type Signature = Vec; + /// Despite the fact the API suggest zero copy pattern, + /// it internally creates a copy until crate `pgp` provides + /// a `Read` based implementation. 
+ fn verify(&self, mut data: R, signature: &[u8]) -> Result<(), RPMError> { + let signature = Self::parse_signature(signature)?; + + log::debug!("Signature issued by: {:?}", signature.issuer()); + + if let Some(key_id) = signature.issuer() { + log::trace!("Signature has issuer ref: {:?}", key_id); + + if self.public_key.key_id() == *key_id { + return signature.verify(&self.public_key, data).map_err(|e| { + RPMError::VerificationError { + source: Box::new(e), + key_ref: format!("{:?}", key_id), + } + }); + } else { + log::trace!( + "Signature issuer key id {:?} does not match primary keys key id: {:?}", + key_id, + self.public_key.key_id() + ); + } + + self.public_key + .public_subkeys + .iter() + .filter(|sub_key| { + if sub_key.key_id().as_ref() == key_id.as_ref() { + log::trace!( + "Found a matching key id {:?} == {:?}", + sub_key.key_id(), + key_id + ); + true + } else { + log::trace!("Not the one we want: {:?}", sub_key); + false + } + }) + .fold( + Err(RPMError::KeyNotFoundError { + key_ref: format!("{:?}", key_id), + }), + |previous_res, sub_key| { + if previous_res.is_err() { + log::trace!("Test next candidate subkey"); + signature.verify(sub_key, &mut data).map_err(|e| { + RPMError::VerificationError { + source: Box::new(e), + key_ref: format!("{:?}", sub_key.key_id()), + } + }) + } else { + log::trace!("Signature already verified, nop"); + Ok(()) + } + }, + ) + } else { + log::trace!( + "Signature has no issuer ref, attempting primary key: {:?}", + self.public_key.primary_key.key_id() + ); + signature + .verify(&self.public_key, data) + .map_err(|e| RPMError::VerificationError { + source: Box::new(e), + key_ref: format!("{:?}", self.public_key.key_id()), + }) + } + } +} + +impl Verifier { + pub fn load_from_asc_bytes(input: &[u8]) -> Result { + // only asc loading is supported right now + let input = ::std::str::from_utf8(input).map_err(|e| RPMError::KeyLoadError { + source: Box::new(e), + details: "Failed to parse bytes as utf8 for ascii armored parsing", 
+ })?; + Self::load_from_asc(input) + } + + pub fn load_from_asc(input: &str) -> Result { + let (public_key, _) = ::pgp::composed::signed_key::SignedPublicKey::from_string(input) + .map_err(|e| RPMError::KeyLoadError { + source: Box::new(e), + details: "Failed to parse bytes as ascii armored key", + })?; + + Ok(Self { public_key }) + } +} + +#[cfg(test)] +pub(crate) mod test { + + use super::super::{echo_signature, Signing, Verifying}; + use super::*; + + use super::Signer; + use super::Verifier; + + fn prep() -> (Signer, Verifier) { + let _ = env_logger::try_init(); + let (signing_key, verification_key) = load_asc_keys(); + let verifier = + Verifier::load_from_asc_bytes(verification_key.as_slice()).expect("PK parsing failed"); + let signer = + Signer::load_from_asc_bytes(signing_key.as_slice()).expect("PK parsing failed"); + (signer, verifier) + } + + /// Load a pair of sample keys. + pub(crate) fn load_asc_keys() -> (Vec, Vec) { + let signing_key = include_bytes!("../../../test_assets/secret_key.asc"); + let verification_key = include_bytes!("../../../test_assets/public_key.asc"); + (signing_key.to_vec(), verification_key.to_vec()) + } + + #[test] + fn parse_asc() { + // assert `prep()` itself is sane + let (signing_key, verification_key) = load_asc_keys(); + assert!(Signer::load_from_asc_bytes(signing_key.as_ref()).is_ok()); + assert!(Verifier::load_from_asc_bytes(verification_key.as_ref()).is_ok()); + } + + use std::io::Cursor; + + #[test] + fn sign_verify_roundtrip() { + let data = b"dfsdfjsd9ivnq320348934752312308205723900000580134850sdf"; + let mut cursor = Cursor::new(&data[..]); + + let (signer, verifier) = prep(); + + let signature = signer.sign(&mut cursor).expect("signed"); + let signature = signature.as_slice(); + { + // just to see if the previous already failed or not + let _packet = + Verifier::parse_signature(signature).expect("Created signature should be parsable"); + } + + echo_signature("test/roundtrip", signature); + + let mut cursor = 
Cursor::new(&data[..]); + verifier + .verify(&mut cursor, signature) + .expect("failed to verify just signed signature"); + } + + #[test] + fn verify_pgp_crate() { + use ::pgp::types::{PublicKeyTrait, SecretKeyTrait}; + + let (signer, verifier) = prep(); + let (signing_key, verification_key) = { (signer.secret_key, verifier.public_key) }; + + let passwd_fn = String::new; + + let digest = &RPM_SHA2_256[..]; + + // stage 1: verify created signature is fine + let signature = signing_key + .create_signature(passwd_fn, ::pgp::crypto::HashAlgorithm::SHA2_256, digest) + .expect("Failed to crate signature"); + + verification_key + .verify_signature(::pgp::crypto::HashAlgorithm::SHA2_256, digest, &signature) + .expect("Failed to validate signature"); + + // stage 2: check parsing success + // + let wrapped = ::pgp::Signature::new( + ::pgp::types::Version::Old, + ::pgp::packet::SignatureVersion::V4, + ::pgp::packet::SignatureType::Binary, + ::pgp::crypto::public_key::PublicKeyAlgorithm::RSA, + ::pgp::crypto::hash::HashAlgorithm::SHA2_256, + [digest[0], digest[1]], + signature, + vec![ + ::pgp::packet::Subpacket::SignatureCreationTime(now()), + ::pgp::packet::Subpacket::Issuer(signing_key.key_id()), + //::pgp::packet::Subpacket::SignersUserID("rpm"), TODO this would be a nice addition + ], + vec![], + ); + + let mut x = Vec::with_capacity(1024); + + let mut buff = Cursor::new(&mut x); + ::pgp::packet::write_packet(&mut buff, &wrapped).expect("Write should be ok"); + + log::debug!("{:02x?}", &x[0..15]); + + let signature = + Verifier::parse_signature(x.as_slice()).expect("There is a signature for sure"); + assert_eq!(signature, wrapped); + let signature = signature.signature; + verification_key + .verify_signature(::pgp::crypto::HashAlgorithm::SHA2_256, digest, &signature) + .expect("Verify must succeed"); + } + + #[test] + fn verify_pgp_crate2() { + let (signer, verifier) = prep(); + + let data = [1u8; 322]; + let data = &data[..]; + + let passwd_fn = String::new; + + let now 
= now(); + + let sig_cfg = SignatureConfig { + version: SignatureVersion::V4, + typ: SignatureType::Binary, + pub_alg: ::pgp::crypto::public_key::PublicKeyAlgorithm::RSA, + hash_alg: ::pgp::crypto::hash::HashAlgorithm::SHA2_256, + issuer: Some(signer.secret_key.key_id()), + created: Some(now), + unhashed_subpackets: vec![], + hashed_subpackets: vec![ + Subpacket::SignatureCreationTime(now), + Subpacket::Issuer(signer.secret_key.key_id()), + //::pgp::packet::Subpacket::SignersUserID("rpm"), TODO this would be a nice addition + ], + }; + + let signature_packet = sig_cfg + .sign(&signer.secret_key, passwd_fn, data) + .expect("Should sign"); + + signature_packet + .verify(&verifier.public_key, data) + .expect("Failed to validate signature"); + } + + #[test] + fn verify_subkeys_match() { + // verifies that all subkeys are present in both keys under test_assets + // which assures all other tests are sane + use std::collections::HashSet; + let (signer, verifier) = prep(); + let subkey_set = verifier.public_key.public_subkeys.iter().fold( + HashSet::with_capacity(signer.secret_key.public_subkeys.len()), + |mut acc, public_subkey| { + log::debug!("public subkeys in public key: {:?}", public_subkey.key_id()); + acc.insert(public_subkey.key_id().as_ref().to_vec()); + acc + }, + ); + signer + .secret_key + .secret_subkeys + .iter() + .for_each(|public_subkey| { + log::debug!("secret subkeys in secret key: {:?}", public_subkey.key_id()); + assert!(subkey_set.contains(public_subkey.key_id().as_ref())); + }); + } + + #[test] + fn static_parse_rpm_sign_signature() { + let _ = env_logger::try_init(); + + std::fs::write( + std::path::PathBuf::from(env!("CARGO_MANIFEST_DIR")) + .join("target") + .join("some.sig"), + &RPM_SIGN_SIGNATURE[..], + ) + .expect("Should be able to dump extracted signature"); + + let signature = &RPM_SIGN_SIGNATURE[..]; + let _signature = Verifier::parse_signature(signature).expect("It should load"); + } + + /// TODO fill with correct data + const 
RPM_SHA2_256: [u8; 32] = [ + 0xd9, 0x2b, 0xfe, 0x27, 0x6e, 0x31, 0x1a, 0x67, 0xfe, 0x12, 0x87, 0x68, 0xc5, 0xdf, 0x4d, + 0x06, 0xfd, 0x46, 0x1e, 0x04, 0x3a, 0xfd, 0xf8, 0x72, 0xba, 0x4c, 0x67, 0x9d, 0x86, 0x0d, + 0xb8, 0x1e, + ]; + + /// A sample signature extracted from rpm-sign using the test keys + /// + /// Should only be used for validating parsing. + const RPM_SIGN_SIGNATURE: [u8; 536] = [ + 0x89, 0x2, 0x15, 0x3, 0x5, 0x0, 0x5b, 0xe9, 0x8c, 0x5b, 0x24, 0xc6, 0xa8, 0xa7, 0xf4, 0xa8, + 0xe, 0xb5, 0x1, 0x8, 0xa8, 0x4c, 0xf, 0xfd, 0x1a, 0x9d, 0xe3, 0xf, 0x7e, 0xbb, 0x74, 0xe3, + 0x62, 0xef, 0xfd, 0x4d, 0x1c, 0x11, 0xa1, 0x68, 0x22, 0xd, 0xff, 0x4a, 0x72, 0x11, 0x18, + 0xe4, 0xb0, 0x46, 0x6b, 0x11, 0x82, 0xc6, 0xd4, 0xd6, 0xdb, 0x53, 0x64, 0x1b, 0x32, 0x33, + 0x41, 0x95, 0xf3, 0xc, 0xa6, 0xc2, 0x50, 0xee, 0x81, 0x81, 0x6a, 0x8, 0x5, 0xfa, 0x3b, + 0x26, 0x66, 0x63, 0x5c, 0xfa, 0x4b, 0x25, 0x2, 0xe7, 0xad, 0x3f, 0x4f, 0x82, 0x7a, 0xa3, + 0x4d, 0xad, 0xd, 0xa0, 0x19, 0x63, 0x77, 0xd2, 0x18, 0x30, 0x54, 0xc7, 0x14, 0x23, 0x22, + 0xb, 0xd, 0xd8, 0xba, 0x1b, 0x6c, 0x94, 0xb3, 0xf, 0xb3, 0x82, 0x18, 0x62, 0x33, 0x51, + 0x4e, 0xaa, 0xfa, 0x84, 0x8a, 0x4b, 0xcd, 0x82, 0x72, 0xf1, 0x40, 0x94, 0x38, 0xc7, 0xbc, + 0x48, 0x29, 0x4f, 0x32, 0x98, 0xd9, 0xaf, 0x35, 0x1a, 0xb, 0xf0, 0x87, 0x74, 0x39, 0xd6, + 0xe7, 0x86, 0x44, 0x9d, 0x5c, 0x7a, 0xde, 0x63, 0x1a, 0x16, 0xb2, 0x29, 0x1d, 0x46, 0x9e, + 0x61, 0xad, 0xff, 0x91, 0x6f, 0x51, 0x65, 0x8a, 0xb9, 0x37, 0xe, 0x65, 0xb6, 0x77, 0x2f, + 0xb7, 0x74, 0x6a, 0x9c, 0x8a, 0xf0, 0x4b, 0x2d, 0x87, 0xbf, 0x61, 0xff, 0x70, 0xdc, 0x29, + 0xec, 0x9a, 0xc, 0x7f, 0x12, 0xf6, 0x55, 0xea, 0x22, 0xb5, 0xf0, 0x1a, 0xd, 0xa5, 0xe8, + 0xc6, 0x7f, 0x1b, 0x9c, 0x55, 0x1b, 0x35, 0x5c, 0xac, 0x72, 0x26, 0x86, 0x89, 0x30, 0xd5, + 0x2d, 0x8, 0x93, 0xf, 0x9e, 0x1a, 0xfd, 0x8c, 0x7e, 0xdb, 0xca, 0x57, 0x4f, 0xd9, 0x42, + 0xd7, 0xf6, 0x74, 0xcd, 0xf6, 0x68, 0xef, 0xe3, 0x24, 0x66, 0x92, 0x29, 0xda, 0x96, 0x87, + 0x8e, 0xa2, 0x88, 0x23, 0x78, 0xee, 0xc3, 
0xfc, 0x71, 0xfd, 0xb6, 0x36, 0x6b, 0xad, 0xd7, + 0x54, 0x55, 0x4d, 0xa0, 0xa3, 0x40, 0x70, 0x51, 0xc2, 0x76, 0xde, 0x9f, 0xa3, 0xe5, 0x7f, + 0x80, 0x72, 0xa9, 0xc3, 0x7f, 0x3e, 0x37, 0xd7, 0x7a, 0x99, 0x98, 0xc4, 0xc6, 0x4b, 0x51, + 0x93, 0xbc, 0xd0, 0xf2, 0x93, 0x9, 0x73, 0x7f, 0x6e, 0x7a, 0xb4, 0x6b, 0x7b, 0x79, 0xe0, + 0x45, 0x55, 0x39, 0xfc, 0x61, 0xa7, 0xde, 0xa5, 0xff, 0x80, 0x31, 0x39, 0x14, 0xf6, 0xb6, + 0x7, 0x6c, 0xd7, 0xa4, 0x10, 0xa0, 0x87, 0x55, 0x4d, 0xe5, 0xa5, 0x26, 0xc1, 0x99, 0xe, + 0x58, 0x19, 0xae, 0xc3, 0xbf, 0xe8, 0x16, 0x48, 0xe0, 0x85, 0x96, 0x51, 0x18, 0x72, 0xb8, + 0xf, 0x0, 0x9f, 0x26, 0xde, 0xec, 0x12, 0x32, 0xec, 0xd0, 0x3c, 0xde, 0x31, 0xb, 0xd6, + 0xbf, 0x4a, 0xc5, 0x66, 0x5c, 0xcd, 0xb0, 0x29, 0x3c, 0x6d, 0xc6, 0x18, 0x56, 0xd7, 0x17, + 0xb4, 0x4d, 0xeb, 0xdc, 0xbb, 0xe4, 0x4f, 0x1a, 0xf5, 0x72, 0x3a, 0x96, 0x44, 0x4d, 0xf3, + 0x14, 0xb1, 0x79, 0x75, 0xa4, 0x6a, 0xcc, 0x9d, 0x27, 0x47, 0xa9, 0x12, 0xa7, 0x7, 0xa8, + 0x30, 0xae, 0xf2, 0xde, 0xbc, 0x33, 0x87, 0xb5, 0x8c, 0x5, 0x3f, 0x45, 0x4e, 0x64, 0x4a, + 0x86, 0x6d, 0xc3, 0xf4, 0xfe, 0x5, 0x91, 0x81, 0x95, 0x2f, 0xad, 0x81, 0xda, 0x1b, 0x39, + 0xf8, 0xf0, 0xb8, 0x46, 0xf0, 0x38, 0x82, 0xa6, 0xf2, 0x35, 0x34, 0x4d, 0x9e, 0x17, 0x9a, + 0x97, 0xaf, 0xbd, 0x9b, 0x19, 0x31, 0x88, 0xd8, 0x3a, 0x50, 0x2e, 0x91, 0x50, 0x45, 0x5, + 0x92, 0x88, 0xb2, 0x7, 0x10, 0x9a, 0x6c, 0x44, 0xa2, 0x72, 0xf, 0xca, 0x68, 0x17, 0x99, + 0x1a, 0x62, 0xcd, 0x66, 0x23, 0xf, 0x90, 0xa4, 0x14, 0xa6, 0x6c, 0x7d, 0x6, 0xc4, 0x4b, + 0xbe, 0x81, 0x47, 0x72, 0xeb, 0xd4, 0xa2, 0x3d, 0x63, 0x73, 0x86, 0xef, 0xe, 0x2b, 0x78, + 0xd4, 0x4f, 0x48, 0x2e, 0xb0, 0x55, 0x8c, 0x8e, 0x5d, + ]; +} diff --git a/rdnf/rpm/src/rpm/signature/traits.rs b/rdnf/rpm/src/rpm/signature/traits.rs new file mode 100644 index 0000000000000000000000000000000000000000..f698d0faf03bc560b95658915f9c2431dd086d3a --- /dev/null +++ b/rdnf/rpm/src/rpm/signature/traits.rs @@ -0,0 +1,110 @@ +//! Trait abstractions of signinggraphic operations. +//! +//! 
Does not contain hashing! Hashes are fixed by the rpm +//! "spec" to sha1, md5 (yes, that is correct), sha2_256. + +#[allow(unused)] +use crate::errors::*; +use std::fmt::Debug; +use std::io::Read; + +pub mod algorithm { + + pub trait Algorithm: super::Debug {} + /// currently only RSA is required + /// + /// Farsight for future algorithm extensions of rpm + /// without breaking the API + #[derive(Debug, Clone, Copy)] + #[allow(non_camel_case_types)] + + pub struct RSA; + + impl Algorithm for RSA {} +} + +/// Signing trait to be implement for RPM signing. +pub trait Signing: Debug +where + A: algorithm::Algorithm, + Self::Signature: AsRef<[u8]>, +{ + type Signature; + fn sign(&self, data: R) -> Result; +} + +impl Signing for &T +where + T: Signing, + A: algorithm::Algorithm, + S: AsRef<[u8]>, +{ + type Signature = S; + fn sign(&self, data: R) -> Result { + T::sign::(self, data) + } +} + +/// Verification trait to be implement for RPM signature verification. +pub trait Verifying: Debug +where + A: algorithm::Algorithm, + Self::Signature: AsRef<[u8]>, +{ + type Signature; + fn verify(&self, data: R, signature: &[u8]) -> Result<(), RPMError>; +} + +impl Verifying for &T +where + T: Verifying, + A: algorithm::Algorithm, + S: AsRef<[u8]>, +{ + type Signature = S; + fn verify(&self, data: R, signature: &[u8]) -> Result<(), RPMError> { + T::verify::(self, data, signature) + } +} + +pub mod key { + + /// Marker trait for key types. + pub trait KeyType: super::Debug + Copy {} + + /// A secret key that should not be shared with any other party + /// under any circumstance. + #[derive(Debug, Clone, Copy)] + pub struct Secret; + + /// A key publishable to the public. 
+ #[derive(Debug, Clone, Copy)] + pub struct Public; + + impl KeyType for Secret {} + impl KeyType for Public {} +} + +/// Implement unreachable signer for empty tuple `()` +impl Signing for std::marker::PhantomData +where + A: algorithm::Algorithm, +{ + type Signature = Vec; + fn sign(&self, _data: R) -> Result { + unreachable!("if you want to verify, you need to implement `sign` of the `Signing` trait") + } +} + +/// Implement unreachable verifier for the empty tuple`()` +impl Verifying for std::marker::PhantomData +where + A: algorithm::Algorithm, +{ + type Signature = Vec; + fn verify(&self, _data: R, _x: &[u8]) -> Result<(), RPMError> { + unreachable!( + "if you want to verify, you need to implement `verify` of the `Verifying` trait" + ) + } +} diff --git a/rdnf/rpm/src/sequential_cursor.rs b/rdnf/rpm/src/sequential_cursor.rs new file mode 100644 index 0000000000000000000000000000000000000000..527b286ae5245d02fac3fabb1d0f35dde86f576b --- /dev/null +++ b/rdnf/rpm/src/sequential_cursor.rs @@ -0,0 +1,162 @@ +//! Cursor implementation over multiple slices +use std::io::{Seek, SeekFrom}; + +pub(crate) struct SeqCursor<'s> { + cursors: Vec>, + position: u64, + len: usize, +} + +impl<'s> SeqCursor<'s> { + /// Add an additional slice to the end of the cursor + /// + /// Does not modify the current cursors position. + #[allow(unused)] + pub(crate) fn add<'b>(&mut self, another: &'b [u8]) + where + 'b: 's, + { + let cursor = std::io::Cursor::<&'s [u8]>::new(another); + self.cursors.push(cursor); + self.len += another.len(); + } + + /// Crate a new cursor based on a slice of bytes slices. + pub(crate) fn new<'b>(slices: &[&'b [u8]]) -> Self + where + 'b: 's, + { + let len = slices.iter().fold(0usize, |acc, slice| slice.len() + acc); + Self { + cursors: slices + .iter() + .map(|slice| std::io::Cursor::new(*slice)) + .collect::>(), + position: 0u64, + len, + } + } + + /// Total length of all slices summed up. 
+ pub(crate) fn len(&self) -> usize { + self.len + } +} + +impl<'s> std::io::Read for SeqCursor<'s> { + fn read(&mut self, buf: &mut [u8]) -> std::io::Result { + let mut total_read = 0usize; + let mut acc_offset = 0usize; + for cursor in self.cursors.iter_mut() { + let chunk_len = cursor.get_ref().len(); + acc_offset += chunk_len; + if self.position <= acc_offset as u64 { + // remaining unread bytes + let rem_unread_in_chunk = (acc_offset as u64 - self.position) as usize; + // seek to the beginning of the currently first unread byte in the + // iterations cursor + cursor.seek(SeekFrom::Start( + chunk_len as u64 - rem_unread_in_chunk as u64, + ))?; + let fin = std::cmp::min(total_read + rem_unread_in_chunk, buf.len()); + let read = cursor.read(&mut buf[total_read..fin])?; + self.position += read as u64; + total_read += read; + if total_read >= buf.len() { + debug_assert_eq!(total_read, buf.len(), "Always equal. qed"); + break; + } + } + } + Ok(total_read) + } +} + +impl<'s> std::io::Seek for SeqCursor<'s> { + fn seek(&mut self, pos: std::io::SeekFrom) -> std::io::Result { + self.position = match pos { + std::io::SeekFrom::Start(rel) => rel, + std::io::SeekFrom::End(rel) => (self.len as i64 + rel) as u64, + std::io::SeekFrom::Current(rel) => (self.position as i64 + rel) as u64, + }; + Ok(self.position) + } +} + +#[cfg(test)] +mod test { + use super::*; + use std::io::Read; + use std::io::Seek; + + #[test] + fn sequential_cursor() { + let c1 = vec![1u8; 17]; + let c2 = vec![2u8; 17]; + let c3 = vec![3u8; 17]; + + let mut buf = Vec::::with_capacity(17 * 3); + unsafe { + buf.set_len(17 * 3); + } + let mut sq = SeqCursor::new(&[c1.as_slice(), c2.as_slice(), c3.as_slice()]); + + sq.seek(std::io::SeekFrom::Current(16)).unwrap(); + sq.read(&mut buf[0..4]).unwrap(); + assert_eq!(buf[0..4].to_vec(), vec![1u8, 2u8, 2u8, 2u8]); + + sq.seek(std::io::SeekFrom::Current(12)).unwrap(); + sq.read(&mut buf[4..8]).unwrap(); + assert_eq!(buf[4..8].to_vec(), vec![2u8, 2u8, 3u8, 3u8]); + 
} + + #[test] + fn sequential_cursor_with_short_buffer() { + let c1 = vec![1u8, 2u8]; + let c2 = vec![3u8, 4u8]; + let mut sq = SeqCursor::new(&[c1.as_slice(), c2.as_slice()]); + + //read with a short buffer + let mut buf = vec![0u8; 1]; + sq.read(&mut buf).unwrap(); + assert_eq!(buf.to_vec(), vec![1u8]); + sq.read(&mut buf).unwrap(); + assert_eq!(buf.to_vec(), vec![2u8]); + sq.read(&mut buf).unwrap(); + assert_eq!(buf.to_vec(), vec![3u8]); + sq.read(&mut buf).unwrap(); + assert_eq!(buf.to_vec(), vec![4u8]); + } + + #[test] + fn sequential_cursor_with_seek() { + let c1 = vec![1u8; 2]; + let c2 = vec![2u8; 2]; + let c3 = vec![3u8; 2]; + + let mut buf = vec![0u8; 6]; + // without seek + let mut sq = SeqCursor::new(&[c1.as_slice(), c2.as_slice(), c3.as_slice()]); + sq.read(&mut buf).unwrap(); + assert_eq!(buf.to_vec(), vec![1u8, 1u8, 2u8, 2u8, 3u8, 3u8]); + + // seek with start + let mut buf = vec![0u8; 5]; + sq.seek(SeekFrom::Start(1)).unwrap(); + sq.read(&mut buf).unwrap(); + assert_eq!(buf.to_vec(), vec![1u8, 2u8, 2u8, 3u8, 3u8]); + + //seek with current + let mut buf = vec![0u8; 5]; + sq.seek(SeekFrom::Start(0)).unwrap(); + sq.seek(SeekFrom::Current(1)).unwrap(); + sq.read(&mut buf).unwrap(); + assert_eq!(buf.to_vec(), vec![1u8, 2u8, 2u8, 3u8, 3u8]); + + //seek with end + let mut buf = vec![0u8; 3]; + sq.seek(SeekFrom::End(-3)).unwrap(); + sq.read(&mut buf).unwrap(); + assert_eq!(buf.to_vec(), vec![2u8, 3u8, 3u8]); + } +} diff --git a/rdnf/rpm/src/test_rpm.rs b/rdnf/rpm/src/test_rpm.rs new file mode 100644 index 0000000000000000000000000000000000000000..ecc46700d572484015cb44f734d52dab6a351e6c --- /dev/null +++ b/rdnf/rpm/src/test_rpm.rs @@ -0,0 +1,171 @@ + + +#[test] +pub fn test_provide() { + let f = File::open("test_assets/389-ds-base-2.2.3-1.fc37.x86_64.rpm").unwrap(); + let mut buf = BufReader::new(f); + // let header: Header = Header::parse(&mut buf).unwrap(); + let p = RPMPackage::parse(&mut buf).unwrap(); + // dbg!(p.metadata.get_packager().unwrap()); + // 
dbg!(p.metadata.get_file_paths().unwrap()); + let names: &[String] = p + .metadata + .header + .get_entry_data_as_string_array(IndexTag::RPMTAG_PROVIDENAME) + .unwrap(); + let flags = p + .metadata + .header + .get_entry_data_as_u32_array(IndexTag::RPMTAG_PROVIDEFLAGS) + .unwrap(); + let versions = p + .metadata + .header + .get_entry_data_as_string_array(IndexTag::RPMTAG_PROVIDEVERSION) + .unwrap(); + let v = itertools::multizip((names, flags, versions)) + .try_fold::, _, Result<_, RPMError>>( + Vec::with_capacity(names.len()), + |mut acc, (name, flag_num, ver)| { + let flag_str = if flag_num & RPMSENSE_LESS == RPMSENSE_LESS { + if flag_num & RPMSENSE_EQUAL == RPMSENSE_EQUAL { + "LE" + } else { + "LT" + } + } else if flag_num & RPMSENSE_GREATER == RPMSENSE_GREATER { + if flag_num & RPMSENSE_EQUAL == RPMSENSE_EQUAL { + "GE" + } else { + "GT" + } + } else if flag_num & RPMSENSE_EQUAL == RPMSENSE_EQUAL { + "EQ" + } else { + "NULL" + }; + acc.push(( + name.to_owned(), + flag_num, + flag_str.to_owned(), + ver.to_owned(), + )); + Ok(acc) + }, + ) + .unwrap(); + for (name, flag_num, flag_str, ver) in &v { + println!("{}\t\t\t\t{}\t{}\t\t{}", name, flag_num, flag_str, ver); + } + dbg!(v.len()); + // dbg!(provide_name); + // dbg!(provide_attrsx); + // dbg!(provide_flags); + // dbg!(provide_nevrs); + // dbg!(provide_version); + // k.iter().zip + // dbg!(p.metadata.get_provide().unwrap()); +} +#[test] +pub fn test_req() { + let f = File::open("test_assets/libgcc-12.2.1-2.fc37.x86_64.rpm").unwrap(); + let mut buf = BufReader::new(f); + // let header: Header = Header::parse(&mut buf).unwrap(); + let p = RPMPackage::parse(&mut buf).unwrap(); + // dbg!(p.metadata.get_packager().unwrap()); + // dbg!(p.metadata.get_file_paths().unwrap()); + let rpms = p.metadata.get_provides().unwrap(); + for ele in &rpms { + println!( + "{}\t\t\t{:?}\t{:?}\t{:?}\t{:?}", + ele.name, ele.flags, ele.epoch, ele.version, ele.release + ); + } + dbg!(rpms.len()); + // for (name, flag, flag_str, ver) in 
&v { + // println!("{}\t\t\t\t{}\t{}\t\t{}", name, flag, flag_str, ver); + // } + // dbg!(v.len()); +} +#[test] +pub fn test_pre() { + let f = File::open("test_assets/libgcc-12.2.1-2.fc37.x86_64.rpm").unwrap(); + let mut input = BufReader::new(f); + let mut lead_buffer = [0; LEAD_SIZE]; + input.read_exact(&mut lead_buffer).unwrap(); + let lead = Lead::parse(&lead_buffer).unwrap(); + let signature_header = Header::parse_signature(&mut input).unwrap(); + let mut buf = Vec::new(); + input.read_to_end(&mut buf).unwrap(); + let mut pre = OpenOptions::new() + .create(true) + .append(true) + .write(true) + .open("test_assets/test_pre.bin") + .unwrap(); + pre.write(&buf).unwrap(); +} +#[test] +pub fn test_blob() { + let mut f = File::open("test_assets/test_2.bin").unwrap(); + let mut v = vec![0x8e, 0xad, 0xe8, 0x01, 0x00, 0x00, 0x00, 0x00]; + // BufReader::new(b); + f.read_to_end(v.as_mut()).unwrap(); + let mut input = Cursor::new(v); + let h: Header = Header::parse(&mut input).unwrap(); + dbg!(h.get_provides().unwrap()); + dbg!(h.get_requires().unwrap()); + // RPMPackage::open(path) + // RPMPackageMetadata:: + // let mut input = BufReader::new(f); + // let src: [u8; 8] = [0x8e, 0xad, 0xe8, 0x01, 0x00, 0x00, 0x00, 0x00]; + // input.buffer().copy_from_slice(src.as_ref()); + // let k = input.borrow_mut().buffer(); + // k.copy_from_slice(&src); + // let mut buf: [u8; 16] = [0; 16]; + // input.read_exact(&mut buf).unwrap(); + // let mut b; + // let (num, head_size) = parse_blob(&buf).unwrap(); + // let mut buf = vec![0; (head_size + num * 16) as usize]; + // input.read_exact(&mut buf).unwrap(); + // Header::parse_header(index_header, bytes); + // let k = buf.as_ref(); + // let (rest, num) = be_u32(buf.as_ref()).unwrap(); +} +// pub fn parse_blob(input: &[u8]) -> Result<(u32, u32), RPMError> { +// let (rest, num) = be_u32(input)?; +// let (_rest, header_size) = be_u32(input)?; +// Ok((num, header_size)) +// } +#[test] +pub fn test() { + let ver = "7.0.6"; + let mut epoch = ""; 
+ let mut version = ""; + let mut release = ""; + if ver != "" { + let rest = match ver.split_once(":") { + Some((e, rest)) => { + epoch = e; + rest + } + None => { + epoch = "0"; + ver + } + }; + match rest.split_once("-") { + Some((v, r)) => { + version = v; + release = r; + } + None => { + version = rest; + } + } + } + + dbg!(epoch); + dbg!(version); + dbg!(release); +} diff --git a/rdnf/rpm/src/tests.rs b/rdnf/rpm/src/tests.rs new file mode 100644 index 0000000000000000000000000000000000000000..48889ae5b0c48509d717d9d04a34a56d34182fa1 --- /dev/null +++ b/rdnf/rpm/src/tests.rs @@ -0,0 +1,432 @@ +use super::*; + +#[cfg(feature = "signature-meta")] +use crate::signature::pgp::{Signer, Verifier}; + +fn test_private_key_path() -> std::path::PathBuf { + let mut rpm_path = cargo_manifest_dir(); + rpm_path.push("test_assets/secret_key.asc"); + rpm_path +} + +fn test_public_key_path() -> std::path::PathBuf { + let mut rpm_path = cargo_manifest_dir(); + rpm_path.push("test_assets/public_key.asc"); + rpm_path +} + +fn test_rpm_file_path() -> std::path::PathBuf { + let mut rpm_path = cargo_manifest_dir(); + rpm_path.push("test_assets/389-ds-base-devel-1.3.8.4-15.el7.x86_64.rpm"); + rpm_path +} + +fn file_signatures_test_rpm_file_path() -> std::path::PathBuf { + let mut rpm_path = cargo_manifest_dir(); + rpm_path.push("test_assets/ima_signed.rpm"); + rpm_path +} + +fn cargo_manifest_dir() -> std::path::PathBuf { + std::path::PathBuf::from(env!("CARGO_MANIFEST_DIR")) +} + +#[cfg(feature = "signature-meta")] +#[test] +fn test_rpm_file_signatures() -> Result<(), Box> { + let rpm_file_path = file_signatures_test_rpm_file_path(); + let package = RPMPackage::open(rpm_file_path)?; + let metadata = &package.metadata; + + let signatures = metadata.get_file_ima_signatures()?; + + assert_eq!( + signatures, + [ + "0302041adfaa0e004630440220162785458f5d81d1393cc72afc642c86167c15891ea39213e28907b1c4e8dc6c02202fa86ad2f5e474d36c59300f736f52cb5ed24abb55759a71ec224184a7035a78", + 
"0302041adfaa0e00483046022100bd940093777b75650980afb656507f2729a05c9b1bc9986993106de9f301a172022100b3384f6ba200a5a80647a0f0727c5b8f3ab01f74996a1550db605b44af3d10bf", + "0302041adfaa0e00473045022068953626d7a5b65aa4b1f1e79a2223f2d3500ddcb3d75a7050477db0480a13e10221008637cefe8c570044e11ff95fa933c1454fd6aa8793bbf3e87edab2a2624df460", + ], + ); + + Ok(()) +} + +#[cfg(feature = "signature-meta")] +#[test] +fn test_rpm_file_signatures_resign() -> Result<(), Box> { + let rpm_file_path = file_signatures_test_rpm_file_path(); + let mut package = RPMPackage::open(rpm_file_path)?; + + let private_key_content = std::fs::read(test_private_key_path())?; + let signer = Signer::load_from_asc_bytes(&private_key_content)?; + + package.sign(&signer)?; + + let public_key_content = std::fs::read(test_public_key_path())?; + let verifier = Verifier::load_from_asc_bytes(&public_key_content).unwrap(); + package + .verify_signature(&verifier) + .expect("failed to verify signature"); + Ok(()) +} + +fn test_rpm_header_base(package: RPMPackage) -> Result<(), Box> { + let metadata = &package.metadata; + assert_eq!(metadata.signature.index_entries.len(), 7); + assert_eq!(metadata.signature.index_entries[0].num_items, 16); + assert_eq!(metadata.signature.index_header.header_size, 1156); + + assert_eq!(package.metadata.get_name().unwrap(), "389-ds-base-devel"); + assert!(package.metadata.get_epoch().is_err()); + assert_eq!(package.metadata.get_version().unwrap(), "1.3.8.4"); + assert_eq!(package.metadata.get_release().unwrap(), "15.el7"); + assert_eq!(package.metadata.get_arch().unwrap(), "x86_64"); + + assert_eq!( + package.metadata.get_url().unwrap(), + "https://www.port389.org/" + ); + + // TODO: vcs + // assert_eq!( + // package.metadata.get_vcs().unwrap(), + // "git://pkgs.fedoraproject.org/389-ds-base.git" + // ); + + assert_eq!( + package.metadata.get_packager().unwrap(), + "CentOS BuildSystem " + ); + assert_eq!(package.metadata.get_license().unwrap(), "GPLv3+"); + 
assert_eq!(package.metadata.get_vendor().unwrap(), "CentOS"); + + // TODO: internationalized strings + // assert_eq!( + // package.metadata.get_summary().unwrap(), + // "Development libraries for 389 Directory Server" + // ); + // assert_eq!( + // package.metadata.get_description().unwrap(), + // "Development Libraries and headers for the 389 Directory Server base package." + // ); + // assert_eq!( + // package.metadata.get_group().unwrap(), + // "Development/Libraries" + // ); + assert_eq!( + package.metadata.get_source_rpm().unwrap(), + "389-ds-base-1.3.8.4-15.el7.src.rpm" + ); + assert_eq!( + package.metadata.get_build_host().unwrap(), + "x86-01.bsys.centos.org" + ); + assert_eq!(package.metadata.get_build_time().unwrap(), 1540945151); + + assert_eq!(package.metadata.get_payload_compressor().unwrap(), "xz"); + assert_eq!(package.metadata.get_payload_format().unwrap(), "cpio"); + + assert_eq!(package.metadata.is_source_package(), false); + + let expected_data = vec![ + ( + 16, + IndexData::Bin(vec![ + 0x00, 0x00, 0x00, 0x3e, 0x00, 0x00, 0x00, 0x07, 0xff, 0xff, 0xff, 0x90, 0x00, 0x00, + 0x00, 0x10, + ]), + IndexSignatureTag::HEADER_SIGNATURES, + ), + ( + 536, + IndexData::Bin(vec![ + 0x89, 0x02, 0x15, 0x03, 0x05, 0x00, 0x5b, 0xe9, 0x8c, 0x5b, 0x24, 0xc6, 0xa8, 0xa7, + 0xf4, 0xa8, 0x0e, 0xb5, 0x01, 0x08, 0xa8, 0x4c, 0x0f, 0xfd, 0x1a, 0x9d, 0xe3, 0x0f, + 0x7e, 0xbb, 0x74, 0xe3, 0x62, 0xef, 0xfd, 0x4d, 0x1c, 0x11, 0xa1, 0x68, 0x22, 0x0d, + 0xff, 0x4a, 0x72, 0x11, 0x18, 0xe4, 0xb0, 0x46, 0x6b, 0x11, 0x82, 0xc6, 0xd4, 0xd6, + 0xdb, 0x53, 0x64, 0x1b, 0x32, 0x33, 0x41, 0x95, 0xf3, 0x0c, 0xa6, 0xc2, 0x50, 0xee, + 0x81, 0x81, 0x6a, 0x08, 0x05, 0xfa, 0x3b, 0x26, 0x66, 0x63, 0x5c, 0xfa, 0x4b, 0x25, + 0x02, 0xe7, 0xad, 0x3f, 0x4f, 0x82, 0x7a, 0xa3, 0x4d, 0xad, 0x0d, 0xa0, 0x19, 0x63, + 0x77, 0xd2, 0x18, 0x30, 0x54, 0xc7, 0x14, 0x23, 0x22, 0x0b, 0x0d, 0xd8, 0xba, 0x1b, + 0x6c, 0x94, 0xb3, 0x0f, 0xb3, 0x82, 0x18, 0x62, 0x33, 0x51, 0x4e, 0xaa, 0xfa, 0x84, + 0x8a, 0x4b, 0xcd, 
0x82, 0x72, 0xf1, 0x40, 0x94, 0x38, 0xc7, 0xbc, 0x48, 0x29, 0x4f, + 0x32, 0x98, 0xd9, 0xaf, 0x35, 0x1a, 0x0b, 0xf0, 0x87, 0x74, 0x39, 0xd6, 0xe7, 0x86, + 0x44, 0x9d, 0x5c, 0x7a, 0xde, 0x63, 0x1a, 0x16, 0xb2, 0x29, 0x1d, 0x46, 0x9e, 0x61, + 0xad, 0xff, 0x91, 0x6f, 0x51, 0x65, 0x8a, 0xb9, 0x37, 0x0e, 0x65, 0xb6, 0x77, 0x2f, + 0xb7, 0x74, 0x6a, 0x9c, 0x8a, 0xf0, 0x4b, 0x2d, 0x87, 0xbf, 0x61, 0xff, 0x70, 0xdc, + 0x29, 0xec, 0x9a, 0x0c, 0x7f, 0x12, 0xf6, 0x55, 0xea, 0x22, 0xb5, 0xf0, 0x1a, 0x0d, + 0xa5, 0xe8, 0xc6, 0x7f, 0x1b, 0x9c, 0x55, 0x1b, 0x35, 0x5c, 0xac, 0x72, 0x26, 0x86, + 0x89, 0x30, 0xd5, 0x2d, 0x08, 0x93, 0x0f, 0x9e, 0x1a, 0xfd, 0x8c, 0x7e, 0xdb, 0xca, + 0x57, 0x4f, 0xd9, 0x42, 0xd7, 0xf6, 0x74, 0xcd, 0xf6, 0x68, 0xef, 0xe3, 0x24, 0x66, + 0x92, 0x29, 0xda, 0x96, 0x87, 0x8e, 0xa2, 0x88, 0x23, 0x78, 0xee, 0xc3, 0xfc, 0x71, + 0xfd, 0xb6, 0x36, 0x6b, 0xad, 0xd7, 0x54, 0x55, 0x4d, 0xa0, 0xa3, 0x40, 0x70, 0x51, + 0xc2, 0x76, 0xde, 0x9f, 0xa3, 0xe5, 0x7f, 0x80, 0x72, 0xa9, 0xc3, 0x7f, 0x3e, 0x37, + 0xd7, 0x7a, 0x99, 0x98, 0xc4, 0xc6, 0x4b, 0x51, 0x93, 0xbc, 0xd0, 0xf2, 0x93, 0x09, + 0x73, 0x7f, 0x6e, 0x7a, 0xb4, 0x6b, 0x7b, 0x79, 0xe0, 0x45, 0x55, 0x39, 0xfc, 0x61, + 0xa7, 0xde, 0xa5, 0xff, 0x80, 0x31, 0x39, 0x14, 0xf6, 0xb6, 0x07, 0x6c, 0xd7, 0xa4, + 0x10, 0xa0, 0x87, 0x55, 0x4d, 0xe5, 0xa5, 0x26, 0xc1, 0x99, 0x0e, 0x58, 0x19, 0xae, + 0xc3, 0xbf, 0xe8, 0x16, 0x48, 0xe0, 0x85, 0x96, 0x51, 0x18, 0x72, 0xb8, 0x0f, 0x00, + 0x9f, 0x26, 0xde, 0xec, 0x12, 0x32, 0xec, 0xd0, 0x3c, 0xde, 0x31, 0x0b, 0xd6, 0xbf, + 0x4a, 0xc5, 0x66, 0x5c, 0xcd, 0xb0, 0x29, 0x3c, 0x6d, 0xc6, 0x18, 0x56, 0xd7, 0x17, + 0xb4, 0x4d, 0xeb, 0xdc, 0xbb, 0xe4, 0x4f, 0x1a, 0xf5, 0x72, 0x3a, 0x96, 0x44, 0x4d, + 0xf3, 0x14, 0xb1, 0x79, 0x75, 0xa4, 0x6a, 0xcc, 0x9d, 0x27, 0x47, 0xa9, 0x12, 0xa7, + 0x07, 0xa8, 0x30, 0xae, 0xf2, 0xde, 0xbc, 0x33, 0x87, 0xb5, 0x8c, 0x05, 0x3f, 0x45, + 0x4e, 0x64, 0x4a, 0x86, 0x6d, 0xc3, 0xf4, 0xfe, 0x05, 0x91, 0x81, 0x95, 0x2f, 0xad, + 0x81, 0xda, 0x1b, 0x39, 0xf8, 0xf0, 
0xb8, 0x46, 0xf0, 0x38, 0x82, 0xa6, 0xf2, 0x35, + 0x34, 0x4d, 0x9e, 0x17, 0x9a, 0x97, 0xaf, 0xbd, 0x9b, 0x19, 0x31, 0x88, 0xd8, 0x3a, + 0x50, 0x2e, 0x91, 0x50, 0x45, 0x05, 0x92, 0x88, 0xb2, 0x07, 0x10, 0x9a, 0x6c, 0x44, + 0xa2, 0x72, 0x0f, 0xca, 0x68, 0x17, 0x99, 0x1a, 0x62, 0xcd, 0x66, 0x23, 0x0f, 0x90, + 0xa4, 0x14, 0xa6, 0x6c, 0x7d, 0x06, 0xc4, 0x4b, 0xbe, 0x81, 0x47, 0x72, 0xeb, 0xd4, + 0xa2, 0x3d, 0x63, 0x73, 0x86, 0xef, 0x0e, 0x2b, 0x78, 0xd4, 0x4f, 0x48, 0x2e, 0xb0, + 0x55, 0x8c, 0x8e, 0x5d, + ]), + IndexSignatureTag::RPMSIGTAG_RSA, + ), + ( + 1, + IndexData::StringTag("6178620331c1fe63c5dd3da7c118058e366e37d8".to_string()), + IndexSignatureTag::RPMSIGTAG_SHA1, + ), + ( + 1, + IndexData::Int32(vec![275_904]), + IndexSignatureTag::RPMSIGTAG_SIZE, + ), + ( + 536, + IndexData::Bin(vec![ + 0x89, 0x02, 0x15, 0x03, 0x05, 0x00, 0x5b, 0xe9, 0x8c, 0x5b, 0x24, 0xc6, 0xa8, 0xa7, + 0xf4, 0xa8, 0x0e, 0xb5, 0x01, 0x08, 0x54, 0xe7, 0x10, 0x00, 0xc4, 0xbb, 0xc5, 0x5b, + 0xe7, 0xe3, 0x80, 0xbd, 0xe9, 0x0a, 0xc6, 0x32, 0x6a, 0x42, 0x4a, 0xb0, 0xa9, 0xf5, + 0x95, 0xf1, 0xa9, 0x31, 0x4a, 0x22, 0xfc, 0xf8, 0xdc, 0xcf, 0x89, 0xd8, 0x30, 0x19, + 0x83, 0x55, 0xf0, 0xb5, 0xa1, 0x0c, 0xd3, 0x6b, 0x69, 0x21, 0x8f, 0x05, 0xe5, 0x17, + 0x5c, 0x29, 0x99, 0x84, 0x84, 0xc6, 0xf2, 0xa7, 0xcf, 0xe9, 0xd4, 0x99, 0x42, 0x20, + 0x39, 0xf5, 0xd9, 0x96, 0x6a, 0xc3, 0x01, 0x13, 0xfa, 0x46, 0xee, 0x6d, 0xcb, 0x01, + 0xf7, 0xc9, 0x34, 0x26, 0x8e, 0x9e, 0xba, 0x5d, 0x89, 0xb9, 0xd9, 0x21, 0x15, 0x06, + 0x51, 0xa6, 0xad, 0x70, 0xc5, 0x3a, 0xd8, 0xa8, 0x84, 0x94, 0xbe, 0x29, 0xc1, 0x9b, + 0x53, 0x38, 0x26, 0x90, 0x8b, 0x7d, 0xd2, 0xa0, 0x7c, 0xcc, 0xa2, 0x77, 0x60, 0xfa, + 0xb9, 0x7f, 0x90, 0x77, 0xc7, 0xb9, 0xad, 0x7e, 0xab, 0xa0, 0xdb, 0xa3, 0x29, 0xec, + 0x72, 0xa0, 0x70, 0xd1, 0xed, 0x9a, 0x8c, 0x30, 0x6b, 0xdf, 0xc5, 0x8b, 0x0f, 0xc8, + 0x14, 0xca, 0xe1, 0x2b, 0x95, 0x14, 0x6a, 0x70, 0x21, 0x23, 0x49, 0x14, 0x70, 0xe6, + 0x84, 0xe1, 0xf1, 0xd0, 0x6f, 0xc0, 0x7d, 0xcd, 0xb7, 0xdf, 0xd4, 0xc6, 0xd3, 
0xd0, + 0x17, 0x5d, 0xb3, 0xf4, 0xaf, 0xd3, 0xea, 0xaa, 0xed, 0x2f, 0x72, 0x02, 0xfb, 0xd4, + 0x46, 0x75, 0x2a, 0xc3, 0x38, 0x50, 0xd7, 0xb2, 0x5b, 0x61, 0x64, 0x25, 0x07, 0x8c, + 0x9b, 0x01, 0xf8, 0x6f, 0xeb, 0xbb, 0x5d, 0xb0, 0x02, 0x81, 0x30, 0xeb, 0x4b, 0x01, + 0xe1, 0xff, 0x9f, 0x24, 0xa7, 0xe3, 0xde, 0x71, 0x51, 0x96, 0x92, 0xd0, 0x60, 0x18, + 0xc3, 0x60, 0xd5, 0xae, 0xd7, 0x40, 0x26, 0x57, 0xf3, 0xdb, 0x6a, 0x81, 0x97, 0x64, + 0x10, 0x24, 0x05, 0x7d, 0x54, 0x95, 0x8d, 0x36, 0x5f, 0x23, 0xd7, 0x17, 0x1a, 0x83, + 0xca, 0xf0, 0xe6, 0x1d, 0x27, 0x22, 0xdc, 0xb6, 0x04, 0x0d, 0xe8, 0x25, 0xe6, 0xc4, + 0xe0, 0x26, 0x17, 0x42, 0x03, 0x36, 0xfe, 0xf8, 0xc7, 0xc2, 0xdb, 0xa2, 0xb7, 0x99, + 0x3a, 0xec, 0xe2, 0xd4, 0x93, 0x3d, 0x53, 0x0d, 0x26, 0x96, 0x84, 0x6e, 0x4b, 0xfa, + 0xb3, 0xca, 0x98, 0x8a, 0x65, 0xa8, 0x62, 0x7d, 0xbf, 0x1f, 0x80, 0xbf, 0xa3, 0xa6, + 0xe7, 0x03, 0x0e, 0x15, 0xb7, 0x73, 0x37, 0xdb, 0x35, 0x35, 0x6f, 0xce, 0x71, 0xd0, + 0x3c, 0x15, 0x76, 0x6d, 0x26, 0xe5, 0xf6, 0xae, 0x50, 0xc8, 0x28, 0xa5, 0xb3, 0xdf, + 0xd3, 0x24, 0xb9, 0x3f, 0xfd, 0xcc, 0x02, 0x60, 0xe4, 0xfd, 0x10, 0x71, 0x0a, 0xbe, + 0xdf, 0x19, 0x23, 0xa1, 0x71, 0xe6, 0x99, 0x3c, 0xef, 0xd5, 0x41, 0x20, 0x7a, 0x9a, + 0x8c, 0x24, 0xe8, 0x74, 0x83, 0xdd, 0xab, 0xea, 0x87, 0x38, 0xca, 0x8e, 0x3d, 0x60, + 0x14, 0x20, 0xc7, 0x02, 0xed, 0xa1, 0xdc, 0xd5, 0xcf, 0x22, 0x14, 0x14, 0x93, 0x9c, + 0x68, 0x95, 0xbf, 0x6e, 0xdd, 0x28, 0x3e, 0xfc, 0xa0, 0xfb, 0x37, 0xdf, 0x9c, 0x7c, + 0xef, 0x37, 0x11, 0x7a, 0xa3, 0x28, 0x71, 0xd5, 0xca, 0xa3, 0x17, 0x09, 0xa9, 0x92, + 0xc9, 0x1a, 0x2b, 0x5d, 0xac, 0x0e, 0xee, 0x10, 0xc4, 0x97, 0xad, 0x18, 0x4e, 0x1a, + 0xb7, 0x2a, 0xd2, 0x1c, 0xb6, 0x9d, 0x8b, 0x22, 0x91, 0x61, 0x9f, 0x6e, 0xe0, 0x06, + 0x9c, 0xc2, 0x21, 0x8f, 0x24, 0x95, 0x80, 0x19, 0x17, 0x15, 0x5c, 0xba, 0x27, 0x9f, + 0xa4, 0xc8, 0x19, 0xd1, 0xfb, 0x64, 0xf7, 0x36, 0x5e, 0x6b, 0x36, 0xba, 0x25, 0x27, + 0x3d, 0x31, 0x74, 0x9e, 0x53, 0xf7, 0x23, 0xe2, 0x00, 0x0c, 0x86, 0x9c, 0xab, 0x3f, + 0xf5, 0x44, 
0x6e, 0xaa, 0xd8, 0x03, 0x8b, 0x2e, 0x8c, 0xca, 0x14, 0xfe, 0x1d, 0xad, + 0x6b, 0x5e, 0x60, 0x8d, + ]), + IndexSignatureTag::RPMSIGTAG_PGP, + ), + ( + 16, + IndexData::Bin(vec![ + 0xdb, 0x6d, 0xf4, 0x9b, 0x40, 0x19, 0x6e, 0x84, 0x5e, 0xed, 0x42, 0xe2, 0x16, 0x62, + 0x28, 0x67, + ]), + IndexSignatureTag::RPMSIGTAG_MD5, + ), + ( + 1, + IndexData::Int32(vec![510_164]), + IndexSignatureTag::RPMSIGTAG_PAYLOADSIZE, + ), + ]; + + for (i, (len, data, tag)) in expected_data.iter().enumerate() { + assert_eq!(*len as u32, metadata.signature.index_entries[i].num_items); + assert_eq!(data, &metadata.signature.index_entries[i].data); + assert_eq!(*tag, metadata.signature.index_entries[i].tag); + } + + assert_eq!("cpio", metadata.get_payload_format()?); + assert_eq!("xz", metadata.get_payload_compressor()?); + + let expected_file_checksums = vec![ + "", + "3e4e2501e2a70343a661b0b85b82e27b2090a7e595dc3b5c91e732244ffc3272", + "d36ab638ed0635afcb1582387d676b2e461c5a88ac05a6e2aada8b40b4175bc1", + "9667aa81021c9f4d48690ef6fbb3e7d623bdae94e2da414abd044dc38e52f037", + "1e8235e08aac746155c209c1e641e73bf7a4c34d9971aaa9f864226bd5de9d99", + "53a1e216749208c0bdfc9e8ec70f4bb9459ad1ff224571a7a432e472d2202986", + "2807bb4e77579c81dc7e283d60612a6ecc3ce56000691cac744a4bca73cea241", + "", + "", + "", + "", + "", + "a839e2870b7a212ca9dc6f92007907bc42de1984eac6c278a519d4115071f322", + "3ca364e71a110cd0f2317fbaf99bc8552b8374dbeaf0a989695990f940d88bea", + "eead9f55f0774559d37b20fbc5448f978e1a80d27f488768cbbb278a932e7e9f", + "", + "495b7c1e22dcc0f37d78076a1fcad786b69ac78f1e806466d798fd8fc4a5d10d", + "8ceb4b9ee5adedde47b31e975c1d90c73ad27b6b165a1dcd80c7c545eb65b903", + "a73b7d3598e98f46aeb0559e641d3e6ac83c0fc34e1e5fa98cb9d4a6050bacd9", + "97a6a0413ce3664e192dff12a29bc3f690c24e8a0d48d986478c56cdfe370c3b", + "d110052464fd35c5dc227b3f071606ec40c12ba773fec9ec88ad01430bd4a27b", + "5c3adbdea58a8bb7663c65216dda7d1f38a17b067f718df46ece04ecb503f689", + 
"005dc9d5aa85b10c3200535af8b0ed2123770e3a79d48be5067e81cc553d55bd", + "aa7ea2def38dfc965b27ae20467006aca779e02ad366d50824c4615a7d43af27", + "5ee25b47a83b1431f6ecb1d0a292a8e9a2917c1de9e87129c86cdda743be3f55", + "413aae4fb264aad9d35db94eb28b5f70a7183101692943e81bc90d6718418d8e", + "66004b2e338ce29e59d6a26467e251f092ae0a0f33b67dbba67d2ea9f3ec89f6", + "3db4ad3317bff658a04a1bdbc01fab83cd348f76a1d44585b892fdb0223f2b77", + "ccac76a229e6739ab318d9ede59f6b980d3200fc50669409d3b1e8a0ff1fa029", + "5a3378c84c68e2a407add0f850c64d701af2aedcca67dd2489e86cb1e08dbb6b", + "da188ece6801b97c98031b854d4000e348e969edea239cb1bcbfae7a194e3520", + "28a93db2fe665e8b08494fe5adf3d8dc00c2f96a4994a09eb70cf982d912fa09", + "ba92ea5c90389b38a3c003a5e4a7b09e57473cbd2fb3645c2c0012808023fd0b", + "502dd15afe5609a113108cad047a810b7a97cc8819e830f1d5b00cb5bf65a295", + "4445b3e6550a3d7da96a246e6138d3f349160420085ce14222d3f686eb29915c", + "649f748bffe197539db9237d56da8a3e408731488550617596359cd32731ec06", + "4bd801d053bf456c3dd2c94f9721d1bb0c44d2c119e233b8ad4c5189bd39b256", + "d444bb47f4a83ebd0e6b669f73bb2d6d3dde804b70a0bbd2be66693d88ce8e16", + "087be3693057db21a0b1d38844bb5efa8112f67f3572063546215f25f9fe8d9e", + "2c639c8768e323f2ad4ea96f1667989cb97d49947e9bcebcd449163d9c9bb85c", + ]; + + let checksums = metadata.get_file_checksums()?; + + assert_eq!(expected_file_checksums, checksums); + + let mut buf = Vec::new(); + + package.metadata.lead.write(&mut buf)?; + assert_eq!(96, buf.len()); + + let lead = Lead::parse(&buf)?; + assert!(package.metadata.lead == lead); + + buf = Vec::new(); + package.metadata.signature.write_signature(&mut buf)?; + let signature = Header::parse_signature(&mut buf.as_ref())?; + + assert_eq!( + package.metadata.signature.index_header, + signature.index_header + ); + + for i in 0..signature.index_entries.len() { + assert_eq!( + signature.index_entries[i], + package.metadata.signature.index_entries[i] + ); + } + assert_eq!( + package.metadata.signature.index_entries, + 
signature.index_entries + ); + + buf = Vec::new(); + package.metadata.header.write(&mut buf)?; + let header = Header::parse(&mut buf.as_ref())?; + assert_eq!(package.metadata.header, header); + + buf = Vec::new(); + package.write(&mut buf)?; + let second_pkg = RPMPackage::parse(&mut buf.as_ref())?; + assert_eq!(package.content.len(), second_pkg.content.len()); + assert!(package.metadata == second_pkg.metadata); + + Ok(()) +} + +#[cfg(feature = "async-futures")] +#[tokio::test] +async fn test_rpm_header_async() -> Result<(), Box> { + use tokio_util::compat::TokioAsyncReadCompatExt; + + let rpm_file_path = test_rpm_file_path(); + let mut rpm_file = tokio::fs::File::open(rpm_file_path).await?.compat(); + let package = RPMPackage::parse_async(&mut rpm_file).await?; + test_rpm_header_base(package) +} + +#[cfg(feature = "async-futures")] +#[tokio::test] +async fn test_rpm_builder_async() -> Result<(), Box> { + use std::str::FromStr; + + let mut buff = std::io::Cursor::new(Vec::::new()); + + let pkg = rpm::RPMBuilder::new("test", "1.0.0", "MIT", "x86_64", "some awesome package") + .compression(rpm::Compressor::from_str("gzip")?) + .with_file_async( + "Cargo.toml", + RPMFileOptions::new("/etc/awesome/config.toml").is_config(), + ) + .await? + // file mode is inherited from source file + .with_file_async("Cargo.toml", RPMFileOptions::new("/usr/bin/awesome")) + .await? + .with_file_async( + "Cargo.toml", + // you can set a custom mode and custom user too + RPMFileOptions::new("/etc/awesome/second.toml") + .mode(0o100744) + .user("hugo"), + ) + .await? 
+ .pre_install_script("echo preinst") + .add_changelog_entry("me", "was awesome, eh?", 123123123) + .add_changelog_entry("you", "yeah, it was", 12312312) + .requires(Dependency::any("wget")) + .vendor("dummy vendor") + .url("dummy url") + .vcs("dummy vcs") + .build()?; + + pkg.write(&mut buff)?; + + Ok(()) +} + +#[test] +fn test_rpm_header() -> Result<(), Box> { + let rpm_file_path = test_rpm_file_path(); + let package = RPMPackage::open(rpm_file_path)?; + test_rpm_header_base(package) +} + +#[cfg(feature = "signature-meta")] +#[test] +fn test_region_tag() -> Result<(), Box> { + let region_entry = Header::create_region_tag(IndexSignatureTag::HEADER_SIGNATURES, 2, 400); + + let possible_binary = region_entry.data.as_binary(); + + assert!(possible_binary.is_some(), "should be binary"); + + let data = possible_binary.unwrap(); + + let (_, entry) = IndexEntry::::parse(data)?; + + assert_eq!(entry.tag, IndexSignatureTag::HEADER_SIGNATURES); + assert_eq!( + entry.data.type_as_u32(), + IndexData::Bin(Vec::new()).type_as_u32() + ); + assert_eq!(-48, entry.offset); + + Ok(()) +} diff --git a/rdnf/rpm/test_assets/389-ds-base-1.4.3.20-1.oe2209.x86_64.rpm b/rdnf/rpm/test_assets/389-ds-base-1.4.3.20-1.oe2209.x86_64.rpm new file mode 100644 index 0000000000000000000000000000000000000000..4ffe754ea4c4cdc79b2f224cb2246c43e64cf2ba Binary files /dev/null and b/rdnf/rpm/test_assets/389-ds-base-1.4.3.20-1.oe2209.x86_64.rpm differ diff --git a/rdnf/rpm/test_assets/389-ds-base-2.2.3-1.fc37.x86_64.rpm b/rdnf/rpm/test_assets/389-ds-base-2.2.3-1.fc37.x86_64.rpm new file mode 100644 index 0000000000000000000000000000000000000000..ba0ca1ce451221c0b7935f90bdbce7cf55d11a63 Binary files /dev/null and b/rdnf/rpm/test_assets/389-ds-base-2.2.3-1.fc37.x86_64.rpm differ diff --git a/rdnf/rpm/test_assets/389-ds-base-devel-1.3.8.4-15.el7.x86_64.rpm b/rdnf/rpm/test_assets/389-ds-base-devel-1.3.8.4-15.el7.x86_64.rpm new file mode 100644 index 
0000000000000000000000000000000000000000..20cea342b005f0e42044f87c187340379068eafe Binary files /dev/null and b/rdnf/rpm/test_assets/389-ds-base-devel-1.3.8.4-15.el7.x86_64.rpm differ diff --git a/rdnf/rpm/test_assets/389-ds-base-devel-2.2.3-1.fc37.x86_64.rpm b/rdnf/rpm/test_assets/389-ds-base-devel-2.2.3-1.fc37.x86_64.rpm new file mode 100644 index 0000000000000000000000000000000000000000..3a421f6c153c7f626aba97186e8a8f4284212d28 Binary files /dev/null and b/rdnf/rpm/test_assets/389-ds-base-devel-2.2.3-1.fc37.x86_64.rpm differ diff --git a/rdnf/rpm/test_assets/RPM-GPG-KEY-pmanager b/rdnf/rpm/test_assets/RPM-GPG-KEY-pmanager new file mode 100644 index 0000000000000000000000000000000000000000..8324283d7db344a5c9620819bc6d5f38e66fa157 --- /dev/null +++ b/rdnf/rpm/test_assets/RPM-GPG-KEY-pmanager @@ -0,0 +1,37 @@ +-----BEGIN PGP PUBLIC KEY BLOCK----- + +mQENBF6VXl8BCACaTvVJUSwBOqefAS04j0yeG1uXLTR8//g81A4hNnF5Ak82Vb/v +IZy7i5FS+SWaa3t5RtEYdrXxV1/zDlJrvLhW1Mu/WrjVP1ZOh/J+LvjpnQocPQ9n +xWWdETutpbyVmGIyzEEs9McnCPBIvUJieKGgtnG97Zof1AzaYt36BgNKTe1qQHgR +VttASvI0iY+I4Jjm7m64jgrDD4SbcowY/f4uLwCU+aMH+NFiLwUhUPYKx8gyRqSJ +6e4q0sUotWcDa65Mn+jzRu61dW973naHDeb9IlS9oEUihJOkNL2vTBHsRmvLneCP +CoklEcx+AOHyRuF3aBBSWJe9kZO8NOWzOOcRABEBAAG0NFBhY2thZ2UgTWFuYWdl +ciAodW5wcm90ZWN0ZWQpIDxwbWFuYWdlckBleGFtcGxlLmNvbT6JAU4EEwEIADgW +IQQuWoAqZ+o2uDAY9lTP0zGSWrJ/OQUCXpVeXwIbLwULCQgHAgYVCgkICwIEFgID +AQIeAQIXgAAKCRDP0zGSWrJ/OfoXCACCxwNjoztAy9CC/2o9vcNLnxRQwbMx9M7R ++C+dVA6Ia5/Anu+nUH7UhbmuW95bDTbiAyMH3X6E49XgssEs/JabbXPkEgVB//y7 +pXxPN5k3F/VOQHnpvUkek3r3R/MI/8U1wIqgidL67dbD5iNUxngfeBKdOg/HYUzH +kcE8n9UhVh/kRZcfH0HLkBBKnVmLxBo1f6I7M7GY/csh9jiO+YFzwS3piaj7z/zG +O9YESsOGiDnvpa/s3okphQ+2AvrobdMi8NM8B01WDMfvy1GU/agv8epdhMKHtCwO +/RvEhloS//zl1BTQCoHBfgrqwr70iq96HX3Hq4kppQV1xSg54iJYuQENBF6VXl8B +CADdBd21J2+E4UUN8tzqu37DwKFhwwyuLhiqg9/of4dccYA4Ykuzypwr83lbOvkE +IX6pwPNy7aQsknNB72i8VZWoCRHRHFF5rsz1xpMAcLq5Zxo9ZBS8PTpQv+EPYCNi +47wW2bxgIKWKIPNb8O/jdMr2ACD3E9QrqmF3USw/7Kr9LPv3d+5f8wYEVlXN9sGn 
+49w9U5tIQoFFHlfIQm+NbH+wyjmFPYUdzCIG8g0mdO+kSzE5EkEfYZfBRtVaUiYm +8czw0DTU2LSzlVJXG1Ix5oBQtFk/OpTxCp4FdaZqJtilH2WSyNt2cjAfYGRr0Mqo +OTtUh1jB+ltGOLr3aTxUF1RhABEBAAGJAmwEGAEIACAWIQQuWoAqZ+o2uDAY9lTP +0zGSWrJ/OQUCXpVeXwIbLgFACRDP0zGSWrJ/OcB0IAQZAQgAHRYhBBWffvImHXlR +cxFSX3dQDMBW2zUhBQJelV5fAAoJEHdQDMBW2zUh0KcH/079QdCh7fNuKSyquiKJ +aKMUDEmPLNTebT4qUHa7vFLzij8GFHU54rU+W3Oe+DKZGWHaKbntgpiRxcCyB0h2 +aCyrzzMhlRwV/9+IOhod2ZuDDcxvg1VayCkCMbAaUUJMURbOxSKF1XQP6Blw0kpE +7Yp+GFyV3Fmuqn7oXFUNijq0KNNXxbOm5VMKX/aYAKiNHBFe5WTFH3dOQG/9Sj+M +nOZnPP42owRbw9a/Rn5aLCQ6A9V/Nm9e8F21w2J7W4ZWj1r5XnNXrWfo3uP0Aagl +T7vWitI5FGpQZINn3TpOujDF4MP6BX+wrrvVay0GVMNpWDJfGFvEDke74YXvnKMA +ChujlAf/ct5ySrGxWm0B+oe1y7fMma4zpovF/MLQsFa7ChrHMlg4wS3NHBSzXI3S +168Kt8THfsGlN47hWxLdHOd0uSFaP4S3FAfgcMI2SwjUBVfcQVb2eLQmrPRyskJJ +E6uC5LuXRkhw9/3erSTH32VLaT/eekX80z+y1Gu6qiJks5yjRT74Wasf5kHphSFH +de99tg4byDs+gXhv+MgsoJTDgIGdwN5+sgMwAmzr4cerLjqbaN8OQShnGGy7LL2B +gQnS3t3zJVYusSed/w/2qI2xno0vY51zEW6bsy0ADqahDZ21ci4pQgwRAV/kYpXK +bYcaAKUMat65webRo72E70vOsO+16w== +=zven +-----END PGP PUBLIC KEY BLOCK----- diff --git a/rdnf/rpm/test_assets/anthy-9100h-47.fc37.i686.rpm b/rdnf/rpm/test_assets/anthy-9100h-47.fc37.i686.rpm new file mode 100644 index 0000000000000000000000000000000000000000..316b8dfd3f042ebaeeb1d3de456c622f6d0b262d Binary files /dev/null and b/rdnf/rpm/test_assets/anthy-9100h-47.fc37.i686.rpm differ diff --git a/rdnf/rpm/test_assets/awesome.py b/rdnf/rpm/test_assets/awesome.py new file mode 100644 index 0000000000000000000000000000000000000000..dd7b93b71a4baeefaf9ec088bb1b918edf409b6e --- /dev/null +++ b/rdnf/rpm/test_assets/awesome.py @@ -0,0 +1,5 @@ +#!/usr/bin/env python3 + +for x in range(1, 11): + for y in range(1, 11): + print('%d * %d = %d' % (x, y, x*y)) \ No newline at end of file diff --git a/rdnf/rpm/test_assets/awesome.toml b/rdnf/rpm/test_assets/awesome.toml new file mode 100644 index 0000000000000000000000000000000000000000..0a89568f8d9a09ad5f9391d60b7eb9966c602d85 --- /dev/null +++ 
b/rdnf/rpm/test_assets/awesome.toml @@ -0,0 +1,3 @@ +[check] +date = true +time = true \ No newline at end of file diff --git a/rdnf/rpm/test_assets/awesome.xml b/rdnf/rpm/test_assets/awesome.xml new file mode 100644 index 0000000000000000000000000000000000000000..7d0faf0bd8cc62b78f9593ed5da8d0804717235f --- /dev/null +++ b/rdnf/rpm/test_assets/awesome.xml @@ -0,0 +1,5 @@ + + +Hey there, this is a lil somethin somethin for the test rpm. + + \ No newline at end of file diff --git a/rdnf/rpm/test_assets/emacs-common-ess-18.10.2-6.fc36.noarch.rpm b/rdnf/rpm/test_assets/emacs-common-ess-18.10.2-6.fc36.noarch.rpm new file mode 100644 index 0000000000000000000000000000000000000000..0ce273d95debe43f7babbfcd9089684b0504dcde Binary files /dev/null and b/rdnf/rpm/test_assets/emacs-common-ess-18.10.2-6.fc36.noarch.rpm differ diff --git a/rdnf/rpm/test_assets/ima_signed.rpm b/rdnf/rpm/test_assets/ima_signed.rpm new file mode 100644 index 0000000000000000000000000000000000000000..a5c844324943f6e19a24b2b574467d9236d005c4 Binary files /dev/null and b/rdnf/rpm/test_assets/ima_signed.rpm differ diff --git a/rdnf/rpm/test_assets/kdevelop-22.08.1-2.fc37.i686.rpm b/rdnf/rpm/test_assets/kdevelop-22.08.1-2.fc37.i686.rpm new file mode 100644 index 0000000000000000000000000000000000000000..fd34fdb3acb8cb8dca0c02a812ccae78df1cab68 Binary files /dev/null and b/rdnf/rpm/test_assets/kdevelop-22.08.1-2.fc37.i686.rpm differ diff --git a/rdnf/rpm/test_assets/libgcc-10.3.1-16.oe2209.x86_64.rpm b/rdnf/rpm/test_assets/libgcc-10.3.1-16.oe2209.x86_64.rpm new file mode 100644 index 0000000000000000000000000000000000000000..86c52e62dcfd5b7faa864bbfcf831e80d213cb34 Binary files /dev/null and b/rdnf/rpm/test_assets/libgcc-10.3.1-16.oe2209.x86_64.rpm differ diff --git a/rdnf/rpm/test_assets/libgcc-12.2.1-2.fc37.x86_64.rpm b/rdnf/rpm/test_assets/libgcc-12.2.1-2.fc37.x86_64.rpm new file mode 100644 index 0000000000000000000000000000000000000000..f2f9b1645f25647c2b578326ba1dd0290e95e5fb Binary files 
/dev/null and b/rdnf/rpm/test_assets/libgcc-12.2.1-2.fc37.x86_64.rpm differ diff --git a/rdnf/rpm/test_assets/libgcc.bin b/rdnf/rpm/test_assets/libgcc.bin new file mode 100644 index 0000000000000000000000000000000000000000..c7d31358f7c8abe21df954faa6780aa87c034571 Binary files /dev/null and b/rdnf/rpm/test_assets/libgcc.bin differ diff --git a/rdnf/rpm/test_assets/libgcc.blob.bin b/rdnf/rpm/test_assets/libgcc.blob.bin new file mode 100644 index 0000000000000000000000000000000000000000..877c0db41cfb6131616fd7b601d3f689472218a1 Binary files /dev/null and b/rdnf/rpm/test_assets/libgcc.blob.bin differ diff --git a/rdnf/rpm/test_assets/monkeysphere-0.37-1.el7.noarch.rpm b/rdnf/rpm/test_assets/monkeysphere-0.37-1.el7.noarch.rpm new file mode 100644 index 0000000000000000000000000000000000000000..6889c54f2590192f57b6ba555f48a69683efe3c1 Binary files /dev/null and b/rdnf/rpm/test_assets/monkeysphere-0.37-1.el7.noarch.rpm differ diff --git a/rdnf/rpm/test_assets/public_key.asc b/rdnf/rpm/test_assets/public_key.asc new file mode 100644 index 0000000000000000000000000000000000000000..8324283d7db344a5c9620819bc6d5f38e66fa157 --- /dev/null +++ b/rdnf/rpm/test_assets/public_key.asc @@ -0,0 +1,37 @@ +-----BEGIN PGP PUBLIC KEY BLOCK----- + +mQENBF6VXl8BCACaTvVJUSwBOqefAS04j0yeG1uXLTR8//g81A4hNnF5Ak82Vb/v +IZy7i5FS+SWaa3t5RtEYdrXxV1/zDlJrvLhW1Mu/WrjVP1ZOh/J+LvjpnQocPQ9n +xWWdETutpbyVmGIyzEEs9McnCPBIvUJieKGgtnG97Zof1AzaYt36BgNKTe1qQHgR +VttASvI0iY+I4Jjm7m64jgrDD4SbcowY/f4uLwCU+aMH+NFiLwUhUPYKx8gyRqSJ +6e4q0sUotWcDa65Mn+jzRu61dW973naHDeb9IlS9oEUihJOkNL2vTBHsRmvLneCP +CoklEcx+AOHyRuF3aBBSWJe9kZO8NOWzOOcRABEBAAG0NFBhY2thZ2UgTWFuYWdl +ciAodW5wcm90ZWN0ZWQpIDxwbWFuYWdlckBleGFtcGxlLmNvbT6JAU4EEwEIADgW +IQQuWoAqZ+o2uDAY9lTP0zGSWrJ/OQUCXpVeXwIbLwULCQgHAgYVCgkICwIEFgID +AQIeAQIXgAAKCRDP0zGSWrJ/OfoXCACCxwNjoztAy9CC/2o9vcNLnxRQwbMx9M7R ++C+dVA6Ia5/Anu+nUH7UhbmuW95bDTbiAyMH3X6E49XgssEs/JabbXPkEgVB//y7 +pXxPN5k3F/VOQHnpvUkek3r3R/MI/8U1wIqgidL67dbD5iNUxngfeBKdOg/HYUzH 
+kcE8n9UhVh/kRZcfH0HLkBBKnVmLxBo1f6I7M7GY/csh9jiO+YFzwS3piaj7z/zG +O9YESsOGiDnvpa/s3okphQ+2AvrobdMi8NM8B01WDMfvy1GU/agv8epdhMKHtCwO +/RvEhloS//zl1BTQCoHBfgrqwr70iq96HX3Hq4kppQV1xSg54iJYuQENBF6VXl8B +CADdBd21J2+E4UUN8tzqu37DwKFhwwyuLhiqg9/of4dccYA4Ykuzypwr83lbOvkE +IX6pwPNy7aQsknNB72i8VZWoCRHRHFF5rsz1xpMAcLq5Zxo9ZBS8PTpQv+EPYCNi +47wW2bxgIKWKIPNb8O/jdMr2ACD3E9QrqmF3USw/7Kr9LPv3d+5f8wYEVlXN9sGn +49w9U5tIQoFFHlfIQm+NbH+wyjmFPYUdzCIG8g0mdO+kSzE5EkEfYZfBRtVaUiYm +8czw0DTU2LSzlVJXG1Ix5oBQtFk/OpTxCp4FdaZqJtilH2WSyNt2cjAfYGRr0Mqo +OTtUh1jB+ltGOLr3aTxUF1RhABEBAAGJAmwEGAEIACAWIQQuWoAqZ+o2uDAY9lTP +0zGSWrJ/OQUCXpVeXwIbLgFACRDP0zGSWrJ/OcB0IAQZAQgAHRYhBBWffvImHXlR +cxFSX3dQDMBW2zUhBQJelV5fAAoJEHdQDMBW2zUh0KcH/079QdCh7fNuKSyquiKJ +aKMUDEmPLNTebT4qUHa7vFLzij8GFHU54rU+W3Oe+DKZGWHaKbntgpiRxcCyB0h2 +aCyrzzMhlRwV/9+IOhod2ZuDDcxvg1VayCkCMbAaUUJMURbOxSKF1XQP6Blw0kpE +7Yp+GFyV3Fmuqn7oXFUNijq0KNNXxbOm5VMKX/aYAKiNHBFe5WTFH3dOQG/9Sj+M +nOZnPP42owRbw9a/Rn5aLCQ6A9V/Nm9e8F21w2J7W4ZWj1r5XnNXrWfo3uP0Aagl +T7vWitI5FGpQZINn3TpOujDF4MP6BX+wrrvVay0GVMNpWDJfGFvEDke74YXvnKMA +ChujlAf/ct5ySrGxWm0B+oe1y7fMma4zpovF/MLQsFa7ChrHMlg4wS3NHBSzXI3S +168Kt8THfsGlN47hWxLdHOd0uSFaP4S3FAfgcMI2SwjUBVfcQVb2eLQmrPRyskJJ +E6uC5LuXRkhw9/3erSTH32VLaT/eekX80z+y1Gu6qiJks5yjRT74Wasf5kHphSFH +de99tg4byDs+gXhv+MgsoJTDgIGdwN5+sgMwAmzr4cerLjqbaN8OQShnGGy7LL2B +gQnS3t3zJVYusSed/w/2qI2xno0vY51zEW6bsy0ADqahDZ21ci4pQgwRAV/kYpXK +bYcaAKUMat65webRo72E70vOsO+16w== +=zven +-----END PGP PUBLIC KEY BLOCK----- diff --git a/rdnf/rpm/test_assets/rpm-sign-4.15.1-1.fc31.x86_64.rpm b/rdnf/rpm/test_assets/rpm-sign-4.15.1-1.fc31.x86_64.rpm new file mode 100644 index 0000000000000000000000000000000000000000..eeae84b67f9d9a4d4b403086d224402b1f0658c8 Binary files /dev/null and b/rdnf/rpm/test_assets/rpm-sign-4.15.1-1.fc31.x86_64.rpm differ diff --git a/rdnf/rpm/test_assets/rpmdb.sqlite b/rdnf/rpm/test_assets/rpmdb.sqlite new file mode 100644 index 0000000000000000000000000000000000000000..c9dfa88f1d3d93809d10b6224cf4fc4e06ac095c Binary files 
/dev/null and b/rdnf/rpm/test_assets/rpmdb.sqlite differ diff --git a/rdnf/rpm/test_assets/secret_key.asc b/rdnf/rpm/test_assets/secret_key.asc new file mode 100644 index 0000000000000000000000000000000000000000..1bff1e6080483eb4f3a1232e9f86bee7385e0044 --- /dev/null +++ b/rdnf/rpm/test_assets/secret_key.asc @@ -0,0 +1,64 @@ +-----BEGIN PGP PRIVATE KEY BLOCK----- + +lQOYBF6VXl8BCACaTvVJUSwBOqefAS04j0yeG1uXLTR8//g81A4hNnF5Ak82Vb/v +IZy7i5FS+SWaa3t5RtEYdrXxV1/zDlJrvLhW1Mu/WrjVP1ZOh/J+LvjpnQocPQ9n +xWWdETutpbyVmGIyzEEs9McnCPBIvUJieKGgtnG97Zof1AzaYt36BgNKTe1qQHgR +VttASvI0iY+I4Jjm7m64jgrDD4SbcowY/f4uLwCU+aMH+NFiLwUhUPYKx8gyRqSJ +6e4q0sUotWcDa65Mn+jzRu61dW973naHDeb9IlS9oEUihJOkNL2vTBHsRmvLneCP +CoklEcx+AOHyRuF3aBBSWJe9kZO8NOWzOOcRABEBAAEAB/wIFX6fcY2T/2gIahn9 +wnEdv9pppjw/l7PgMO5QEl95utOeSboBS8Ifp6lMfuG30C1y7jE2XkK8IusTi1Kp +UwTXxLRrYRP5lQS/HrtnHawb5gahcdf4pGRlSQG2wE3ryuTLGz0HX0f/LSZUczjS ++pWiGyjEjdnzFKt7Ne6B4paByhRElDVfxEYVRGuBGDexfWNP0jOEO22c5Xbzh4V0 +LNhtCoVB8xztNgUyk9afYC4pvP7OiAYNqJ6mH8aFzP9/MtRT2xEz/Q9ZVqh6P6ml +ybr3wG278KfnYhtL9C0WS9OjeDnwX1UNA7KupK8ulc9rsnhCAFLptJRQWsik868a +NcIBBADFZxfayElq4PEtG95cnRoxDrSdBTSDOIuexSnbrAVhY/IvMh3NqqBPx2/B +h/40IYyWXMkPzw0TZuvGT+40IK0qP38ZauL9ONhfjGwBClaxdbhK0TEThy4WZ+jX +wa7zSYIdDlgj3+B0W/OrSP9q3PwCrcWOBFO/EKbipNBjZc7DAQQAyB0SHmam6JKv +v3J7JAfGwYcCDKWgBVv2yBPuJzw2jonWpTqa0w6X5nh0hOik6a1rqB/44cRdpasI +Ot98t4bLQ5Z4Y5ADb0V+LeNp2s/CU+ezTBRQj3MhIGRKs72VXry5e/aZj1yiKITn +OklVdYROG5YA8sxdexOswjRYSTuh9BEEAJYllpW6lJ4LbQaZeXw/S1aQbbZS97Do +j5KF3R0poFnLqBdNYp7qLE46bjEUNrx7fdLLB9/Xsc9arqphpukvFbz8eW160MRz +pW/pD5SEE6LuLdzrrTkVebPCSDYDbwBgyHVXDeEYy35YB+j0hhxnBUyJBHKmdWUS +6FHwPDXMJbUCOTi0NFBhY2thZ2UgTWFuYWdlciAodW5wcm90ZWN0ZWQpIDxwbWFu +YWdlckBleGFtcGxlLmNvbT6JAU4EEwEIADgWIQQuWoAqZ+o2uDAY9lTP0zGSWrJ/ +OQUCXpVeXwIbLwULCQgHAgYVCgkICwIEFgIDAQIeAQIXgAAKCRDP0zGSWrJ/OfoX +CACCxwNjoztAy9CC/2o9vcNLnxRQwbMx9M7R+C+dVA6Ia5/Anu+nUH7UhbmuW95b +DTbiAyMH3X6E49XgssEs/JabbXPkEgVB//y7pXxPN5k3F/VOQHnpvUkek3r3R/MI 
+/8U1wIqgidL67dbD5iNUxngfeBKdOg/HYUzHkcE8n9UhVh/kRZcfH0HLkBBKnVmL +xBo1f6I7M7GY/csh9jiO+YFzwS3piaj7z/zGO9YESsOGiDnvpa/s3okphQ+2Avro +bdMi8NM8B01WDMfvy1GU/agv8epdhMKHtCwO/RvEhloS//zl1BTQCoHBfgrqwr70 +iq96HX3Hq4kppQV1xSg54iJYnQOYBF6VXl8BCADdBd21J2+E4UUN8tzqu37DwKFh +wwyuLhiqg9/of4dccYA4Ykuzypwr83lbOvkEIX6pwPNy7aQsknNB72i8VZWoCRHR +HFF5rsz1xpMAcLq5Zxo9ZBS8PTpQv+EPYCNi47wW2bxgIKWKIPNb8O/jdMr2ACD3 +E9QrqmF3USw/7Kr9LPv3d+5f8wYEVlXN9sGn49w9U5tIQoFFHlfIQm+NbH+wyjmF +PYUdzCIG8g0mdO+kSzE5EkEfYZfBRtVaUiYm8czw0DTU2LSzlVJXG1Ix5oBQtFk/ +OpTxCp4FdaZqJtilH2WSyNt2cjAfYGRr0MqoOTtUh1jB+ltGOLr3aTxUF1RhABEB +AAEAB/41b+ajls5CnUhZz/8TjZNfdqJOBZ96OVDjYRDwoGy0sAhlGXbeGD1b7vjH +UuHD2ulJ14nEnqoT0LCnH1/+MN8HCnJRUXxtZBLP1VvTPm4Wx1Ozxg+xV0hTH6El +W4QSs6bVC32c7Ce51q+f6gfFwYYUpgFSW2vvxrjb5TdCmVDZ9uQ7HZSIiDBcViCr +1Us9ovGua2Ju2d0D2K7+Jyl5aUtibLQb4pORtPQbV8P32ZxVA7hUfW4kGu2cW4gi +Ljmok6UwQqXgsGdla0vFz+pNcpGaMArXPrO5VKFSTn1//VAYqtXa1f/Ee4NjKx4Y +gyJY6CGCVpBz37IeUYGN4G6dXIJVBADmOR/S7j8O1+pOVBOMIyvoUXYWIwrXc1q7 +a0GD+elgDQaFUpT8RKY/QW4s8oVGde9WVysptY52AZmHvmhEeNtcx/ksF75EtpFw +5vq4laf/ahy2mKLHfkQxUUy+sOmlC3rGFODN9BiXc+ggphkIo2+ZCeQEHKAcCktg +JnGvJRjUrwQA9cUJOHnA4FX8HOZ53gxVWC7M+mv2R7SfY/aqbJvq1efpRnxSK69o +QoKhFO2ESB5pVxL9dUIHtbB4rGdqT/OSaS5y7RbliiYKoYVk9Nce3kXwCxkV31Eh +qoUEowz3FLWQB/s4AGokmueQWY6rOQLCpco5jqxMJHqKLAVUmudey+8D/3AVnjaI +xTJK3QemfGENmUX/XUFfIsd8FDjYZrWPwAlmnoJQFrZjRK1kxkC2wSH2CV3ei6SV +JPIZg0Ht+dkTL2MCgD+MTRZmY8zCc0fC9Q4L3ebsBSyjiXXWnvMOKJVL487atCXv +YQx7XTrX3S8kOsfVa+8VaD3n3zBTeE0kpUd1Ou2JAmwEGAEIACAWIQQuWoAqZ+o2 +uDAY9lTP0zGSWrJ/OQUCXpVeXwIbLgFACRDP0zGSWrJ/OcB0IAQZAQgAHRYhBBWf +fvImHXlRcxFSX3dQDMBW2zUhBQJelV5fAAoJEHdQDMBW2zUh0KcH/079QdCh7fNu +KSyquiKJaKMUDEmPLNTebT4qUHa7vFLzij8GFHU54rU+W3Oe+DKZGWHaKbntgpiR +xcCyB0h2aCyrzzMhlRwV/9+IOhod2ZuDDcxvg1VayCkCMbAaUUJMURbOxSKF1XQP +6Blw0kpE7Yp+GFyV3Fmuqn7oXFUNijq0KNNXxbOm5VMKX/aYAKiNHBFe5WTFH3dO +QG/9Sj+MnOZnPP42owRbw9a/Rn5aLCQ6A9V/Nm9e8F21w2J7W4ZWj1r5XnNXrWfo +3uP0AaglT7vWitI5FGpQZINn3TpOujDF4MP6BX+wrrvVay0GVMNpWDJfGFvEDke7 
+4YXvnKMAChujlAf/ct5ySrGxWm0B+oe1y7fMma4zpovF/MLQsFa7ChrHMlg4wS3N +HBSzXI3S168Kt8THfsGlN47hWxLdHOd0uSFaP4S3FAfgcMI2SwjUBVfcQVb2eLQm +rPRyskJJE6uC5LuXRkhw9/3erSTH32VLaT/eekX80z+y1Gu6qiJks5yjRT74Wasf +5kHphSFHde99tg4byDs+gXhv+MgsoJTDgIGdwN5+sgMwAmzr4cerLjqbaN8OQShn +GGy7LL2BgQnS3t3zJVYusSed/w/2qI2xno0vY51zEW6bsy0ADqahDZ21ci4pQgwR +AV/kYpXKbYcaAKUMat65webRo72E70vOsO+16w== +=b9Sz +-----END PGP PRIVATE KEY BLOCK----- diff --git a/rdnf/rpm/test_assets/test_2.bin b/rdnf/rpm/test_assets/test_2.bin new file mode 100644 index 0000000000000000000000000000000000000000..870593cffb67fb742e4cd42e34974f4143304f27 Binary files /dev/null and b/rdnf/rpm/test_assets/test_2.bin differ diff --git a/rdnf/rpm/test_assets/test_pre.bin b/rdnf/rpm/test_assets/test_pre.bin new file mode 100644 index 0000000000000000000000000000000000000000..0b30481bfd25c0c4cbbc60920a5b5d75c530d142 Binary files /dev/null and b/rdnf/rpm/test_assets/test_pre.bin differ diff --git a/rdnf/solv-sys/Cargo.toml b/rdnf/solv-sys/Cargo.toml deleted file mode 100644 index fab10d86277c57d2154bbd9b42592740cb080500..0000000000000000000000000000000000000000 --- a/rdnf/solv-sys/Cargo.toml +++ /dev/null @@ -1,19 +0,0 @@ -[package] -name = "solv-sys" -version = "0.1.4" -license = "MIT" -repository = "https://github.com/AOSC-Dev/abbs-meta-rs/tree/master/libsolv-sys" -description = "Raw bindings to libsolv" -keywords = ["libsolv", "solver", "dependency"] -authors = ["liushuyu "] -edition = "2018" - -[dependencies] -libc = "0.2" - -[build-dependencies] -cc = "1" -pkg-config = "0.3" -bindgen = "0.60" -anyhow = "1" -cmake = "0.1" diff --git a/rdnf/solv-sys/README.md b/rdnf/solv-sys/README.md deleted file mode 100644 index 26b2c4383f7d0f0152c932c91049e20d5fa6c777..0000000000000000000000000000000000000000 --- a/rdnf/solv-sys/README.md +++ /dev/null @@ -1,7 +0,0 @@ -# `libsolv-sys` - -Low-level Rust binding for [`libsolv`](https://github.com/openSUSE/libsolv). 
- -The current binding is tailored to dpkg-based distro usage (Enabled `libsolvext` but only Debian support is added). - -However, the library will prefer to use the system `libsolv` if there is one. Make sure to install the development files from the package manager. diff --git a/rdnf/solv-sys/build.rs b/rdnf/solv-sys/build.rs deleted file mode 100644 index b585fd9f835fc6185b744409b843523a088170ed..0000000000000000000000000000000000000000 --- a/rdnf/solv-sys/build.rs +++ /dev/null @@ -1,116 +0,0 @@ -use anyhow::{anyhow, Result}; -use std::{ - fs, - path::{self, Path, PathBuf}, -}; - -const ALLOWED_FUNC_PREFIX: &[&str] = &[ - "map", - "policy", - "pool", - "prune", - "queue", - "repo", - "repodata", - "selection", - "solv", - "solver", - "testcase", - "transaction", - "dataiterator", - "datamatcher", - "stringpool", -]; - -fn find_system_libsolv() -> Result { - let mut conf = pkg_config::Config::new(); - let lib = conf.atleast_version("0.7").probe("libsolv")?; - conf.atleast_version("0.7").probe("libsolvext")?; - - for inc in lib.include_paths { - if inc.join("solv").is_dir() { - return Ok(inc.join("solv")); - } - } - - Err(anyhow!("Error finding libsolv include path")) -} - -fn build_libsolv() -> Result { - println!("cargo:warning=System libsolv not found. Using bundled version."); - let p = path::PathBuf::from("./libsolv/CMakeLists.txt"); - if !p.is_file() { - return Err(anyhow!( - "Bundled libsolv not found, please do `git submodule update --init`." 
- )); - } - let out = cmake::Config::new(p.parent().unwrap()) - .define("ENABLE_DEBIAN", "ON") - .define("DEBIAN", "ON") - .define("ENABLE_STATIC", "ON") - .define("DISABLE_SHARED", "ON") - .target(&std::env::var("CMAKE_TARGET").unwrap_or_else(|_| std::env::var("TARGET").unwrap())) - .build(); - println!( - "cargo:rustc-link-search=native={}", - out.join("lib").display() - ); - println!( - "cargo:rustc-link-search=native={}", - out.join("lib64").display() - ); - println!("cargo:rustc-link-lib=static=solv"); - println!("cargo:rustc-link-lib=static=solvext"); - println!("cargo:rustc-link-lib=z"); - - Ok(out.join("include/solv")) -} - -fn check_solvext_bindings( - include_path: &Path, - builder: bindgen::Builder, -) -> Result { - let mut builder = builder; - for inc in fs::read_dir(include_path)? { - let inc = inc?; - let name = inc.file_name(); - let name = name.to_string_lossy(); - // all the solvext include files are named like `repo_.h` - if name.starts_with("repo_") && name.ends_with(".h") { - builder = builder.header(inc.path().to_str().unwrap()); - } - } - - Ok(builder) -} - -fn generate_bindings(include_path: &Path) -> Result<()> { - let output = std::env::var("OUT_DIR")?; - let generator = bindgen::Builder::default() - .header(include_path.join("solver.h").to_str().unwrap()) - .header(include_path.join("solverdebug.h").to_str().unwrap()) - .header(include_path.join("selection.h").to_str().unwrap()) - .header(include_path.join("knownid.h").to_str().unwrap()) - .header(include_path.join("chksum.h").to_str().unwrap()) - .header(include_path.join("solv_xfopen.h").to_str().unwrap()) - .header(include_path.join("evr.h").to_str().unwrap()) - .header(include_path.join("testcase.h").to_str().unwrap()) - .allowlist_type("(Id|solv_knownid)") - .allowlist_var(".*") - .allowlist_function(format!("({}).*", ALLOWED_FUNC_PREFIX.join("|"))); - check_solvext_bindings(include_path, generator)? 
- .generate() - .unwrap() - .write_to_file(Path::new(&output).join("bindings.rs"))?; - Ok(()) -} - -fn main() -> Result<()> { - let include_path = match find_system_libsolv() { - Ok(p) => p, - Err(_) => build_libsolv()?, - }; - generate_bindings(&include_path)?; - - Ok(()) -} diff --git a/rdnf/solv-sys/src/ffi.rs b/rdnf/solv-sys/src/ffi.rs deleted file mode 100644 index 66e09db5b29097991743c9c24e7450b5e98b1f27..0000000000000000000000000000000000000000 --- a/rdnf/solv-sys/src/ffi.rs +++ /dev/null @@ -1,2 +0,0 @@ -#![allow(warnings)] -include!(concat!(env!("OUT_DIR"), "/bindings.rs")); diff --git a/rdnf/solv-sys/src/lib.rs b/rdnf/solv-sys/src/lib.rs deleted file mode 100644 index 32bebf2fdb89ac0269b82bc7e48feedcfbf7baaa..0000000000000000000000000000000000000000 --- a/rdnf/solv-sys/src/lib.rs +++ /dev/null @@ -1,11 +0,0 @@ -//! Low-level Rust binding for libsolv. -//! -//! The current binding is tailored to dpkg-based distro usage -//! (Enabled libsolvext but only Debian support is added). -//! -//! All the bindings are inside the `ffi` module. -//! Since this is a low-level binding library, you need to consult -//! [libsolv C API documentation](https://github.com/openSUSE/libsolv) for more information. 
- -/// FFI bindings for libsolv -pub mod ffi; diff --git a/rdnf/src/c_lib/mod.rs b/rdnf/src/c_lib/mod.rs deleted file mode 100644 index 12661aa0adb02647f4c3aca955050990a4046dd8..0000000000000000000000000000000000000000 --- a/rdnf/src/c_lib/mod.rs +++ /dev/null @@ -1,162 +0,0 @@ -use rpm_sys::ffi::rpmts; -use solv_sys::ffi::{Dataiterator, Map, Pool, Queue, Repo, Solvable}; - -extern "C" { - fn char_ptr_offset_bind(ptr: *const i8, offset: i32) -> *const i8; - fn queue_empty_static(q: *mut Queue); - fn queue_push_static(q: *mut Queue, id: i32); - fn queue_push2_static(q: *mut Queue, id1: i32, id2: i32); - fn create_data_iterator_empty_bind() -> Dataiterator; // Dataiterator di={0} - // fn create_Repo_empty_bind() -> s_Repo; // s_Pool p={0} - fn dataiterator_init_simple_bind( - di: *mut Dataiterator, - pool: *mut Pool, - match_: *const ::std::os::raw::c_char, - flags: ::std::os::raw::c_int, - ); - fn dataiterator_set_search_simple_bind(di: *mut Dataiterator); - fn get_queue_element_value_bind(q: *const Queue, index: ::std::os::raw::c_int) -> i32; - fn pool_id2solvable_static(pool: *const Pool, p: ::std::os::raw::c_int) -> *mut Solvable; - fn solv_add_flags_to_jobs_bind(q: *mut Queue, flags: ::std::os::raw::c_int); - fn get_pool_solvables_value_bind( - pool: *const Pool, - index: ::std::os::raw::c_uint, - ) -> *mut Solvable; - fn is_pseudo_package_static(pool: *mut Pool, s: *mut Solvable) -> i32; - fn pool_id2repo_static(pool: *const Pool, id: i32) -> *mut Repo; - fn pool_whatprovides_static(pool: *mut Pool, d: i32) -> i32; - fn get_pool_whatprovidesdata_value_bind(pool: *const Pool, index: i32) -> i32; - fn pool_match_nevr_static(pool: *mut Pool, s: *mut Solvable, d: i32) -> i32; - fn pool_disabled_solvable_static(pool: *const Pool, s: *mut Solvable) -> i32; - // fn map_empty_static(m: *mut Map); - fn map_set_static(m: *mut Map, n: i32); - fn map_setall_static(m: *mut Map); - // fn map_clr_static(m: *mut Map, n: i32); - // fn map_tst_static(m: *mut Map, n: i32); - // 
fn map_clr_at_static(m: *mut Map, n: i32); - // int set_callback_fn(rpmts ts,int quiet,int term_width){ - fn set_callback_fn(ts: rpmts, quiet: i32, term_width: u16) -> i32; - -} -// fn char_ptr_offset_bind(ptr:*const i8,offset:i32)->*const i8; -pub fn char_ptr_offset(ptr: *const i8, offset: i32) -> *const i8 { - unsafe { char_ptr_offset_bind(ptr, offset) } -} -#[inline] -pub fn queue_empty(q: *mut Queue) { - unsafe { - queue_empty_static(q); - } -} -#[inline] -pub fn queue_push(q: *mut Queue, id: i32) { - unsafe { - queue_push_static(q, id); - } -} -#[inline] -pub fn queue_push2(q: *mut Queue, id1: i32, id2: i32) { - unsafe { - queue_push2_static(q, id1, id2); - } -} -#[inline] -pub fn create_dataiterator_empty() -> Dataiterator { - unsafe { create_data_iterator_empty_bind() } -} -// #[inline] -// pub fn create_Repo_empty() -> s_Repo { -// unsafe { create_Repo_empty_bind() } -// } -#[inline] -pub fn dataiterator_init_simple( - di: *mut Dataiterator, - pool: *mut Pool, - match_: *const ::std::os::raw::c_char, - flags: ::std::os::raw::c_int, -) { - unsafe { - dataiterator_init_simple_bind(di, pool, match_, flags); - } -} -#[inline] -pub fn dataiterator_set_search_simple(di: *mut Dataiterator) { - unsafe { - dataiterator_set_search_simple_bind(di); - } -} -#[inline] -pub fn get_queue_element_value(q: *const Queue, index: u32) -> i32 { - unsafe { get_queue_element_value_bind(q, index as i32) } -} -#[inline] -pub fn pool_id2solvable(pool: *const Pool, p: i32) -> *mut Solvable { - unsafe { pool_id2solvable_static(pool, p) } -} -#[inline] -pub fn solv_add_flags_to_jobs(q: *mut Queue, flags: i32) { - unsafe { solv_add_flags_to_jobs_bind(q, flags) } -} -#[inline] -pub fn get_pool_solvables_value(pool: *const Pool, index: u32) -> *mut Solvable { - unsafe { get_pool_solvables_value_bind(pool, index) } -} -#[inline] -pub fn is_pseudo_package(pool: *mut Pool, s: *mut Solvable) -> bool { - unsafe { is_pseudo_package_static(pool, s) == 1 } -} -#[inline] -pub fn pool_id2repo(pool: 
*const Pool, id: i32) -> *mut Repo { - unsafe { pool_id2repo_static(pool, id) } -} -#[inline] -pub fn pool_whatprovides(pool: *mut Pool, id: i32) -> i32 { - unsafe { pool_whatprovides_static(pool, id) } -} -#[inline] -pub fn get_pool_whatprovidesdata_value(pool: *const Pool, index: i32) -> i32 { - unsafe { get_pool_whatprovidesdata_value_bind(pool, index) } -} -#[inline] -pub fn pool_match_nevr(pool: *mut Pool, s: *mut Solvable, d: i32) -> i32 { - unsafe { pool_match_nevr_static(pool, s, d) } -} - -pub fn pool_disabled_solvable(pool: *const Pool, s: *mut Solvable) -> bool { - let result = unsafe { pool_disabled_solvable_static(pool, s) }; - result == 1 -} -// pub fn map_empty(m: *mut Map) { -// unsafe { -// map_empty_static(m); -// } -// } - -pub fn map_set(m: *mut Map, n: i32) { - unsafe { - map_set_static(m, n); - } -} -pub fn map_setall(m: *mut Map) { - unsafe { - map_setall_static(m); - } -} -// pub fn map_clr(m: *mut Map, n: i32) { -// unsafe { -// map_clr_static(m, n); -// } -// } -// pub fn map_tst(m: *mut Map, n: i32) { -// unsafe { -// map_tst_static(m, n); -// } -// } -// pub fn map_clr_at(m: *mut Map, n: i32) { -// unsafe { -// map_clr_at_static(m, n); -// } -// } -pub fn set_callbackfunction(ts: rpmts, quiet: bool, term_width: u16) -> i32 { - unsafe { set_callback_fn(ts, quiet as i32, term_width) } -} diff --git a/rdnf/src/c_lib/queue_static.c b/rdnf/src/c_lib/queue_static.c deleted file mode 100644 index bab9143485d8dce8af280f85fca04e4b0c7d0a29..0000000000000000000000000000000000000000 --- a/rdnf/src/c_lib/queue_static.c +++ /dev/null @@ -1,159 +0,0 @@ -#include -#include -#include -#include -char* char_ptr_offset_bind(char* p,int offset){ - - return p+offset; -} -void queue_empty_static(Queue *q) -{ - if (q->alloc) - { - q->left += (q->elements - q->alloc) + q->count; - q->elements = q->alloc; - } - else - q->left += q->count; - q->count = 0; -} - -void queue_push_static(Queue *q, Id id) -{ - if (!q->left) - queue_alloc_one(q); - 
q->elements[q->count++] = id; - q->left--; -} - -void queue_push2_static(Queue *q, Id id1, Id id2) -{ - queue_push(q, id1); - queue_push(q, id2); -} - -Dataiterator create_data_iterator_empty_bind() -{ - Dataiterator di = {0}; - return di; -} - -Repo create_Repo_empty_bind() -{ - Repo r = {0}; - return r; -} - -void dataiterator_init_simple_bind(Dataiterator *di, Pool *pool, const char *match, int flags) -{ - dataiterator_init(di, pool, 0, 0, 0, match, flags); -} - -void dataiterator_set_search_simple_bind(Dataiterator *di) -{ - dataiterator_set_search(di, 0, 0); -} - -int get_queue_element_value_bind(Queue *q, int index) -{ - if (index >= q->count) - { - return -1; - }; - return q->elements[index]; -} - -Solvable *pool_id2solvable_static(const Pool *pool, Id p) -{ - return pool->solvables + p; -} -void solv_add_flags_to_jobs_bind(Queue *q, int flags) -{ - for (int i = 0; i < q->count; i += 2) - { - q->elements[i] |= flags; - } -} -Solvable *get_pool_solvables_value_bind(const Pool *pool, unsigned int index) -{ - return &(pool->solvables[index]); -} -int is_pseudo_package_static(Pool *pool, Solvable *s) -{ - const char *n = pool_id2str(pool, s->name); - if (*n == 'p' && !strncmp(n, "patch:", 6)) - { - return 1; - } - return 0; -} -Repo *pool_id2repo_static(Pool *pool, Id repoid) -{ - return repoid < pool->nrepos ? 
pool->repos[repoid] : NULL; -} -Id pool_whatprovides_static(Pool *pool, Id d) -{ - if (!ISRELDEP(d)) - { - if (pool->whatprovides[d]) - return pool->whatprovides[d]; - } - else - { - Id v = GETRELID(d); - if (pool->whatprovides_rel[v]) - return pool->whatprovides_rel[v]; - } - return pool_addrelproviders(pool, d); -} -Id get_pool_whatprovidesdata_value_bind(Pool *pool, Id index) -{ - return pool->whatprovidesdata[index]; -} -int pool_match_nevr_static(Pool *pool, Solvable *s, Id d) -{ - if (!ISRELDEP(d)) - return d == s->name; - else - return pool_match_nevr_rel(pool, s, d); -} -int pool_disabled_solvable_static(const Pool *pool, Solvable *s) -{ - if (s->repo && s->repo->disabled) - return 1; - if (pool->considered) - { - Id id = s - pool->solvables; - if (!MAPTST(pool->considered, id)) - return 1; - } - return 0; -} - -void map_empty_static(Map *m) -{ - MAPZERO(m); -} -void map_set_static(Map *m, int n) -{ - MAPSET(m, n); -} -void map_setall_static(Map *m) -{ - MAPSETALL(m); -} -void map_clr_static(Map *m, int n) -{ - MAPCLR(m, n); -} -int map_tst_static(Map *m, int n) -{ - return MAPTST(m, n); -} -void map_clr_at_static(Map *m, int n) -{ - MAPCLR_AT(m, n); -} - - -// void rdnf_set diff --git a/rdnf/src/c_lib/rpm_trans.c b/rdnf/src/c_lib/rpm_trans.c deleted file mode 100644 index 044793872daa372b52d7f754ff523b69cf3ad2c1..0000000000000000000000000000000000000000 --- a/rdnf/src/c_lib/rpm_trans.c +++ /dev/null @@ -1,129 +0,0 @@ -#include -#include -#include -#include -#include -typedef struct _CALL_BACK_CONTEXT_ -{ - int quiet; - FD_t fd; -} CallbackContext; - -void *rdnf_callback_fn( - const void *pArg, - const rpmCallbackType what, - const rpm_loff_t amount, - const rpm_loff_t total, - fnpyKey key, - rpmCallbackData data) -{ - Header pkg_header_ptr = (Header)pArg; - void *pResult = NULL; - char *file_path = (char *)key; - CallbackContext *context = (CallbackContext *)data; - int quiet=context->quiet %2; - int term_width=context->quiet >> 1; - char *nevra = 
headerGetAsString(pkg_header_ptr, RPMTAG_NEVRA); - int len=0; - term_width=term_width > 250?250:term_width; - term_width=term_width < 80 ? 80:term_width; - switch (what) - { - case RPMCALLBACK_INST_OPEN_FILE: - if ((!file_path) || !(*file_path)) - { - return NULL; - } - context->fd = Fopen(file_path, "r.udfio"); - return (void *)context->fd; - break; - case RPMCALLBACK_INST_CLOSE_FILE: - if (context->fd) - { - Fclose(context->fd); - context->fd = NULL; - } - break; - case RPMCALLBACK_INST_START: - if (!quiet) - { - len= term_width - strlen("Installing") - 2; - printf("%-*s\e[32mInstalling\e[0m \r", len, nevra); - (void)fflush(stdout); - } - break; - case RPMCALLBACK_INST_STOP: - if(!quiet){ - len=term_width-strlen("Installed")-2; - printf("%-*s\e[32mInstalled\e[0m \n", len, nevra); - (void)fflush(stdout); - } - break; - case RPMCALLBACK_UNINST_START: - if (!quiet) - { - len = term_width - strlen("Removing") - 2; - printf("%-*s\e[32mRemoving\e[0m \r", len, nevra); - (void)fflush(stdout); - } - break; - case RPMCALLBACK_UNINST_STOP: - if (!quiet){ - len=term_width-strlen("Removed")-2; - printf("%-*s\e[32mRemoved\e[0m \n",len,nevra); - (void)fflush(stdout); - } - break; - case RPMCALLBACK_SCRIPT_ERROR: - { - /* https://bugzilla.redhat.com/show_bug.cgi?id=216221#c15 */ - const char *pszScript; - switch (amount) - { - case RPMTAG_PREIN: - pszScript = "%prein"; - break; - case RPMTAG_POSTIN: - pszScript = "%postin"; - break; - case RPMTAG_PREUN: - pszScript = "%preun"; - break; - case RPMTAG_POSTUN: - pszScript = "%postun"; - break; - default: - pszScript = "(unknown)"; - break; - } - /* %pre and %preun will cause errors (install/uninstall will fail), - other scripts just warn (install/uninstall will succeed) */ - if (total == RPMRC_OK) - { - len = term_width - strlen("warning in ") - 12; - printf("%-*s\e[33mwarning in \e[0m%-*s", len, nevra, 12, pszScript); - (void)fflush(stdout); - } - else - { - len = term_width- strlen("error in ") - 12; - printf("%-*s\e[31error in 
\e[0m%-*s", len, nevra, 12, pszScript); - (void)fflush(stdout); - } - } - break; - default: - break; - } - if (nevra!=NULL){ - free(nevra); - } - return pResult; -} -int set_callback_fn(rpmts ts, int quiet, uint16_t term_width) -{ - CallbackContext p = {0}; - p.quiet = term_width <<1; - p.quiet +=quiet; - return rpmtsSetNotifyCallback(ts, rdnf_callback_fn, (void *)&p); -} \ No newline at end of file diff --git a/rdnf/src/cache/db.rs b/rdnf/src/cache/db.rs new file mode 100644 index 0000000000000000000000000000000000000000..53f76f515f15efb087648eb7b57eed53f3249b45 --- /dev/null +++ b/rdnf/src/cache/db.rs @@ -0,0 +1,57 @@ +use anyhow::Result; +use rocksdb::DBCompactionStyle; +use std::path::Path; +use std::sync::Arc; + +use indradb::{Database, RocksdbDatastore}; + +use crate::conf::repo_conf::RepoConfig; +use crate::repo::Repo; + +use super::parse_xml::{PkgDetailState, XmlState}; +use super::read_metadata::read_metadata_from_xml; +use super::repo_data::RepoData; + +fn db_opt() -> rocksdb::Options { + let mut opt = RocksdbDatastore::get_options(None); + opt.set_write_buffer_size(32 * 1024 * 1024); + opt.set_enable_pipelined_write(true); + opt.set_max_write_buffer_number(32); + opt.set_min_write_buffer_number(2); + opt.set_max_total_wal_size(16 * 1024 * 1024); + opt.set_compaction_style(DBCompactionStyle::Fifo); + opt.set_keep_log_file_num(5); + opt +} +pub fn create_db>(path: P) -> Result>> { + let db = RocksdbDatastore::new_db_with_options(path, &db_opt())?; + db.index_property(PkgDetailState::Name.as_identifier()) + .unwrap(); + db.index_property(XmlState::Entry.as_identifier().unwrap()) + .unwrap(); + Ok(Arc::new(db)) +} +pub fn open_db>(path: P) -> Result>> { + Ok(Arc::new(RocksdbDatastore::new_db_with_options( + path, + &db_opt(), + )?)) +} +impl Repo { + pub async fn create>( + config: Arc, + db_path: P, + xml_path: P, + ) -> Result { + let db = create_db(db_path.as_ref())?; + let data = RepoData::new(&db); + read_metadata_from_xml(&db, xml_path).await?; + 
Ok(Self { data, config }) + } + pub async fn open>(config: Arc, db_path: P) -> Result { + let db = open_db(db_path)?; + let data = RepoData::new(&db); + Ok(Self { data, config }) + } +} + diff --git a/rdnf/src/cache/installed.rs b/rdnf/src/cache/installed.rs new file mode 100644 index 0000000000000000000000000000000000000000..8605c425d54fcd6875e74e6dc98fea80d4413835 --- /dev/null +++ b/rdnf/src/cache/installed.rs @@ -0,0 +1,188 @@ +use std::{ + fs::OpenOptions, + io::{Read, Write}, + ops::Deref, + path::Path, + sync::Arc, +}; + +use crate::{ + conf::config_main::ConfigMain, + default::{INSTALLED_CACHE_DIR_NAME, INSTALLED_SEQ}, + utils::recursively_remove_dir, +}; + +use super::{ + db::{create_db, open_db}, + parse_rpm::read_metadata_from_rpm_headers, + repo_data::RepoData, +}; +use anyhow::{bail, Result}; +use indradb::RocksdbDatastore; +use rpm::{db::parse_pkg_blob_to_header, Header, IndexTag}; +use rusqlite::Connection; +use uuid::Uuid; + +pub struct InstalledRepo { + pub data: RepoData, +} +impl Clone for InstalledRepo { + fn clone(&self) -> Self { + Self { + data: self.data.clone(), + } + } +} +impl Deref for InstalledRepo { + type Target = RepoData; + + fn deref(&self) -> &Self::Target { + &self.data + } +} +impl InstalledRepo { + pub async fn open(conf: &ConfigMain, sqlite_path: &str) -> Result { + let flag_path = conf.cachedir.clone() + INSTALLED_SEQ; + let db_path = conf.cachedir.clone() + INSTALLED_CACHE_DIR_NAME; + let rpmdb_conn = Connection::open(sqlite_path)?; + if is_should_sync(flag_path.as_str(), &rpmdb_conn)? { + recursively_remove_dir(db_path.as_str())?; + let db = create_db(db_path)?; + let mut headers = Vec::new(); + for header in get_all_pkg_headers(&rpmdb_conn)? 
{ + headers.push((get_uuid(&header), header)); + } + read_metadata_from_rpm_headers(db.clone(), 2, headers).await?; + update_sync_seq_flag(flag_path, get_sqlite_seq(&rpmdb_conn)?)?; + Ok(Self { + data: RepoData::new(&db), + }) + } else { + Ok(Self { + data: RepoData::new(&open_db(db_path)?), + }) + } + } + pub fn get_repo_name() -> Arc { + Arc::new("Installed".to_string()) + } +} +#[inline] +fn get_sqlite_seq(conn: &Connection) -> Result { + let mut stmt = conn.prepare("select seq from sqlite_sequence").unwrap(); + let res = stmt.query_and_then([], |row| row.get::<_, u32>(0)).unwrap(); + let mut v = Vec::new(); + for ele in res { + match ele { + Ok(s) => v.push(s), + Err(_) => { + bail!("Failed to read sqlite_sequence from rpmdb.sqlite") + } + } + } + match v.pop() { + Some(s) => Ok(s), + None => { + bail!("Failed to read sqlite_sequence from rpmdb.sqlite ; maybe table sqlite_sequence doesn't have data"); + } + } +} +fn get_uuid(header: &Header) -> Uuid { + match header.get_sig_md5() { + Ok(b) => { + match Uuid::from_slice(b) { + Ok(s) => { + return s; + } + Err(_) => {} + }; + } + Err(_) => {} + } + return *uuid::Builder::from_md5_bytes(md5::compute(header).0).as_uuid(); +} +fn update_sync_seq_flag>(flag_path: P, seq: u32) -> Result<()> { + let mut flag_file = OpenOptions::new() + .write(true) + .truncate(true) + .create(true) + .open(flag_path)?; + flag_file.write_all(seq.to_string().as_bytes())?; + Ok(()) +} +fn is_should_sync>(flag_path: P, conn: &Connection) -> Result { + if flag_path.as_ref().to_path_buf().exists() { + let mut flag_file = OpenOptions::new().read(true).open(flag_path)?; + let mut buf = String::new(); + flag_file.read_to_string(&mut buf)?; + let cache_seq = buf.parse::()?; + let seq = get_sqlite_seq(conn)?; + Ok(seq != cache_seq) + } else { + Ok(true) + } +} + +pub fn get_all_pkg_headers(conn: &Connection) -> Result>> { + let mut stmt = conn.prepare("SELECT Packages.blob from Packages")?; + let res = stmt.query_and_then([], |row| row.get::<_, 
Vec>(0))?; + let mut v = Vec::new(); + for ele in res { + v.push(parse_pkg_blob_to_header(ele?.as_mut()).unwrap()); + } + Ok(v) +} + +mod tests { + use std::{alloc::System, sync::Arc, time::SystemTime}; + + use rusqlite::Connection; + + use crate::conf::config_main::ConfigMain; + + use super::{get_sqlite_seq, update_sync_seq_flag, InstalledRepo}; + + const TEST_DB: &str = "assest/rpmdb.sqlite"; + #[test] + fn test_sqlite_seq() { + let start = SystemTime::now(); + let conn = Connection::open(TEST_DB).unwrap(); + assert_eq!(get_sqlite_seq(&conn).unwrap(), 277); + dbg!(SystemTime::now().duration_since(start)); + } + #[test] + fn test_seq_flag() { + update_sync_seq_flag("tests", 2).unwrap(); + } + #[tokio::test] + async fn test_open() { + let mut conf = ConfigMain::default(); + conf.cachedir = "tests/".to_string(); + let installed_repo = InstalledRepo::open(&conf, TEST_DB).await.unwrap(); + // let p=installed_repo.get_pkg_detail_by_name("libgcc").await; + // dbg!(p); + let p = installed_repo + .data + .get_what_pkg_provide_entry_by_name(Arc::new("libgcc(x86-64)".to_string())) + .await; + dbg!(p); + } + #[tokio::test] + async fn test_open_file() { + let mut conf = ConfigMain::default(); + conf.cachedir = "tests/".to_string(); + let installed_repo = InstalledRepo::open(&conf, TEST_DB).await.unwrap(); + // let p=installed_repo.get_pkg_detail_by_name("libgcc").await; + // dbg!(p); + // let mut p = installed_repo + // .data + // .clone() + // .get_what_pkg_provide_file_by_path(Arc::new("/bin/sh".to_string())) + // .await + // .unwrap(); + // dbg!(p); + // let pkg_uuid = p.pop().unwrap().0; + // let p = installed_repo.data.get_pkg_detail_by_uuid(pkg_uuid).await; + // dbg!(p); + } +} diff --git a/rdnf/src/cache/mmap_file.rs b/rdnf/src/cache/mmap_file.rs new file mode 100644 index 0000000000000000000000000000000000000000..fa13e7b150336baf8dd00b398bda79c1ba5a5767 --- /dev/null +++ b/rdnf/src/cache/mmap_file.rs @@ -0,0 +1,91 @@ +use std::{cmp::min, collections::BTreeSet, 
io::Read}; + +use aho_corasick::AhoCorasick; +use anyhow::Result; +use memmap2::{Advice, Mmap, MmapOptions}; +use tokio::fs::File; + +pub struct MmapSlice { + inner: Mmap, + pos: usize, +} +impl MmapSlice { + // >(path: P) + pub fn new( + file: &File, + offset: Option, + len: Option, + advice: Advice, + ) -> Result { + let inner = unsafe { + let mut mmap_builder = MmapOptions::new(); + let mmap_builder = mmap_builder.offset(offset.unwrap_or_default()); + let mmap_builder = match len { + Some(s) => mmap_builder.len(s), + None => mmap_builder, + }; + mmap_builder.map(file) + }?; + inner.advise(advice)?; + Ok(Self { inner, pos: 0 }) + } + pub async fn splitn_file_at(file: &File, n: usize, mid: &[u8]) -> Result> { + let metadata = file.metadata().await.unwrap(); + let file_len = metadata.len(); + let offset_add = file_len / n as u64; + let mut offset = 0; + let ac = AhoCorasick::new([mid]); + let mut offset_set = BTreeSet::new(); + for _ in 0..n { + let mmap = Self::new(file, Some(offset), None, Advice::Sequential)?; + match ac.stream_find_iter(mmap).next() { + Some(r) => match r { + Ok(m) => { + offset_set.insert(offset + m.start() as u64); + } + Err(_) => {} + }, + None => {} + }; + offset = offset + offset_add; + } + offset_set.insert(file_len); + let mut offset_iter = offset_set.iter(); + let mut last = offset_iter.next().unwrap().clone(); + let mut mmap_parts = Vec::new(); + loop { + match offset_iter.next() { + Some(offset) => { + mmap_parts.push(Self::new( + file, + Some(last), + Some((offset - last).try_into()?), + Advice::Sequential, + )?); + last = *offset; + } + None => { + break; + } + } + } + Ok(mmap_parts) + } + // pub fn +} +impl Read for MmapSlice { + #[inline] + fn read(&mut self, buf: &mut [u8]) -> std::io::Result { + let amt = min( + min(buf.len(), libc::ssize_t::MAX as usize), + self.inner.len() - self.pos, + ); + if amt == 1 { + buf[0] = self.inner[self.pos]; + } else { + buf[..amt].copy_from_slice(&self.inner[self.pos..self.pos + amt]); + } + 
self.pos = self.pos + amt; + Ok(amt) + } +} diff --git a/rdnf/src/cache/mod.rs b/rdnf/src/cache/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..bbb1296d331f8a7949e0b16c345d57791bcb2214 --- /dev/null +++ b/rdnf/src/cache/mod.rs @@ -0,0 +1,54 @@ +pub mod read_metadata; + +pub mod db; +pub mod installed; +mod mmap_file; +pub mod model; +pub mod parse_rpm; +pub mod parse_xml; +pub mod repo_data; +pub mod ver; +use std::{path::Path, sync::Arc}; + +use crate::{ + conf::{repo_conf::RepoConfig, repomd::Repomd}, + default::SOLVCACHE_DIR_NAME, + repo::Repo, + utils::download_single_file_with_pb_decode, +}; +use anyhow::Result; +use indicatif::ProgressBar; +use reqwest::Client; +impl RepoConfig { + pub async fn get_repo( + self, + pb: ProgressBar, + repomd: Repomd, + client: Client, + cache_dir: String, + ) -> Result { + let href = repomd.get_primary_location(); + let file_path_with_gz: String = cache_dir.clone() + href; + let file_path = file_path_with_gz.trim_end_matches(".gz"); + let db_path = cache_dir + SOLVCACHE_DIR_NAME; + if !Path::new(file_path).exists() { + let url = self.detail.base_url.as_ref().unwrap().to_owned() + href; + let size = repomd.get_primary_size(); + pb.set_length(size); + let msg = self.full_name.to_owned() + "-" + "primary"; + download_single_file_with_pb_decode( + &client, + &url, + file_path.to_string(), + msg.as_str(), + &pb, + ) + .await?; + let repo = Repo::create(Arc::new(self), db_path, file_path.to_string()).await?; + pb.finish_with_message(msg); + Ok(repo) + } else { + Repo::open(Arc::new(self), db_path).await + } + } +} diff --git a/rdnf/src/cache/model.rs b/rdnf/src/cache/model.rs new file mode 100644 index 0000000000000000000000000000000000000000..3255baa0613c45be7eca9e9ddf717845ac68d7e5 --- /dev/null +++ b/rdnf/src/cache/model.rs @@ -0,0 +1,110 @@ +use indradb::Identifier; + +use serde::{Deserialize, Serialize}; + +use super::ver::Version; + +#[derive(Default, Serialize, Deserialize, Debug)] +pub struct 
EntryDetail { + pub entry_name: String, + pub flags: Option, + pub epoch: Option, + pub ver: Option, + pub rel: Option, + pub pre: bool, + pub pkg_name: String, + pub arch: String, +} +impl EntryDetail { + pub fn new(pkg_name: &str, arch: &String) -> Self { + let mut entry_detail = EntryDetail::default(); + entry_detail.pkg_name = pkg_name.to_string(); + entry_detail.arch = arch.clone(); + entry_detail + } + pub fn as_identifier() -> Identifier { + Identifier::new("entry_detail").unwrap() + } + pub fn name_as_identifier() -> Identifier { + Identifier::new("entry_name").unwrap() + } +} +#[derive(Default, Serialize, Deserialize, Debug)] +pub struct FileDetail { + pub file_path: String, + pub pkg_name: String, + pub arch: String, +} +impl FileDetail { + pub fn new(file_path: &str, pkg_name: &str, arch: &str) -> Self { + Self { + file_path: file_path.to_string(), + pkg_name: pkg_name.to_string(), + arch: arch.to_string(), + } + } + pub fn as_identifier() -> Identifier { + Identifier::new("file_detail").unwrap() + } +} +#[derive(Debug, Serialize, Deserialize, Default)] +pub struct PkgDetail { + pub name: String, + pub arch: String, + pub version: Version, + pub checksum: CheckSum, + pub summary: String, + pub description: String, + pub packager: String, + pub url: String, + pub time: Time, + pub size: Size, + pub location: Option, +} +impl PkgDetail { + pub fn as_identifier() -> Identifier { + Identifier::new("PkgDetail").unwrap() + } +} +#[derive(Debug, Serialize, Deserialize, Default)] +pub struct CheckSum { + pub r#type: String, + pub pkgid: String, + pub checksum: String, +} +#[derive(Debug, Serialize, Deserialize, Default)] +pub struct Time { + pub file: Option, + pub build: Option, +} +#[derive(Debug, Serialize, Deserialize, Default)] +pub struct Size { + pub package: Option, + pub installed: Option, + pub archive: Option, +} +// GPLv2+ and BSD and MIT and IBM and MPLv2.0 +// Fedora Project +// Unspecified +// buildvm-x86-23.iad2.fedoraproject.org +// 
0ad-0.0.26-3.fc37.src.rpm +// +#[derive(Debug, Serialize, Deserialize, Default)] +pub struct FormatDetail { + pub license: Option, + pub vendor: Option, + pub group: Option, + pub build_host: Option, + pub source_rpm: Option, + pub header_range: Option, +} +#[derive(Debug, Serialize, Deserialize, Default)] +pub struct HeaderRange { + pub start: u32, + pub end: u32, +} +impl FormatDetail { + pub fn as_identifier() -> Identifier { + Identifier::new("FormatDetail").unwrap() + } +} diff --git a/rdnf/src/cache/parse_rpm.rs b/rdnf/src/cache/parse_rpm.rs new file mode 100644 index 0000000000000000000000000000000000000000..c248107783d0ca3552e69d46079f75503ee56841 --- /dev/null +++ b/rdnf/src/cache/parse_rpm.rs @@ -0,0 +1,197 @@ +use std::sync::Arc; + +use super::{ + model::{CheckSum, EntryDetail, FormatDetail, PkgDetail}, + parse_xml::{PkgDetailState, Relation}, + read_metadata::{BulkInsert, BulkSender}, + ver::Version, +}; +use anyhow::Result; +use async_channel::bounded; +use indradb::{ijson, BulkInsertItem, Database, Datastore}; +use rpm::{Header, IndexTag, RpmEntry}; +use uuid::Uuid; +pub async fn read_metadata_from_rpm_headers( + db: Arc>, + task_size: usize, + headers: Vec<(Uuid, Header)>, +) -> Result<()> { + let (sender, receiver) = bounded::>(4); + let bulk_inseter = BulkInsert::new(receiver, db, task_size).unwrap(); + let mut bulk_sender = BulkSender::new(sender.clone(), 1000); + for (pkg_uuid, header) in headers { + let pkg_name = header.get_name().unwrap_or_default().to_string(); + let pkg_arch = header.get_arch().unwrap_or_default().to_string(); + parse_pkg_detail(&header, &mut bulk_sender, pkg_uuid, &pkg_name, &pkg_arch).await; + parse_pkg_format(&header, &mut bulk_sender, pkg_uuid).await; + parse_pkg_entry(&header, &mut bulk_sender, pkg_uuid, &pkg_name, &pkg_arch).await; + parse_pkg_file(&header, &mut bulk_sender, pkg_uuid, &pkg_name, &pkg_arch).await; + } + bulk_sender.flush().await; + sender.close(); + bulk_inseter.close().await; + Ok(()) +} +async fn 
parse_pkg_detail( + header: &Header, + bulk_sender: &mut BulkSender, + pkg_uuid: Uuid, + pkg_name: &str, + pkg_arch: &str, +) { + let mut pkg_detail = PkgDetail::default(); + pkg_detail.name = pkg_name.to_owned(); + pkg_detail.arch = pkg_arch.to_owned(); + pkg_detail.version = Version { + epoch: header.get_epoch().ok(), + version: header.get_version().ok().map(String::from), + release: header.get_release().ok().map(String::from), + }; + pkg_detail.checksum = CheckSum { + r#type: "md5".to_string(), + pkgid: "Yes".to_string(), + checksum: pkg_uuid.to_string(), + }; + pkg_detail.summary = header.get_summary().unwrap_or_default().join(";"); + pkg_detail.description = header.get_description().unwrap_or_default().join("\n"); + pkg_detail.packager = header.get_packager().unwrap_or_default().to_string(); + pkg_detail.url = header.get_url().unwrap_or_default().to_string(); + pkg_detail.time.build = header.get_build_time().ok(); + pkg_detail.time.file = match header.get_file_times() { + Ok(mut s) => { + s.sort(); + s.last().map(|x| *x) + } + Err(_) => pkg_detail.time.build, + }; + pkg_detail.size.installed = header.get_installed_size().ok(); + pkg_detail.size.archive = header.get_archive_size().ok(); + bulk_sender.create_package_vertex(pkg_uuid).await; + bulk_sender + .push(BulkInsertItem::VertexProperty( + pkg_uuid, + PkgDetailState::Name.as_identifier(), + ijson!(pkg_detail.name), + )) + .await; + bulk_sender + .push(BulkInsertItem::VertexProperty( + pkg_uuid, + PkgDetail::as_identifier(), + ijson!(pkg_detail), + )) + .await; +} +async fn parse_pkg_format(header: &Header, bulk_sender: &mut BulkSender, pkg_uuid: Uuid) { + let mut format_detail = FormatDetail::default(); + format_detail.license = header.get_license().ok().map(String::from); + format_detail.vendor = header.get_vendor().ok().map(String::from); + format_detail.group = header.get_group().ok().map(|x| x.join(";")); + format_detail.build_host = header.get_build_host().ok().map(String::from); + 
format_detail.source_rpm = header.get_source_rpm().ok().map(String::from); + bulk_sender + .push(BulkInsertItem::VertexProperty( + pkg_uuid, + FormatDetail::as_identifier(), + ijson!(format_detail), + )) + .await; +} +async fn parse_pkg_entry( + header: &Header, + bulk_sender: &mut BulkSender, + pkg_uuid: Uuid, + pkg_name: &str, + pkg_arch: &str, +) { + let entry_builder = EntryDetailBuilder::new(pkg_name, pkg_arch); + for provide_rpm_entry in header.get_provides().unwrap() { + let entry_detail = entry_builder.build_from_rpm_entry(provide_rpm_entry); + bulk_sender.send_provide_entry(entry_detail, pkg_uuid).await; + } + match header.get_requires() { + Ok(v) => { + bulk_sender + .sender_rpm_entry(v, &entry_builder, pkg_uuid, Relation::Requires) + .await; + } + Err(_) => {} + } + match header.get_conflicts() { + Ok(v) => { + bulk_sender + .sender_rpm_entry(v, &entry_builder, pkg_uuid, Relation::Conflicts) + .await; + } + Err(_) => {} + } + match header.get_obsoletes() { + Ok(v) => { + bulk_sender + .sender_rpm_entry(v, &entry_builder, pkg_uuid, Relation::Obsoletes) + .await; + } + Err(_) => {} + } +} +async fn parse_pkg_file( + header: &Header, + bulk_sender: &mut BulkSender, + pkg_uuid: Uuid, + pkg_name: &str, + pkg_arch: &str, +) { + if let Ok(files) = header.get_file_paths() { + for file in files { + if let Some(file_path) = file.to_str() { + bulk_sender + .send_file_entry(file_path, pkg_uuid, &pkg_name, &pkg_arch) + .await; + } + } + } +} +struct EntryDetailBuilder { + pkg_name: String, + arch: String, +} +impl EntryDetailBuilder { + fn new(pkg_name: &str, arch: &str) -> Self { + Self { + pkg_name: pkg_name.to_string(), + arch: arch.to_owned(), + } + } + fn build_from_rpm_entry(&self, rpm_entry: RpmEntry) -> EntryDetail { + EntryDetail { + entry_name: rpm_entry.name, + flags: rpm_entry.flags, + epoch: rpm_entry.epoch.map(|x| x.parse::().unwrap()), + ver: rpm_entry.version, + rel: rpm_entry.release, + pre: false, + pkg_name: self.pkg_name.clone(), + arch: 
self.arch.clone(), + } + } +} +impl BulkSender { + async fn sender_rpm_entry( + &mut self, + v: Vec, + entry_builder: &EntryDetailBuilder, + pkg_id: Uuid, + relation: Relation, + ) { + let mut res = Vec::new(); + for rpm_entry in v { + res.push(entry_builder.build_from_rpm_entry(rpm_entry)); + } + self.push(BulkInsertItem::VertexProperty( + pkg_id, + relation.as_identifier(), + ijson!(res), + )) + .await; + } +} diff --git a/rdnf/src/cache/parse_xml.rs b/rdnf/src/cache/parse_xml.rs new file mode 100644 index 0000000000000000000000000000000000000000..85f2ce13e0ecb486d6954895476fdce41ab69c31 --- /dev/null +++ b/rdnf/src/cache/parse_xml.rs @@ -0,0 +1,597 @@ +use std::borrow::Cow; +use std::io::BufReader; + +use anyhow::bail; +use anyhow::Result; +use indradb::ijson; +use indradb::BulkInsertItem; +use indradb::Edge; +use indradb::Identifier; +use quick_xml::events::attributes::Attributes; +use quick_xml::events::Event; +use quick_xml::Reader; +use serde_json::Map; +use serde_json::Value; +use uuid::Uuid; + +use super::mmap_file::MmapSlice; +use super::model::EntryDetail; +use super::model::FileDetail; +use super::model::FormatDetail; +use super::model::HeaderRange; +use super::model::PkgDetail; +use super::read_metadata::BulkSender; + +pub enum XmlState { + Ignore, + Package, + PkgDetail(PkgDetailState), + Format, + FormatDetail(FormatDetailState), + Relation(Relation), + Entry, +} + +impl XmlState { + pub fn as_identifier(self) -> Result { + match self { + XmlState::Package => Ok(Identifier::new("package").unwrap()), + XmlState::PkgDetail(pkg) => Ok(pkg.as_identifier()), + XmlState::FormatDetail(format) => Ok(format.as_identifier()), + XmlState::Entry => Ok(Identifier::new("entry")?), + _ => { + bail!("invalid identifier") + } + } + } + pub fn from_bytes(bytes: &[u8]) -> Self { + match bytes { + b"package" => XmlState::Package, + b"name" => XmlState::PkgDetail(PkgDetailState::Name), + b"arch" => XmlState::PkgDetail(PkgDetailState::Arch), + b"version" => 
XmlState::PkgDetail(PkgDetailState::Version), + b"checksum" => XmlState::PkgDetail(PkgDetailState::CheckSum), + b"summary" => XmlState::PkgDetail(PkgDetailState::Summary), + b"description" => XmlState::PkgDetail(PkgDetailState::Description), + b"packager" => XmlState::PkgDetail(PkgDetailState::Packager), + b"url" => XmlState::PkgDetail(PkgDetailState::Url), + b"time" => XmlState::PkgDetail(PkgDetailState::Time), + b"size" => XmlState::PkgDetail(PkgDetailState::Size), + b"location" => XmlState::PkgDetail(PkgDetailState::Location), + b"format" => XmlState::Format, + b"rpm:license" => XmlState::FormatDetail(FormatDetailState::License), + b"rpm:vendor" => XmlState::FormatDetail(FormatDetailState::Vendor), + b"rpm:group" => XmlState::FormatDetail(FormatDetailState::Group), + b"rpm:buildhost" => XmlState::FormatDetail(FormatDetailState::BuildHost), + b"rpm:sourcerpm" => XmlState::FormatDetail(FormatDetailState::SourceRpm), + b"rpm:header-range" => XmlState::FormatDetail(FormatDetailState::HeaderRange), + b"rpm:provides" => XmlState::Relation(Relation::Provides), + b"rpm:requires" => XmlState::Relation(Relation::Requires), + b"rpm:conflicts" => XmlState::Relation(Relation::Conflicts), + b"rpm:obsoletes" => XmlState::Relation(Relation::Obsoletes), + b"rpm:recommends" => XmlState::Relation(Relation::Recommends), + b"rpm:suggests" => XmlState::Relation(Relation::Suggests), + b"rpm:supplements" => XmlState::Relation(Relation::Supplements), + b"file" => XmlState::FormatDetail(FormatDetailState::File), + b"rpm:entry" => XmlState::Entry, + _ => XmlState::Ignore, + } + } +} +pub enum PkgDetailState { + Name, + Arch, + Version, + CheckSum, + Summary, + Description, + Packager, + Url, + Time, + Size, + Location, +} + +impl PkgDetailState { + pub fn as_identifier(self) -> Identifier { + match self { + PkgDetailState::Name => Identifier::new("pkg_name").unwrap(), + PkgDetailState::Arch => Identifier::new("arch").unwrap(), + PkgDetailState::Version => 
Identifier::new("version").unwrap(), + PkgDetailState::CheckSum => Identifier::new("checksum").unwrap(), + PkgDetailState::Summary => Identifier::new("summary").unwrap(), + PkgDetailState::Description => Identifier::new("description").unwrap(), + PkgDetailState::Packager => Identifier::new("packager").unwrap(), + PkgDetailState::Url => Identifier::new("url").unwrap(), + PkgDetailState::Time => Identifier::new("time").unwrap(), + PkgDetailState::Size => Identifier::new("size").unwrap(), + PkgDetailState::Location => Identifier::new("location").unwrap(), + } + } +} +pub enum FormatDetailState { + License, + Vendor, + Group, + BuildHost, + SourceRpm, + HeaderRange, + File, +} +impl FormatDetailState { + pub fn as_identifier(self) -> Identifier { + match self { + FormatDetailState::File => Identifier::new("file").unwrap(), + FormatDetailState::License => Identifier::new("license").unwrap(), + FormatDetailState::Group => Identifier::new("group").unwrap(), + FormatDetailState::SourceRpm => Identifier::new("sourcerpm").unwrap(), + FormatDetailState::Vendor => Identifier::new("vendor").unwrap(), + FormatDetailState::BuildHost => Identifier::new("buildhost").unwrap(), + FormatDetailState::HeaderRange => Identifier::new("header_range").unwrap(), + } + } +} +pub enum Relation { + Provides, + Requires, + Conflicts, + Obsoletes, + Recommends, + Suggests, + Supplements, +} + +impl Relation { + pub fn as_identifier(&self) -> Identifier { + match &self { + Relation::Provides => Identifier::new("provides").unwrap(), + Relation::Requires => Identifier::new("requires").unwrap(), + Relation::Conflicts => Identifier::new("conflicts").unwrap(), + Relation::Obsoletes => Identifier::new("obsoletes").unwrap(), + Relation::Recommends => Identifier::new("recommends").unwrap(), + Relation::Suggests => Identifier::new("suggests").unwrap(), + Relation::Supplements => Identifier::new("supplements").unwrap(), + } + } +} +#[allow(dead_code)] +pub fn attrs_to_map(attrs: Attributes) -> Map { + let 
mut map = Map::new(); + for (key, value) in attrs.map(|x| { + let attr = x.unwrap(); + (attr.key, attr.unescape_value().unwrap()) + }) { + map.insert( + String::from_utf8(key.as_ref().to_vec()).unwrap(), + Value::String(value.to_string()), + ); + } + map +} +pub fn attrs_to_vec(attrs: Attributes) -> Vec<(&[u8], Cow)> { + let mut v = Vec::new(); + for (key, value) in attrs.map(|x| { + let attr = x.unwrap(); + (attr.key, attr.unescape_value().unwrap()) + }) { + v.push((key.0, value)); + } + v +} +pub async fn read_xml_pkg( + reader: &mut Reader>, + buf: &mut Vec, + bulk_sender: &mut BulkSender, +) -> Result<()> { + let mut state = XmlState::Ignore; + let mut pkg_id = indradb::util::generate_uuid_v1(); + // let mut arch = None; + + let mut pkg_detail = PkgDetail::default(); + loop { + state = match (state, reader.read_event_into(buf)) { + (_, Ok(Event::Start(ref e))) => { + let state = XmlState::from_bytes(e.name().as_ref()); + match state { + XmlState::PkgDetail(ref pkg_state) => match pkg_state { + PkgDetailState::CheckSum => { + let v = attrs_to_vec(e.attributes()); + for (key, value) in v { + match key { + b"type" => pkg_detail.checksum.r#type = value.to_string(), + b"pkgid" => pkg_detail.checksum.pkgid = value.to_string(), + _ => {} + } + } + } + _ => {} + }, + _ => {} + } + state + } + (XmlState::Format, _) => { + read_xml_format( + reader, + pkg_id, + &pkg_detail.name, + buf, + bulk_sender, + &pkg_detail.arch, + ) + .await; + XmlState::Ignore + } + (state, Ok(Event::Text(ref bt))) => { + match state { + XmlState::PkgDetail(pkg_detail_state) => { + let v = bt.unescape().unwrap().to_string(); + match pkg_detail_state { + PkgDetailState::Name => pkg_detail.name = v, + PkgDetailState::Arch => pkg_detail.arch = v, + PkgDetailState::CheckSum => { + pkg_detail.checksum.checksum = v.clone(); + pkg_id = uuid::Builder::from_bytes(md5::compute(v).0).into_uuid(); + } + PkgDetailState::Summary => pkg_detail.summary = v, + PkgDetailState::Description => 
pkg_detail.description = v, + PkgDetailState::Packager => pkg_detail.packager = v, + PkgDetailState::Url => pkg_detail.url = v, + _ => {} + } + } + _ => {} + } + XmlState::Ignore + } + (_, Ok(Event::Empty(bs))) => { + match XmlState::from_bytes(bs.name().as_ref()) { + XmlState::PkgDetail(pkg_state) => { + let v = attrs_to_vec(bs.attributes()); + match pkg_state { + PkgDetailState::Version => { + for (key, value) in v { + match key { + b"epoch" => { + pkg_detail.version.epoch = + Some(value.parse::().unwrap()) + } + b"ver" => { + pkg_detail.version.version = Some(value.to_string()) + } + b"rel" => { + pkg_detail.version.release = Some(value.to_string()) + } + _ => {} + } + } + } + PkgDetailState::Time => { + for (key, value) in v { + match key { + b"file" => pkg_detail.time.file = value.parse::().ok(), + b"build" => { + pkg_detail.time.build = value.parse::().ok() + } + _ => {} + } + } + } + PkgDetailState::Size => { + for (key, value) in v { + match key { + b"package" => { + pkg_detail.size.package = value.parse::().ok() + } + b"installed" => { + pkg_detail.size.installed = value.parse::().ok() + } + b"archive" => { + pkg_detail.size.archive = value.parse::().ok() + } + _ => {} + } + } + } + PkgDetailState::Location => { + for (key, value) in v { + match key { + b"href" => pkg_detail.location = Some(value.to_string()), + _ => {} + } + } + } + _ => {} + } + } + _ => {} + } + XmlState::Ignore + } + (XmlState::Ignore, Ok(Event::End(ref e))) => { + match XmlState::from_bytes(e.name().as_ref()) { + XmlState::Package => { + bulk_sender.create_package_vertex(pkg_id).await; + bulk_sender + .push(BulkInsertItem::VertexProperty( + pkg_id, + PkgDetailState::Name.as_identifier(), + ijson!(pkg_detail.name), + )) + .await; + bulk_sender + .push(BulkInsertItem::VertexProperty( + pkg_id, + PkgDetail::as_identifier(), + ijson!(pkg_detail), + )) + .await; + pkg_id = indradb::util::generate_uuid_v1(); + } + _ => {} + }; + XmlState::Ignore + } + (_, Ok(Event::Eof)) => { + break; + } + 
(_, _) => XmlState::Ignore, + }; + } + Ok(()) +} +pub async fn read_xml_format( + reader: &mut Reader>, + pkg_id: Uuid, + pkg_name: &str, + buf: &mut Vec, + bulk_sender: &mut BulkSender, + arch: &String, +) { + let mut state = XmlState::Ignore; + let mut format_detail = FormatDetail::default(); + loop { + state = match (state, reader.read_event_into(buf)) { + (_, Ok(Event::Start(ref e))) => XmlState::from_bytes(e.name().as_ref()), + (XmlState::Relation(relation), _) => { + match relation { + Relation::Provides => { + read_xml_provide_entry(reader, pkg_id, buf, bulk_sender, pkg_name, arch) + .await; + } + _ => { + read_xml_entry(reader, pkg_id, &relation, buf, bulk_sender, pkg_name, arch) + .await; + } + } + XmlState::Ignore + } + (XmlState::FormatDetail(format_state), Ok(Event::Text(ref bt))) => { + let value = bt.unescape().ok().map(String::from); + match format_state { + FormatDetailState::License => { + format_detail.license = value; + } + FormatDetailState::Vendor => { + format_detail.vendor = value; + } + FormatDetailState::Group => { + format_detail.group = value; + } + FormatDetailState::BuildHost => { + format_detail.build_host = value; + } + FormatDetailState::SourceRpm => { + format_detail.source_rpm = value; + } + FormatDetailState::File => { + let file_path = value.unwrap(); + bulk_sender + .send_file_entry(file_path.as_str(), pkg_id, pkg_name, &arch) + .await; + } + _ => {} + } + XmlState::Ignore + } + (_, Ok(Event::Empty(ref bs))) => { + match XmlState::from_bytes(bs.name().as_ref()) { + XmlState::FormatDetail(ref format) => match format { + FormatDetailState::HeaderRange => { + let mut header_range = HeaderRange::default(); + let v = attrs_to_vec(bs.attributes()); + for (key, value) in v { + match key { + b"start" => { + header_range.start = value.parse::().unwrap(); + } + b"end" => header_range.end = value.parse::().unwrap(), + _ => {} + } + } + format_detail.header_range = Some(header_range); + } + _ => {} + }, + _ => {} + } + XmlState::Ignore + } 
+ (XmlState::Ignore, Ok(Event::End(ref e))) => { + match XmlState::from_bytes(e.name().as_ref()) { + XmlState::Format => { + bulk_sender + .push(BulkInsertItem::VertexProperty( + pkg_id, + FormatDetail::as_identifier(), + ijson!(format_detail), + )) + .await; + break; + } + _ => {} + }; + XmlState::Ignore + } + (_, _) => XmlState::Ignore, + }; + } +} +pub async fn read_xml_provide_entry( + reader: &mut Reader>, + pkg_id: Uuid, + buf: &mut Vec, + bulk_sender: &mut BulkSender, + pkg_name: &str, + arch: &String, +) { + let mut state = XmlState::Ignore; + loop { + state = match (state, reader.read_event_into(buf)) { + (_, Ok(Event::Empty(ref bs))) => { + match XmlState::from_bytes(bs.name().as_ref()) { + XmlState::Entry => { + let mut entry_detail = EntryDetail::new(pkg_name, arch); + for (key, value) in bs.attributes().map(|x| { + let attr = x.unwrap(); + (attr.key, attr.unescape_value().unwrap()) + }) { + match key.as_ref() { + b"name" => entry_detail.entry_name = value.to_string(), + b"flags" => entry_detail.flags = Some(value.to_string()), + b"epoch" => { + entry_detail.epoch = Some(value.parse::().unwrap()) + } + b"ver" => entry_detail.ver = Some(value.to_string()), + b"rel" => entry_detail.rel = Some(value.to_string()), + b"pre" => entry_detail.pre = true, + _ => {} + } + } + bulk_sender.send_provide_entry(entry_detail, pkg_id).await; + } + _ => {} + } + XmlState::Ignore + } + (_, Ok(Event::End(_))) => { + break; + } + (_, _) => XmlState::Ignore, + }; + } +} +pub async fn read_xml_entry( + reader: &mut Reader>, + pkg_id: Uuid, + edge_relation: &Relation, + buf: &mut Vec, + bulk_sender: &mut BulkSender, + pkg_name: &str, + arch: &String, +) { + let mut state = XmlState::Ignore; + let mut requires = Vec::new(); + loop { + state = match (state, reader.read_event_into(buf)) { + (_, Ok(Event::Empty(ref bs))) => { + match XmlState::from_bytes(bs.name().as_ref()) { + XmlState::Entry => { + let mut entry_detail = EntryDetail::new(pkg_name, arch); + for (key, value) in 
bs.attributes().map(|x| { + let attr = x.unwrap(); + (attr.key, attr.unescape_value().unwrap()) + }) { + match key.as_ref() { + b"name" => { + entry_detail.entry_name = value.to_string(); + } + b"flags" => entry_detail.flags = Some(value.to_string()), + b"epoch" => { + entry_detail.epoch = Some(value.parse::().unwrap()) + } + b"ver" => entry_detail.ver = Some(value.to_string()), + b"rel" => entry_detail.rel = Some(value.to_string()), + b"pre" => entry_detail.pre = true, + _ => {} + } + } + requires.push(entry_detail); + } + _ => {} + } + XmlState::Ignore + } + (_, Ok(Event::End(_))) => { + break; + } + (_, _) => XmlState::Ignore, + }; + } + bulk_sender + .push(BulkInsertItem::VertexProperty( + pkg_id, + edge_relation.as_identifier(), + ijson!(requires), + )) + .await +} +impl BulkSender { + pub async fn send_provide_entry(&mut self, entry_detail: EntryDetail, pkg_id: Uuid) { + let entry_id = + uuid::Builder::from_bytes(md5::compute(entry_detail.entry_name.clone()).0).into_uuid(); + self.create_entry_vertex(entry_id).await; + self.push(BulkInsertItem::VertexProperty( + entry_id, + EntryDetail::name_as_identifier(), + ijson!(entry_detail.entry_name), + )) + .await; + let edge = Edge::new(pkg_id, Relation::Provides.as_identifier(), entry_id); + self.push(BulkInsertItem::Edge(edge.clone())).await; + self.push(BulkInsertItem::EdgeProperty( + edge, + EntryDetail::as_identifier(), + ijson!(entry_detail), + )) + .await; + } + pub async fn send_file_entry( + &mut self, + file_path: &str, + pkg_id: Uuid, + pkg_name: &str, + arch: &str, + ) { + let file_uuid = uuid::Builder::from_bytes(md5::compute(file_path).0).into_uuid(); + let file_detail = FileDetail::new(file_path, pkg_name, arch); + self.create_entry_vertex(file_uuid).await; + self.push(BulkInsertItem::VertexProperty( + file_uuid, + FormatDetailState::File.as_identifier(), + ijson!(file_path), + )) + .await; + let edge = Edge::new(pkg_id, FormatDetailState::File.as_identifier(), file_uuid); + 
self.push(BulkInsertItem::Edge(edge.clone())).await; + self.push(BulkInsertItem::EdgeProperty( + edge, + FileDetail::as_identifier(), + ijson!(file_detail), + )) + .await + } +} +// let file_name = value.unwrap(); +// let id = uuid::Builder::from_bytes(md5::compute(file_name.as_str()).0) +// .into_uuid(); +// bulk_sender.create_entry_vertex(id).await; +// bulk_sender +// .push(BulkInsertItem::VertexProperty( +// id, +// FormatDetailState::File.as_identifier(), +// ijson!(file_name), +// )) +// .await; +// let edge = Edge::new(pkg_id, FormatDetailState::File.as_identifier(), id); +// bulk_sender.push(BulkInsertItem::Edge(edge.clone())).await; diff --git a/rdnf/src/cache/read_metadata.rs b/rdnf/src/cache/read_metadata.rs new file mode 100644 index 0000000000000000000000000000000000000000..b96dddfb2c7f8385d03c1d04c52cc45e0a1b9cb7 --- /dev/null +++ b/rdnf/src/cache/read_metadata.rs @@ -0,0 +1,133 @@ +use anyhow::Result; +use async_channel::{bounded, Receiver, Sender}; + +use indradb::{BulkInsertItem, Database, Datastore, RocksdbDatastore, Vertex}; +use quick_xml::Reader; +use std::{io::BufReader, mem::replace, path::Path, sync::Arc}; +use tokio::task::JoinHandle; +use uuid::Uuid; + +use crate::cache::{mmap_file::MmapSlice, parse_xml::read_xml_pkg}; + +use super::parse_xml::XmlState; +pub struct BulkInsert { + receiver: Receiver>, + workers: Vec>, +} +impl BulkInsert { + pub fn new( + receiver: Receiver>, + db: Arc>, + task_size: usize, + ) -> Result { + let mut workers = Vec::default(); + for _ in 0..task_size { + let rx = receiver.clone(); + let db_clone = db.clone(); + workers.push(tokio::spawn(async move { + while let Ok(buf) = rx.recv().await { + db_clone.bulk_insert(buf).unwrap(); + } + })) + } + Ok(Self { receiver, workers }) + } + pub async fn close(self) { + for worker in self.workers { + worker.await.unwrap(); + } + self.receiver.close(); + } +} +pub struct BulkSender { + buffer_size: usize, + sender: Sender>, + buffer: Vec, +} +impl BulkSender { + pub fn 
new(sender: Sender>, buffer_size: usize) -> Self { + Self { + buffer_size, + sender: sender, + buffer: Vec::with_capacity(buffer_size), + } + } + pub async fn push(&mut self, item: BulkInsertItem) { + self.buffer.push(item); + if self.buffer.len() >= self.buffer.capacity() { + let old_buffer = replace(&mut self.buffer, Vec::with_capacity(self.buffer_size)); + self.sender.send(old_buffer).await.unwrap(); + } + } + pub async fn flush(self) { + if !self.buffer.is_empty() { + self.sender.send(self.buffer).await.unwrap(); + } + } + pub async fn create_package_vertex(&mut self, id: Uuid) { + self.push(BulkInsertItem::Vertex(Vertex { + id, + t: XmlState::Package.as_identifier().unwrap(), + })) + .await; + } + pub async fn create_entry_vertex(&mut self, id: Uuid) { + self.push(BulkInsertItem::Vertex(Vertex { + id, + t: XmlState::Entry.as_identifier().unwrap(), + })) + .await; + } +} +impl AsRef for BulkInsert { + fn as_ref(&self) -> &Self { + &self + } +} +impl AsMut for BulkInsert { + fn as_mut(&mut self) -> &mut BulkInsert { + self + } +} +impl AsMut for BulkSender { + fn as_mut(&mut self) -> &mut BulkSender { + self + } +} +pub async fn read_metadata_from_xml>( + db: &Arc>, + path: P, +) -> Result<()> { + let f = tokio::fs::File::open(path).await?; + let cores = num_cpus::get(); + let (sender, receiver) = bounded::>(8); + let bulk_inseter = BulkInsert::new(receiver, db.clone(), cores * 2).unwrap(); + let mut slices = MmapSlice::splitn_file_at(&f, cores * 2, b"").await?; + let mut handles = Vec::new(); + loop { + match slices.pop() { + Some(slice) => { + let bulk_sender = BulkSender::new(sender.clone(), 1000); + handles.push(tokio::spawn(read_slice(bulk_sender, slice))); + } + None => { + break; + } + } + } + for handle in handles { + handle.await??; + } + sender.close(); + bulk_inseter.close().await; + db.sync()?; + Ok(()) +} +#[inline] +async fn read_slice(mut bulk_sender: BulkSender, slice: MmapSlice) -> Result<()> { + let mut buf: Vec = Vec::new(); + let mut reader = 
Reader::from_reader(BufReader::new(slice)); + read_xml_pkg(&mut reader, buf.as_mut(), bulk_sender.as_mut()).await?; + bulk_sender.flush().await; + Ok(()) +} diff --git a/rdnf/src/cache/repo_data.rs b/rdnf/src/cache/repo_data.rs new file mode 100644 index 0000000000000000000000000000000000000000..12d32489c206962d47d0dd1f879e34744e5db65b --- /dev/null +++ b/rdnf/src/cache/repo_data.rs @@ -0,0 +1,434 @@ +use std::{ops::Deref, sync::Arc}; + +use super::{ + model::{EntryDetail, FileDetail, FormatDetail, PkgDetail}, + parse_xml::{PkgDetailState, Relation}, +}; + +use anyhow::Result; +use indradb::{ + ijson, CountQueryExt, Database, Datastore, Json, PipePropertyQuery, PipeQuery, Query, + QueryOutputValue, SpecificVertexQuery, VertexWithPropertyPresenceQuery, + VertexWithPropertyValueQuery, +}; + +use serde_json::Value; +use uuid::Uuid; +pub struct RepoData(Arc>); +impl Deref for RepoData { + type Target = Arc>; + fn deref(&self) -> &Self::Target { + &self.0 + } +} +impl Clone for RepoData { + fn clone(&self) -> Self { + Self(self.0.clone()) + } +} +impl RepoData { + pub fn new(db: &Arc>) -> Self { + Self(db.clone()) + } + pub fn get_pkg_uuid_by_pkg_name(self, pkg_name: Arc) -> Option { + let query = VertexWithPropertyValueQuery::new( + PkgDetailState::Name.as_identifier(), + ijson!(pkg_name.as_str()), + ); + match self.0.get(Query::VertexWithPropertyValue(query.into())) { + Ok(mut r) => match r.pop() { + Some(q) => match q { + QueryOutputValue::Vertices(mut s) => { + match s.pop() { + Some(t) => return Some(t.id), + None => {} + }; + } + _ => {} + }, + None => {} + }, + Err(_) => {} + }; + None + } + fn get_pkg_relations_by_uuid( + &self, + pkg_uuid: Uuid, + relation: Relation, + ) -> Option> { + if let Ok(q) = + PipePropertyQuery::new(Box::new(SpecificVertexQuery::single(pkg_uuid).into())) + { + let q = q.name(relation.as_identifier()); + if let Ok(mut r) = self.get(q) { + if let Some(output) = r.pop() { + match output { + QueryOutputValue::VertexProperties(mut vps) => { 
+ if let Some(mut vp) = vps.pop() { + if let Some(np) = vp.props.pop() { + return parse_entry_detail(np.value); + } + } + } + _ => {} + } + } + }; + } + None + } + pub async fn get_pkg_requires_by_uuid(&self, pkg_uuid: Uuid) -> Option> { + self.get_pkg_relations_by_uuid(pkg_uuid, Relation::Requires) + } + pub async fn get_pkg_conflicts_by_uuid(&self, pkg_uuid: Uuid) -> Option> { + self.get_pkg_relations_by_uuid(pkg_uuid, Relation::Conflicts) + } + pub async fn get_pkg_obsoletes_by_uuid(&self, pkg_uuid: Uuid) -> Option> { + self.get_pkg_relations_by_uuid(pkg_uuid, Relation::Obsoletes) + } + pub async fn get_pkg_provides_by_uuid(self, pkg_uuid: Uuid) -> Option> { + if let Ok(pipe_query) = PipeQuery::new( + Box::new(SpecificVertexQuery::single(pkg_uuid).into()), + indradb::EdgeDirection::Outbound, + ) { + if let Ok(pipe_property_query) = PipePropertyQuery::new(Box::new(pipe_query.into())) { + if let Ok(mut r) = self.get(pipe_property_query.name(EntryDetail::as_identifier())) + { + if let Some(output) = r.pop() { + match output { + QueryOutputValue::EdgeProperties(eps) => { + // dbg!(eps); + let res = eps + .iter() + .map(|ep| { + serde_json::from_value::( + ep.props[0].value.0.as_ref().clone(), + ) + .unwrap() + }) + .collect::>(); + if res.len() > 0 { + return Some(res); + } + } + _ => {} + } + } + }; + }; + }; + None + } + pub async fn get_what_pkg_provide_entry_by_name( + self, + entry_name: Arc, + ) -> Option> { + let entry_id = uuid::Builder::from_bytes(md5::compute(entry_name.as_str()).0).into_uuid(); + if let Ok(pipe_query) = PipeQuery::new( + Box::new(SpecificVertexQuery::single(entry_id).into()), + indradb::EdgeDirection::Inbound, + ) { + if let Ok(pipe_property_query) = PipePropertyQuery::new(Box::new(pipe_query.into())) { + if let Ok(mut r) = self.get(pipe_property_query.name(EntryDetail::as_identifier())) + { + if let Some(output) = r.pop() { + match output { + QueryOutputValue::EdgeProperties(eps) => { + // dbg!(&eps); + let res = eps + .iter() + 
.map(|ep| { + ( + ep.edge.outbound_id, + serde_json::from_value::( + ep.props[0].value.0.as_ref().clone(), + ) + .unwrap(), + ) + }) + .collect::>(); + if res.len() > 0 { + return Some(res); + } + } + _ => {} + } + } + }; + }; + }; + None + } + pub async fn get_what_pkg_provide_file_by_path( + self, + file_path: Arc, + ) -> Option> { + let file_uuid = uuid::Builder::from_bytes(md5::compute(file_path.as_str()).0).into_uuid(); + if let Ok(pipe_query) = PipeQuery::new( + Box::new(SpecificVertexQuery::single(file_uuid).into()), + indradb::EdgeDirection::Inbound, + ) { + if let Ok(pipe_property_query) = PipePropertyQuery::new(Box::new(pipe_query.into())) { + if let Ok(mut r) = self.get(pipe_property_query.name(FileDetail::as_identifier())) { + // dbg!(&r); + if let Some(output) = r.pop() { + match output { + QueryOutputValue::EdgeProperties(eps) => { + let res = eps + .iter() + .map(|ep| { + ( + ep.edge.outbound_id, + serde_json::from_value::( + ep.props[0].value.0.as_ref().clone(), + ) + .unwrap(), + ) + }) + .collect::>(); + if res.len() > 0 { + return Some(res); + } + } + _ => {} + } + } + }; + }; + }; + None + } + pub async fn get_pkg_detail_by_name(&self, pkg_name: &str) -> Option> { + if let Ok(pipe_property_query) = PipePropertyQuery::new(Box::new( + VertexWithPropertyValueQuery::new( + PkgDetailState::Name.as_identifier(), + ijson!(pkg_name), + ) + .into(), + )) { + if let Ok(mut r) = self.get(pipe_property_query.name(PkgDetail::as_identifier())) { + if let Some(output) = r.pop() { + match output { + QueryOutputValue::VertexProperties(vps) => { + let res = vps + .iter() + .map(|vp| { + ( + vp.vertex.id, + serde_json::from_value::( + vp.props[0].value.0.as_ref().clone(), + ) + .unwrap(), + ) + }) + .collect::>(); + if res.len() > 0 { + return Some(res); + } + } + _ => {} + } + } + } + }; + None + } + pub async fn get_pkg_detail_by_uuid(self, pkg_uuid: Uuid) -> Option { + if let Ok(pipe_property_query) = + 
PipePropertyQuery::new(Box::new(SpecificVertexQuery::new(vec![pkg_uuid]).into())) + { + if let Ok(mut r) = self.get(pipe_property_query.name(PkgDetail::as_identifier())) { + if let Some(output) = r.pop() { + match output { + QueryOutputValue::VertexProperties(vps) => { + let mut res = vps + .iter() + .map(|vp| { + serde_json::from_value::( + vp.props[0].value.0.as_ref().clone(), + ) + .unwrap() + }) + .collect::>(); + return res.pop(); + } + _ => {} + } + } + } + }; + None + } + pub async fn get_pkg_version_by_uuid(self, pkg_uuid: Uuid) -> Option { + if let Some(pkg_detail) = self.get_pkg_detail_by_uuid(pkg_uuid).await { + Some(pkg_detail.version) + } else { + None + } + } + pub async fn get_format_detail_by_uuid(&self, pkg_uuid: Uuid) -> Option { + if let Ok(pipe_property_query) = + PipePropertyQuery::new(Box::new(SpecificVertexQuery::single(pkg_uuid).into())) + { + if let Ok(mut r) = self.get(pipe_property_query.name(FormatDetail::as_identifier())) { + if let Some(output) = r.pop() { + match output { + QueryOutputValue::VertexProperties(mut vps) => { + if let Some(mut vp) = vps.pop() { + if let Some(np) = vp.props.pop() { + return serde_json::from_value::(Value::Object( + np.value.as_object().unwrap().to_owned(), + )) + .ok(); + } + } + } + _ => {} + } + } + }; + } + None + } + pub async fn count_pkg(&self) -> Result { + let mut count = 0; + if let Some(output) = self + .get( + VertexWithPropertyPresenceQuery::new(PkgDetailState::Name.as_identifier()) + .count()?, + )? 
+ .pop() + { + match output { + QueryOutputValue::Count(c) => { + count = c; + } + _ => {} + } + }; + Ok(count) + } +} +fn parse_entry_detail(json: Json) -> Option> { + let mut entries = Vec::new(); + if let Some(v) = json.as_array() { + for value in v { + match serde_json::from_value::(value.clone()) { + Ok(entry) => entries.push(entry), + Err(_) => {} + } + } + } + if entries.is_empty() { + None + } else { + Some(entries) + } +} +#[cfg(test)] +mod tests { + use std::{path::Path, sync::Arc}; + + use crate::{conf::repo_conf::RepoConfig, repo::Repo}; + use anyhow::Result; + async fn get_test_repo() -> Result { + let _repo_name = "test_repo"; + let test_cache_db_path = "tests/test_repo"; + let xml_path = + "assest/54bbae6e9d4cd4865a55f7558daef86574cddc5f2a4f8a0d9c74f946e1a45dd3-primary.xml"; + let db_path = Path::new(test_cache_db_path); + let repo = RepoConfig::default(); + if db_path.is_dir() { + Repo::open(Arc::new(repo), db_path).await + } else { + Repo::create(Arc::new(repo), test_cache_db_path, xml_path).await + } + } + #[tokio::test(flavor = "multi_thread")] + async fn test_get_pkg_id() -> Result<()> { + let repo = get_test_repo().await?; + let pkg_id = repo + .data + .get_pkg_uuid_by_pkg_name(Arc::new("389-ds-base".to_string())) + .unwrap(); + dbg!(pkg_id); + Ok(()) + } + #[tokio::test] + async fn test_pkg_requires() -> Result<()> { + let repo = get_test_repo().await?; + let pkg_uuid = repo + .data + .clone() + .get_pkg_uuid_by_pkg_name(Arc::new("389-ds-base-libs".to_string())) + .unwrap(); + // let requires = repo.data.get_pkg_requires_by_uuid(pkg_uuid).await.unwrap(); + // let conflcts = repo.data.get_pkg_conflicts_by_uuid(pkg_uuid).await.unwrap(); + // let obsoletes = repo.data.get_pkg_obsoletes_by_uuid(pkg_uuid).await.unwrap(); + let provides = repo.data.get_pkg_provides_by_uuid(pkg_uuid).await; + dbg!(provides); + // dbg!(requires); + // dbg!(conflcts); + // dbg!(obsoletes); + Ok(()) + } + #[tokio::test] + async fn test_pkg_provide() -> Result<()> { + 
let repo = get_test_repo().await?; + let k = repo + .data + .get_what_pkg_provide_entry_by_name(Arc::new("xz-libs(x86-64)".to_string())) + .await; + dbg!(k); + Ok(()) + } + #[tokio::test] + async fn test_pkg_provide_file() -> Result<()> { + let repo = get_test_repo().await?; + let k = repo + .data + .get_what_pkg_provide_file_by_path(Arc::new("/etc/fedora-release".to_string())) + .await; + dbg!(k); + Ok(()) + } + #[tokio::test] + async fn test_pkg_detail() -> Result<()> { + let repo = get_test_repo().await?; + let pkg_uuid = repo + .data + .clone() + .get_pkg_uuid_by_pkg_name(Arc::new("389-ds-base".to_string())) + .unwrap(); + + let detail = repo.data.get_pkg_detail_by_uuid(pkg_uuid).await.unwrap(); + + dbg!(detail); + Ok(()) + } + #[tokio::test] + async fn test_pkg_count() -> Result<()> { + let repo = get_test_repo().await?; + let p = repo.data.count_pkg().await?; + dbg!(p); + Ok(()) + } + // #[tokio::test] + // async fn test_pkg_conflicts() -> Result<()> { + // let repo = get_test_repo().await?; + // let pkg_uuid = repo.data.get_pkg_uuid_by_pkg_name("389-ds-base").unwrap(); + // let p = repo.data.get_pkg_requires_by_uuid(pkg_uuid).unwrap(); + // dbg!(p); + // Ok(()) + // } + #[test] + fn testa() { + let path = "tests/test_repo"; + if Path::new(path).is_dir() { + dbg!("a"); + } else { + dbg!("b"); + }; + } +} diff --git a/rdnf/src/cache/ver.rs b/rdnf/src/cache/ver.rs new file mode 100644 index 0000000000000000000000000000000000000000..58ee0b560d9e0ebf8acdad96be02126ecf50e291 --- /dev/null +++ b/rdnf/src/cache/ver.rs @@ -0,0 +1,446 @@ +use std::cmp::Ordering; + +use rpm::RpmEntry; +use serde::{Deserialize, Serialize}; + +use super::model::EntryDetail; +fn partial(s: &Option, other: &Option) -> Option { + match (s, other) { + (Some(s), Some(o)) => { + let mut x_v = split_num_alpha(s); + let mut y_v = split_num_alpha(o); + match x_v.len().cmp(&y_v.len()) { + Ordering::Greater => { + for _ in 0..x_v.len() - y_v.len() { + y_v.push("0".to_string()); + } + } + 
Ordering::Less => { + for _ in 0..y_v.len() - x_v.len() { + x_v.push("0".to_string()); + } + } + _ => {} + }; + for i in 0..x_v.len() { + match x_v[i].to_lowercase().partial_cmp(&y_v[i].to_lowercase()) { + Some(std::cmp::Ordering::Equal) => {} + _ => { + if x_v[i].len() == y_v[i].len() { + return x_v[i].partial_cmp(&y_v[i]); + } else { + let x = x_v[i].parse::().unwrap_or_default(); + let y = y_v[i].parse::().unwrap_or_default(); + return x.partial_cmp(&y); + } + } + } + } + } + (Some(_), None) => return Some(Ordering::Greater), + _ => {} + } + Some(Ordering::Equal) +} +// fn partial(s: &str, other: &str) -> Option { +// let mut x_v = split_num_alpha(s); +// let mut y_v = split_num_alpha(other); +// match x_v.len().cmp(&y_v.len()) { +// Ordering::Greater => { +// for _ in 0..x_v.len() - y_v.len() { +// y_v.push("0".to_string()); +// } +// } +// Ordering::Less => { +// for _ in 0..y_v.len() - x_v.len() { +// x_v.push("0".to_string()); +// } +// } +// _ => {} +// }; +// for i in 0..x_v.len() { +// match x_v[i].to_lowercase().partial_cmp(&y_v[i].to_lowercase()) { +// Some(std::cmp::Ordering::Equal) => {} +// _ => { +// if x_v[i].len() == y_v[i].len() { +// return x_v[i].partial_cmp(&y_v[i]); +// } else { +// let x = x_v[i].parse::().unwrap_or_default(); +// let y = y_v[i].parse::().unwrap_or_default(); +// return x.partial_cmp(&y); +// } +// } +// }; +// } +// Some(Ordering::Equal) +// } + +fn split_num_alpha(s: &str) -> Vec { + let mut tmp = String::new(); + enum ArchiveState { + Empty, + Alpha, + Numeric, + } + let mut archive = ArchiveState::Empty; + let mut result: Vec = Vec::new(); + for ch in s.chars() { + if ch.is_alphabetic() { + match archive { + ArchiveState::Numeric => { + result.push(tmp.clone()); + tmp.clear(); + } // a7 a.7 + _ => {} + } + archive = ArchiveState::Alpha; + tmp += &ch.to_string(); + } else if ch.is_numeric() { + match archive { + ArchiveState::Alpha => { + result.push(tmp.clone()); + tmp.clear() + } + _ => {} + } + archive = 
ArchiveState::Numeric; + tmp += &ch.to_string() + } else { + match archive { + ArchiveState::Empty => {} + _ => { + result.push(tmp.clone()); + tmp.clear(); + archive = ArchiveState::Empty + } + } + } + } + match archive { + ArchiveState::Empty => {} + _ => { + result.push(tmp.clone()); + } + } + result +} +#[derive(Debug, Serialize, Deserialize, Default, Eq, Hash, Clone)] +pub struct Version { + pub epoch: Option, + pub version: Option, + pub release: Option, +} +// #[inline] +// pub fn option_string_to_str(s: &Option) -> &str { +// s.as_ref().map_or_else(|| "", |v| v.as_str()) +// } +impl ::core::cmp::PartialEq for Version { + fn eq(&self, other: &Self) -> bool { + return if self.epoch.unwrap_or_default() == other.epoch.unwrap_or_default() { + match partial(&self.version, &other.version) { + Some(Ordering::Equal) => match partial(&self.release, &other.release) { + Some(Ordering::Equal) => true, + _ => false, + }, + _ => false, + } + // match partial( + // option_string_to_str(&self.version), + // option_string_to_str(&other.version), + // ) { + // Some(core::cmp::Ordering::Equal) => { + // match partial( + // option_string_to_str(&self.release), + // option_string_to_str(&other.release), + // ) { + // Some(Ordering::Equal) => true, + // _ => false, + // } + // } + // _ => false, + // } + } else { + false + }; + } +} +impl ::core::cmp::PartialOrd for Version { + fn partial_cmp(&self, other: &Self) -> Option { + match self + .epoch + .unwrap_or_default() + .partial_cmp(&other.epoch.unwrap_or_default()) + { + Some(core::cmp::Ordering::Equal) => {} + ord => return ord, + } + // match partial( + // option_string_to_str(&self.version), + // option_string_to_str(&other.version), + // ) { + // Some(core::cmp::Ordering::Equal) => {} + // ord => return ord, + // }; + // partial( + // option_string_to_str(&self.release), + // option_string_to_str(&other.release), + // ) + match partial(&self.version, &other.version) { + Some(Ordering::Equal) => {} + ord => { + return ord; + 
} + }; + partial(&self.version, &other.version) + } +} +impl Version { + // [epoch:]version[-release] + pub fn from_str(v: &str) -> Self { + let v = v.trim_end().trim_start(); + let mut epoch = None; + let mut version = None; + let mut release = None; + let rest = match v.split_once(":") { + Some((e, r)) => { + epoch = e.parse::().ok(); + r + } + None => v, + }; + let v = match rest.split_once("-") { + Some((v, r)) => { + release = Some(r.to_owned()); + v + } + None => rest, + }; + if v != "" { + version = Some(v.to_owned()); + } + Self { + epoch, + version, + release, + } + } + pub fn from_entry_detail(value: &EntryDetail) -> Self { + Version { + epoch: value.epoch, + version: value.ver.clone(), + release: value.rel.clone(), + } + } + pub fn from_rpm_entry(value: &RpmEntry) -> Self { + Version { + epoch: value.epoch.clone().map(|x| x.parse::().unwrap()), + version: value.version.clone(), + release: value.release.clone(), + } + } + pub fn new(epoch: Option, version: &Option, release: &Option) -> Self { + Version { + epoch, + version: version.clone(), + release: release.clone(), + } + } + pub fn check_version( + require: &Version, + provide: &Version, + require_flag: &Option, + provide_flag: &Option, + ) -> bool { + match (require_flag, provide_flag) { + (Some(r), Some(p)) => match r.as_str() { + "LT" => match p.as_str() { + "GE" | "EQ" | "GT" => require > provide, + _ => true, + }, + "LE" => match p.as_str() { + "GE" | "EQ" => require >= provide, + "GT" => require > provide, + _ => true, + }, + "EQ" => match p.as_str() { + "LE" => require >= provide, + "LT" => require > provide, + "EQ" => require == provide, + "GT" => require < provide, + "GE" => require <= provide, + _ => true, + }, + "GE" => match p.as_str() { + "LE" | "EQ" => require <= provide, + "LT" => require < provide, + _ => true, + }, + "GT" => match p.as_str() { + "LE" | "EQ" | "LT" => require < provide, + _ => true, + }, + _ => false, + }, + _ => true, + } + } +} +#[cfg(test)] +mod tests { + use 
crate::cache::model::EntryDetail; + + use super::*; + use std::cmp::Ordering; + #[test] + fn test_from_str() { + assert_eq!( + Version::from_str("9:1.0.2-1.fc37"), + Version { + epoch: Some(9), + version: Some("1.0.2".to_owned()), + release: Some("1.fc37".to_owned()), + } + ); + assert_eq!( + Version::from_str("1.0.2-1.fc37"), + Version { + epoch: None, + version: Some("1.0.2".to_owned()), + release: Some("1.fc37".to_owned()), + } + ); + assert_eq!( + Version::from_str("9:1.0.2"), + Version { + epoch: Some(9), + version: Some("1.0.2".to_owned()), + release: None, + } + ); + assert_eq!( + Version::from_str(""), + Version { + epoch: None, + version: None, + release: None, + } + ); + } + #[test] + fn test_version_compare() { + let e1 = Version { + epoch: None, + version: Some("1.2".to_string()), + release: None, + }; + let mut e2 = Version { + epoch: Some(0), + version: Some("1.2.0".to_string()), + release: Some("1.fc37".to_string()), + }; + assert!(e1 <= e2); + e2.release = None; + assert!(e1 == e2); + } + + #[test] + fn test_partial_compare() { + assert_eq!( + partial(&Some("1.0010".to_owned()), &Some("1.9".to_owned())), + Some(Ordering::Greater) + ); + assert_eq!( + partial(&Some("1.05".to_owned()), &Some("1.5".to_owned())), + Some(Ordering::Equal) + ); + assert_eq!( + partial(&Some("1.0".to_owned()), &Some("1".to_owned())), + Some(Ordering::Equal) + ); + assert_eq!( + partial(&Some("2.50".to_owned()), &Some("2.5".to_owned())), + Some(Ordering::Greater) + ); + assert_eq!( + partial(&Some("fc4".to_owned()), &Some("fc.4".to_owned())), + Some(Ordering::Equal) + ); + assert_eq!( + partial(&Some("FC5".to_owned()), &Some("fc4".to_owned())), + Some(Ordering::Greater) + ); + assert_eq!( + partial(&Some("2.5.0".to_owned()), &Some("2.5".to_owned())), + Some(Ordering::Equal) + ); + assert_eq!( + partial(&Some("2~~".to_owned()), &Some("1.9.9.9".to_owned())), + Some(Ordering::Greater) + ); + assert_eq!( + partial(&Some("2~".to_owned()), &Some("2.0.0.1".to_owned())), + 
Some(Ordering::Less) + ); + assert_eq!( + partial(&Some("2~~".to_owned()), &Some("2.0.0".to_owned())), + Some(Ordering::Equal) + ); + } + #[test] + fn test_from_require() { + let require_detail = EntryDetail { + entry_name: "abc".to_string(), + flags: Some("EQ".to_string()), + epoch: Some(0), + ver: Some("1.2.0".to_string()), + rel: Some("fc.37".to_string()), + pre: true, + pkg_name: "A".to_string(), + arch: "x86_64".to_string(), + }; + let v = Version::from_entry_detail(&require_detail); + assert_eq!( + v, + Version { + epoch: Some(0), + version: Some("1.2.0".to_string()), + release: Some("fc.37".to_string()) + } + ); + assert_eq!(require_detail.ver, Some("1.2.0".to_string())); + } + + #[test] + fn test_check_ver() { + let mut require = Version { + epoch: Some(0), + version: Some("5.2.5".to_owned()), + release: None, + }; + let mut provide = Version { + epoch: Some(0), + version: Some("5.2.5".to_owned()), + release: Some("10.fc37".to_owned()), + }; + assert!(Version::check_version( + &require, + &provide, + &Some("EQ".to_owned()), + &Some("EQ".to_owned()), + )); + provide.version = None; + assert!(!Version::check_version( + &require, + &provide, + &Some("EQ".to_owned()), + &Some("EQ".to_owned()), + )); + require.version = None; + assert!(Version::check_version( + &require, + &provide, + &Some("EQ".to_owned()), + &Some("EQ".to_owned()) + )); + } +} diff --git a/rdnf/src/cli.rs b/rdnf/src/cli.rs index 25e0c5d26f4ea17b03a9646d76dd7cfe4ac8d9c1..f58558af57f18261b9416cfbcfa8a6ef6d2776df 100644 --- a/rdnf/src/cli.rs +++ b/rdnf/src/cli.rs @@ -1,5 +1,3 @@ -use anyhow::bail; -use anyhow::Result; use clap::arg; use clap::command; use clap::Args; @@ -7,7 +5,7 @@ use clap::Parser; use clap::Subcommand; use crate::default::RDNF_CONF_FILE; -use crate::errors::ERROR_RDNF_RPM_INIT; +// use crate::errors::ERROR_RDNF_RPM_INIT; use clap::ArgAction::Append; use clap::ArgAction::SetTrue; #[derive(Parser, Debug, Clone)] @@ -18,7 +16,7 @@ pub struct Cli { 
#[arg(default_value_t=String::from("/"),long,value_parser)] pub installroot: String, #[arg(default_value_t=String::from(RDNF_CONF_FILE),short='c',long,value_parser)] - pub config_file: String, + pub config_file_path: String, #[arg(long,action=SetTrue)] pub plugins: bool, ///download all dependencies even if already installed @@ -120,8 +118,8 @@ pub struct AlterOption { pub tsflags_noscripts: bool, } #[derive(Args, Clone, Debug)] -pub struct InfoOption{ - pub pkgs:Vec, +pub struct InfoOption { + pub pkgs: Vec, #[arg(long,action=SetTrue)] pub all: bool, #[arg(long,action=SetTrue)] @@ -129,26 +127,27 @@ pub struct InfoOption{ #[arg(long,action=SetTrue)] pub available: bool, #[arg(long,action=SetTrue)] - pub extras:bool, + pub extras: bool, #[arg(long,action=SetTrue)] - pub obsoletes:bool, + pub obsoletes: bool, #[arg(long,action=SetTrue)] - pub recent:bool, + pub recent: bool, #[arg(long,action=SetTrue)] - pub upgrades:bool, + pub upgrades: bool, #[arg(long,action=SetTrue)] - pub updates:bool, + pub updates: bool, #[arg(long,action=SetTrue)] - pub dwongrades:bool, + pub dwongrades: bool, } impl Cli { - pub fn init(self) -> Result { + pub fn init(self) -> Self { let cli = self.check_fs_path(); - Ok(cli) + cli } pub fn check_fs_path(mut self) -> Self { self.installroot = self.installroot.trim_end_matches("/").to_string() + "/"; - self.config_file = self.installroot.clone() + self.config_file.trim_start_matches("/"); + // self.config_file_path = + // self.installroot.clone() + self.config_file_path.trim_start_matches("/"); match self.reposdir { Some(s) => { self.reposdir = Some(self.installroot.clone() + s.trim_start_matches("/")); @@ -158,11 +157,11 @@ impl Cli { self } } -pub fn rpm_init() -> Result<()> { - unsafe { - if rpm_sys::ffi::rpmReadConfigFiles(0 as *mut i8, 0 as *mut i8) != 0 { - bail!(ERROR_RDNF_RPM_INIT); - }; - } - Ok(()) -} +// pub fn rpm_init() -> Result<()> { +// unsafe { +// if rpm_sys::ffi::rpmReadConfigFiles(0 as *mut i8, 0 as *mut i8) != 0 { +// 
bail!(ERROR_RDNF_RPM_INIT); +// }; +// } +// Ok(()) +// } diff --git a/rdnf/src/command/alter.rs b/rdnf/src/command/alter.rs new file mode 100644 index 0000000000000000000000000000000000000000..df6e75cf883d2198d730b081fd1e87d460acc5dd --- /dev/null +++ b/rdnf/src/command/alter.rs @@ -0,0 +1,47 @@ +use std::sync::Arc; + +use crate::{ + cache::installed::InstalledRepo, cli::AlterOption, default::DEFAULT_RPMDB_LOCATION, + repo::CmdRepo, solve::SolveContext, Rdnf, +}; +use anyhow::Result; +#[derive(Debug, Clone)] +pub enum AlterType { + AutoErase, + AutoEraseAll, + DownGrade, + DownGradeAll, + Erase, + Install, + ReInstall, + Upgrade, + UpgradeAll, + DistroSync, + Obsoleted, +} +impl Rdnf { + pub async fn alter_command( + self, + pkgs: Vec, + _alter_type: AlterType, + _alter_args: AlterOption, + ) -> Result<()> { + let mut repos = Rdnf::make_cache( + self.repo_confs, + self.conf.config_main.clone(), + self.cli, + self.term, + ) + .await?; + repos.sort_by(|a, b| b.config.base.priority.cmp(&a.config.base.priority)); + let installed_repo = + InstalledRepo::open(&self.conf.config_main, DEFAULT_RPMDB_LOCATION).await?; + let (cmd_repo, _) = CmdRepo::new(Vec::<&str>::new()).await?; + let mut solve_context = SolveContext::new(repos, installed_repo, cmd_repo); + for pkg in pkgs { + solve_context.add_pkg_from_repo(Arc::new(pkg)).await?; + } + solve_context.solve().await?; + Ok(()) + } +} diff --git a/rdnf/src/command/install.rs b/rdnf/src/command/install.rs new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/rdnf/src/command/make_cache.rs b/rdnf/src/command/make_cache.rs new file mode 100644 index 0000000000000000000000000000000000000000..b96db71a9d50990bd201647c5cdf0c96bf3434f3 --- /dev/null +++ b/rdnf/src/command/make_cache.rs @@ -0,0 +1,318 @@ +use std::{ + fs::{create_dir_all, remove_file, rename, File, OpenOptions}, + io::{Read, Write}, + path::Path, + sync::Arc, + time::SystemTime, +}; + +use crate::{ + 
cli::Cli, + conf::{config_main::ConfigMain, metalink::Metalink, repo_conf::RepoConfig, repomd::Repomd}, + default::{ + CMDLINE_REPO_NAME, REPODATA_DIR_NAME, REPO_BASEURL_FILE_NAME, REPO_METADATA_FILE_NAME, + REPO_METADATA_FILE_PATH, REPO_METADATA_MARKER, REPO_METALINK_FILE_NAME, RPM_CACHE_DIR_NAME, + SOLVCACHE_DIR_NAME, + }, + repo::Repo, + utils::{ + check_root, download_single_file, get_file_md5, get_multi_progress, read_file_to_string, + recursively_remove_dir, + }, + Rdnf, +}; +use anyhow::{bail, Result}; +use console::{style, Term}; +use indicatif::ProgressBar; +use reqwest::Client; +use tokio::fs::metadata; + +impl Rdnf { + pub async fn make_cache( + repo_confs: Vec, + conf: Arc, + cli: Cli, + term: Term, + ) -> Result> { + let repos_iter = repo_confs + .iter() + .filter(|repo| repo.name.as_str() != CMDLINE_REPO_NAME && repo.base.enabled); + let mut repos = Vec::new(); + if !cli.cacheonly { + check_root()?; + let size = repos_iter + .filter(|r| r.base.lmetadata_expire > 0) + .filter(|r| r.cache_name.is_some()) + .filter(|r| r.detail.meta_link.is_some() || r.detail.base_url.is_some()) + .count(); + let (_multi_pb, mut pbs) = get_multi_progress(size); + let cli = Arc::new(cli); + let mut handles = Vec::new(); + for repo in repo_confs { + if repo.name.as_str() != CMDLINE_REPO_NAME + && repo.base.enabled + && repo.base.lmetadata_expire > 0 + && repo.cache_name.is_some() + && (repo.detail.meta_link.is_some() || repo.detail.base_url.is_some()) + { + let pb = pbs.pop().unwrap(); + let conf_clone = conf.clone(); + let cli_clone = cli.clone(); + + handles.push(( + repo.base.skip_if_unavailable, + repo.name.clone(), + tokio::spawn(init_repo(repo, conf_clone, cli_clone, pb)), + )); + } + } + + for (mut skip, repo_name, handle) in handles { + if skip { + if let Ok(h) = handle.await { + if let Ok(repo) = h { + skip = false; + repos.push(repo) + } + } + } else { + repos.push(handle.await??) 
+ } + if cli.refresh { + let (_, width) = term.size(); + let offset = (width - 10) as usize; + term.write_line(repo_name.as_str())?; + term.move_cursor_up(1)?; + term.move_cursor_right(offset)?; + let status = if skip { + format!("{}", style("Skip").red()) + } else { + format!("{}", style("Done").green()) + }; + term.write_line(status.as_str())?; + } + } + } + Ok(repos) + } +} +pub async fn init_repo( + mut repo: RepoConfig, + conf: Arc, + cli: Arc, + pb: ProgressBar, +) -> Result { + let cache_dir = conf.cachedir.clone() + repo.cache_name.as_ref().unwrap() + "/"; + + if should_sync_metadata(&conf, &repo).await? { + recursively_remove_dir(cache_dir.clone() + REPODATA_DIR_NAME)?; + recursively_remove_dir(cache_dir.clone() + SOLVCACHE_DIR_NAME)?; + }; + let client = repo.get_req_client(&conf)?; + let repomd = repo + .get_repo_md( + cache_dir.as_str(), + cli.refresh, + conf.keepcache, + &client, + &pb, + ) + .await?; + repo.get_repo(pb, repomd, client, cache_dir).await +} +impl RepoConfig { + pub async fn get_repo_md( + &mut self, + cache_dir: &str, + refresh: bool, + keep_cache: bool, + client: &Client, + pb: &ProgressBar, + ) -> Result { + let repo_data_dir = cache_dir.to_owned() + REPODATA_DIR_NAME + "/"; + let meta_link_file = cache_dir.to_owned() + REPO_METALINK_FILE_NAME; + + let base_url_file = cache_dir.to_owned() + REPO_BASEURL_FILE_NAME; + let repo_md_file = repo_data_dir.to_owned() + REPO_METADATA_FILE_NAME; + let tmp_data_dir = cache_dir.to_owned() + "tmp/"; + let temp_repomd_file = tmp_data_dir.to_owned() + REPO_METADATA_FILE_NAME; + let mut need_download = if self.detail.meta_link.is_some() { + !Path::new(meta_link_file.as_str()).exists() + || !Path::new(base_url_file.as_str()).exists() + } else { + !Path::new(repo_md_file.as_str()).exists() + }; + let mut cookie: [u8; 16] = [0; 16]; + let mut replace_repo_md = false; + let mut replace_base_url = false; + if refresh { + if self.detail.meta_link.is_some() { + if Path::new(&meta_link_file).exists() { + 
cookie = get_file_md5(meta_link_file.as_str())?; + } + } else { + if Path::new(&repo_md_file).exists() { + cookie = get_file_md5(repo_md_file.as_str())?; + } + } + need_download = true; + } + + if need_download { + create_dir_all(tmp_data_dir.as_str())?; + if let Some(meta_link_url) = &self.detail.meta_link { + let tmp_metalink_file = tmp_data_dir.to_owned() + REPO_METALINK_FILE_NAME; + download_single_file(client, &meta_link_url, &tmp_metalink_file).await?; + let buf = read_file_to_string(tmp_metalink_file.as_str())?; + if cookie != md5::compute(buf.as_bytes()).0 { + replace_repo_md = true; + let url = Metalink::download_repomd( + &buf, + client, + temp_repomd_file.as_str(), + &self.full_name, + pb, + ) + .await?; + let tmp_base_url_file = tmp_data_dir.clone() + REPO_BASEURL_FILE_NAME; + let baseurl = url.trim_end_matches(REPO_METADATA_FILE_PATH); + self.detail.base_url = Some(baseurl.to_string()); + if Path::new(&tmp_base_url_file).exists() { + remove_file(&tmp_base_url_file)?; + } + OpenOptions::new() + .write(true) + .create(true) + .open(tmp_base_url_file.as_str())? + .write_all(baseurl.as_bytes())?; + replace_base_url = true; + rename(tmp_base_url_file, base_url_file.as_str())?; + if Path::new(repo_md_file.as_str()).exists() { + if get_file_md5(repo_md_file.as_str())? + == get_file_md5(temp_repomd_file.as_str())? + { + replace_repo_md = false; + }; + }; + }; + rename(tmp_metalink_file, meta_link_file)?; + } else { + if self.detail.base_url.is_none() { + bail!( + "Repo {} doesn't have base_url or meta_link ; please check", + self.full_name + ); + } + let url = + self.detail.base_url.as_ref().unwrap().to_owned() + REPO_METADATA_FILE_PATH; + download_single_file(client, url.as_str(), temp_repomd_file.as_str()).await?; + replace_repo_md = true; + if get_file_md5(temp_repomd_file.as_str())? 
== cookie { + replace_repo_md = false; + }; + } + } + + if self.detail.meta_link.is_some() + && !replace_base_url + && Path::new(base_url_file.as_str()).exists() + { + let mut buf = String::new(); + File::open(base_url_file.clone())?.read_to_string(&mut buf)?; + self.detail.base_url = Some(buf.trim_end_matches("/").to_string() + "/"); + } + let last_refresh_marker = cache_dir.to_owned() + REPO_METADATA_MARKER; + + if replace_repo_md { + recursively_remove_dir(repo_data_dir.as_str())?; + let solv_cache_dir = cache_dir.to_owned() + SOLVCACHE_DIR_NAME; + recursively_remove_dir(solv_cache_dir)?; + match remove_file(last_refresh_marker.clone()) { + Ok(_) => {} + Err(_) => {} + }; + if !keep_cache { + let rpms_cache_dir = cache_dir.to_owned() + RPM_CACHE_DIR_NAME + "/"; + recursively_remove_dir(rpms_cache_dir)?; + } + create_dir_all(repo_data_dir)?; + rename(temp_repomd_file, repo_md_file.clone())?; + } + + if need_download { + OpenOptions::new() + .write(true) + .create(true) + .open(last_refresh_marker)?; + } + + let repomd = Repomd::parse_from(repo_md_file.as_str())?; + Ok(repomd) + } +} +async fn should_sync_metadata(conf: &ConfigMain, repo: &RepoConfig) -> Result { + let refresh_flag = + conf.cachedir.clone() + repo.cache_name.as_ref().unwrap() + "/" + REPO_METADATA_MARKER; + let should = match metadata(refresh_flag).await { + Ok(m) => { + let mtime = m.modified()?; + let now = SystemTime::now(); + let duration = now.duration_since(mtime)?; + let s = duration.as_secs(); + if s as i128 >= repo.base.lmetadata_expire { + true + } else { + false + } + } + Err(_) => true, + }; + Ok(should) +} + +#[cfg(test)] +mod tests { + // use futures_util::StreamExt; + use reqwest::{header::CONTENT_LENGTH, ClientBuilder}; + // use tokio::io::AsyncWriteExt; + + #[tokio::test] + async fn test_reqwest() { + let package = vec![ + "389-ds-base-2.2.3-1.fc37.x86_64.rpm", + "389-ds-base-devel-2.2.3-1.fc37.x86_64.rpm", + "389-ds-base-libs-2.2.3-1.fc37.x86_64.rpm", + 
"389-ds-base-snmp-2.2.3-1.fc37.x86_64.rpm", + ]; + let prefix = + "https://mirrors.aliyun.com/fedora/releases/37/Everything/x86_64/os/Packages/3/"; + let cli = ClientBuilder::new().user_agent("rdnf").build().unwrap(); + let primary_gz_url="https://mirrors.aliyun.com/fedora/releases/37/Everything/x86_64/os/repodata/54bbae6e9d4cd4865a55f7558daef86574cddc5f2a4f8a0d9c74f946e1a45dd3-primary.xml.gz"; + let _pkg_url = prefix.to_owned() + package[0]; + let p = cli.head(primary_gz_url).send().await.unwrap(); + let count = p + .headers() + .get(CONTENT_LENGTH) + .unwrap() + .to_str() + .unwrap() + .parse::() + .unwrap(); + // let mut stream = cli.get(primary_gz_url).send().await.unwrap().bytes_stream(); + + // let mut f = tokio::fs::OpenOptions::new() + // .append(true) + // .create(true) + // .open("tests/test.xml.gz") + // .await + // .unwrap(); + // let mut count = 0; + // while let Some(items) = stream.next().await { + // let bytes = items.unwrap(); + // count += bytes.len(); + // f.write(bytes.as_ref()).await.unwrap(); + // } + // dbg!(count); + dbg!(count / 1024 / 1024); + } +} diff --git a/rdnf/src/command/mod.rs b/rdnf/src/command/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..60330ed8d3a11c97690acb0447f31b372b149b21 --- /dev/null +++ b/rdnf/src/command/mod.rs @@ -0,0 +1,3 @@ +pub mod make_cache; +pub mod repo_list; +pub mod alter; diff --git a/rdnf/src/command/repo_list.rs b/rdnf/src/command/repo_list.rs new file mode 100644 index 0000000000000000000000000000000000000000..85458016d0e1385dad2f43ceb4c8fcbd27efbcb4 --- /dev/null +++ b/rdnf/src/command/repo_list.rs @@ -0,0 +1,122 @@ +use crate::{ + cli::{Cli, Commands}, + conf::repo_conf::RepoListFilter, + default::CMDLINE_REPO_NAME, + i18n::repo_list::{ + REPOLIST_REPO_ID, REPOLIST_REPO_NAME, REPOLIST_REPO_STATUS, REPOLIST_REPO_STATUS_DISABLED, + REPOLIST_REPO_STATUS_ENABLED, + }, + Rdnf, +}; +use anyhow::bail; +use anyhow::Result; +use console::style; +impl Cli { + pub fn 
get_repolist_filter(&self) -> Result { + match self.command { + Commands::Repolist(ref opt) => { + let sum = opt.all as usize + opt.enabled as usize + opt.disabled as usize; + if sum > 1 { + bail!("you can only choose one of three options (all,enabled,disabled)") + }; + let filter = match opt.all { + true => RepoListFilter::All, + false => match opt.enabled { + true => RepoListFilter::Enabled, + false => match opt.disabled { + true => RepoListFilter::Disabled, + false => RepoListFilter::Enabled, + }, + }, + }; + return Ok(filter); + } + _ => Ok(RepoListFilter::Enabled), + } + } +} + +impl Rdnf { + pub fn repo_list(&self) -> Result<()> { + let filter = self.cli.get_repolist_filter()?; + let repos = self + .repo_confs + .iter() + .filter(|repo| { + // let repo_lock=repo.lock(); + if repo.name.as_str() == CMDLINE_REPO_NAME { + false + } else { + match filter { + RepoListFilter::All => true, + RepoListFilter::Enabled => repo.base.enabled, + RepoListFilter::Disabled => !repo.base.enabled, + } + } + }) + .collect::>(); + if repos.len() > 0 { + let max_name_size = repos + .iter() + .max_by(|x, y| x.name.len().cmp(&y.name.len())) + .unwrap() + .name + .len(); + let max_full_size = repos + .iter() + .max_by(|x, y| x.full_name.len().cmp(&y.full_name.len())) + .unwrap() + .full_name + .len(); + let title = format!( + "{: { + format!("{}", style(REPOLIST_REPO_STATUS_ENABLED).green()) + } + false => { + format!("{}", style(REPOLIST_REPO_STATUS_DISABLED).red()) + } + }; + let item = format!( + "{:, - clean_requirements_on_remove: Option, - gpgcheck: Option, - keepcache: Option, - repodir: Option, - cachedir: Option, - proxy: Option, - proxy_username: Option, - proxy_password: Option, - distroverpkg: Option, - excludepkgs: Option>, - minversions: Option>, - plugins: Option, - pluginconfpath: Option, - pluginpath: Option, -} - -#[derive(Debug, Deserialize, Clone)] -pub struct Configfile { - main: Main, -} -#[derive(Debug, Clone)] -pub struct ConfigMain { - pub installonly_limit: 
usize, - pub clean_requirements_on_remove: bool, - pub gpgcheck: bool, - pub keepcache: bool, - pub repodir: String, - pub cachedir: String, - pub distroverpkg: String, - pub excludepkgs: Option>, - pub minversions: Option>, - pub proxy: Option, - pub proxy_username: Option, - pub proxy_password: Option, - pub plugins: bool, - pub pluginconfpath: String, - pub pluginpath: String, - pub var_release_ver: String, - pub var_base_arch: String, -} -impl ConfigMain { - pub fn from(cli: &mut Cli) -> Result { - let main = match Config::builder() - .add_source(File::new(&cli.config_file, FileFormat::Ini)) - .build() - { - Ok(s) => { - let c: Result = s.try_deserialize(); - match c { - Ok(t) => { - let m = t.main; - let pkg = m.distroverpkg.unwrap_or(String::from(DEFAULT_DISTROVERPKG)); - ConfigMain { - gpgcheck: m.gpgcheck.unwrap_or(false), - installonly_limit: m.installonly_limit.unwrap_or(1), - clean_requirements_on_remove: m - .clean_requirements_on_remove - .unwrap_or(false), - keepcache: m.keepcache.unwrap_or(false), - repodir: { - match &cli.reposdir { - Some(s) => s.to_string(), - None => { - cli.installroot.to_string() - + m.repodir - .unwrap_or(String::from(DEFAULT_REPO_LOCATION)) - .trim_start_matches("/") - .trim_end_matches("/") - + "/" - } - } - }, - cachedir: cli.installroot.to_string() - + m.cachedir - .unwrap_or(String::from(DEFAULT_CACHE_LOCATION)) - .trim_start_matches("/") - .trim_end_matches("/") - + "/", - distroverpkg: pkg.clone(), - excludepkgs: m.excludepkgs, - minversions: m.minversions, - proxy: m.proxy, - proxy_username: m.proxy_username, - proxy_password: m.proxy_password, - plugins: if cli.plugins { - true - } else { - if m.plugins.unwrap_or(false) { - cli.plugins = true; - true - } else { - false - } - }, - pluginconfpath: m - .pluginconfpath - .unwrap_or(String::from(DEFAULT_PLUGIN_CONF_PATH)), - pluginpath: m.pluginpath.unwrap_or(String::from(DEFAULT_PLUGIN_PATH)), - var_release_ver: { ConfigMain::get_package_version(pkg, cli)? 
}, - var_base_arch: { ConfigMain::get_kernel_arch()? }, - } - } - Err(_) => { - bail!("Failed to parse config file: {}", cli.config_file) - } - } - } - Err(_) => { - bail!("Failed to read config file: {}", cli.config_file) - } - }; - if !check_dir(main.repodir.as_str())? { - bail!("Dir repodir {} don't have .repo file", main.repodir); - }; - Ok(main) - } - pub fn get_package_version(pkg: String, cli: &mut Cli) -> Result { - let ver = match cli.releasever.clone() { - Some(ver) => ver, - None => unsafe { - let p_ts = rpm_sys::ffi::rpmtsCreate(); - if p_ts.is_null() { - bail!(ERROR_RDNF_RPMTS_CREATE_FAILED); - } - let root_ptr = CString::new(cli.installroot.clone()) - .unwrap_or(CString::new("/").unwrap()); - if rpm_sys::ffi::rpmtsSetRootDir(p_ts, root_ptr.as_ptr()) != 0 { - bail!("Failed to set root dir {} for rpmts", cli.installroot); - }; - let pkg_t = CString::new(pkg.as_str()).unwrap(); - let t = pkg_t.as_ptr() as *const c_void; - let p_iter = rpmtsInitIterator(p_ts, 1047, t, 0); - if p_iter.is_null() { - bail!(ERROR_RDNF_NO_DISTROVERPKG) - }; - let p_header = rpmdbNextIterator(p_iter); - if p_header.is_null() { - bail!(ERROR_RDNF_DISTROVERPKG_READ) - }; - let p_header = headerLink(p_header); - if p_header.is_null() { - bail!(ERROR_RDNF_DISTROVERPKG_READ) - }; - let psz_version_temp = headerGetString(p_header, rpmTag_e_RPMTAG_VERSION); - if psz_version_temp.is_null() { - bail!(ERROR_RDNF_DISTROVERPKG_READ) - }; - let version = CStr::from_ptr(psz_version_temp).to_str().unwrap(); - String::from(version) - }, - }; - Ok(ver) - } - pub fn get_kernel_arch() -> Result { - let c = [0; 65]; - let mut system_info = utsname { - sysname: c, - nodename: c, - release: c, - version: c, - machine: c, - domainname: c, - }; - unsafe { - if libc::uname(&mut system_info) != 0 { - bail!("Failed to uname"); - }; - let arch = CStr::from_ptr(system_info.machine.as_ptr()) - .to_str() - .unwrap() - .to_string(); - Ok(arch) - } - } -} diff --git a/rdnf/src/conf/config_main.rs 
b/rdnf/src/conf/config_main.rs new file mode 100644 index 0000000000000000000000000000000000000000..de7d7099188e6322a2547bf48b9f2879f954a901 --- /dev/null +++ b/rdnf/src/conf/config_main.rs @@ -0,0 +1,168 @@ +use std::sync::Arc; + +use anyhow::bail; +use anyhow::Result; +use config::Config; +use config::FileFormat; +use config::Value; + +use crate::default::DEFAULT_CACHE_LOCATION; +use crate::default::DEFAULT_DISTROVERPKG; +use crate::default::DEFAULT_PLUGIN_CONF_PATH; +use crate::default::DEFAULT_PLUGIN_PATH; +use crate::default::DEFAULT_REPO_LOCATION; +use crate::utils::check_dir; +use crate::utils::get_os_arch; + +use super::parse_string_to_vec; + +#[derive(Debug, Clone)] +pub struct ConfigMain { + pub installonly_limit: u64, + pub clean_requirements_on_remove: bool, + pub gpgcheck: bool, + pub keepcache: bool, + pub repodir: String, + pub cachedir: String, + pub distroverpkg: String, + pub excludepkgs: Option>, + pub minversions: Option>, + pub proxy: Option, + pub proxy_username: Option, + pub proxy_password: Option, + pub plugins: bool, + pub pluginconfpath: String, + pub pluginpath: String, +} +impl Default for ConfigMain { + fn default() -> Self { + Self { + installonly_limit: 1, + clean_requirements_on_remove: false, + gpgcheck: false, + keepcache: false, + repodir: DEFAULT_REPO_LOCATION.to_string(), + cachedir: DEFAULT_CACHE_LOCATION.to_string(), + distroverpkg: DEFAULT_DISTROVERPKG.to_string(), + excludepkgs: None, + minversions: None, + proxy: None, + proxy_username: None, + proxy_password: None, + plugins: false, + pluginconfpath: DEFAULT_PLUGIN_CONF_PATH.to_string(), + pluginpath: DEFAULT_PLUGIN_PATH.to_string(), + } + } +} +impl ConfigMain { + fn update(&mut self, k: &str, v: Value) -> Result<()> { + match k { + "installonly_limit" => { + self.installonly_limit = v.into_uint()?; + } + "clean_requirements_on_remove" => { + self.clean_requirements_on_remove = v.into_bool()?; + } + "gpgcheck" => { + self.gpgcheck = v.into_bool()?; + } + "keepcache" => { 
+ self.keepcache = v.into_bool()?; + } + + "repodir" => { + self.repodir = v.into_string()?.trim_end_matches("/").to_owned() + "/"; + } + "cachedir" => { + self.cachedir = v.into_string()?.trim_end_matches("/").to_owned() + "/"; + } + "distroverpkg" => { + self.distroverpkg = v.into_string()?; + } + "excludepkgs" => { + self.excludepkgs = parse_string_to_vec(v.into_string()?.as_str()); + } + "minversions" => { + self.minversions = parse_string_to_vec(v.into_string()?.as_str()); + } + "proxy" => { + self.proxy = Some(v.into_string()?); + } + "proxy_username" => { + self.proxy_username = Some(v.into_string()?); + } + "proxy_password" => { + self.proxy_password = Some(v.into_string()?); + } + "plugins" => { + self.plugins = v.into_bool()?; + } + "pluginconfpath" => { + self.pluginconfpath = v.into_string()?; + } + "pluginpath" => { + self.pluginpath = v.into_string()?; + } + _ => {} + } + Ok(()) + } +} +#[derive(Debug, Clone)] +pub struct RdnfConfig { + pub config_main: Arc, + pub var_release_ver: String, + pub var_base_arch: String, +} +impl RdnfConfig { + pub fn read(config_file: &str) -> Result { + let source = config::File::new(config_file, FileFormat::Ini); + match Config::builder().add_source(source).build() { + Ok(conf) => { + let main = conf.get_table("main")?; + let mut conf = ConfigMain::default(); + for (k, v) in main { + conf.update(k.as_str(), v)?; + } + // conf.read_from_HashMap(main)?; + if !check_dir(conf.repodir.as_str())? 
{ + bail!("Dir repodir {} don't have file", conf.repodir.as_str()); + }; + return Ok(Self { + config_main: Arc::new(conf), + var_release_ver: get_release()?, + var_base_arch: get_os_arch()?, + }); + } + Err(e) => { + bail!("Failed to read conf file {} ; because {:?}", config_file, e); + } + } + } +} + +fn get_release() -> Result { + Ok(String::from("37")) +} +#[cfg(test)] +mod tests { + use crate::conf::parse_string_to_vec; + + // use super::RdnfConfig; + + // const CONF_PATH: &str = "assest/dnf.conf"; + #[test] + fn test_parse_string_to_vec() { + let s = "pkga , pkgb ; \t \n pkgc ,pkg-d"; + assert_eq!( + parse_string_to_vec(s), + Some(vec![ + "pkga".to_string(), + "pkgb".to_string(), + "pkgc".to_string(), + "pkg-d".to_string() + ]) + ); + } +} diff --git a/rdnf/src/conf/metalink.rs b/rdnf/src/conf/metalink.rs new file mode 100644 index 0000000000000000000000000000000000000000..b6a92ab17d24196eba9c54ddad505677ea1f5ff7 --- /dev/null +++ b/rdnf/src/conf/metalink.rs @@ -0,0 +1,164 @@ +use anyhow::{bail, Result}; +use indicatif::ProgressBar; +use quick_xml::de::from_str; +use reqwest::Client; +use serde::Deserialize; +use std::{fs::remove_file, path::Path}; + +use crate::{ + default::REPO_METADATA_FILE_NAME, error::DownloadError, hash::HashKind, + utils::download_single_file, +}; + +#[derive(Deserialize, Default, Debug)] +pub struct Metalink { + files: Files, +} +#[derive(Deserialize, Default, Debug)] +pub struct Files { + file: FileItem, +} +#[derive(Deserialize, Default, Debug)] +struct FileItem { + #[serde(rename = "@name")] + name: String, + timestamp: u64, + size: u64, + verification: Verification, + resources: Resources, +} +#[derive(Deserialize, Default, Debug)] +struct Verification { + #[serde(rename = "$value")] + hashs: Vec, +} +#[derive(Deserialize, Default, Debug)] +struct Hash { + #[serde(rename = "@type")] + r#type: String, + #[serde(rename = "$value")] + hash: String, +} +#[derive(Deserialize, Default, Debug)] +struct Resources { + #[serde(rename = 
"@maxconnections")] + max_conn: Option, + #[serde(rename = "$value")] + url: Vec, +} +#[derive(Deserialize, Default, Debug)] +struct Url { + #[serde(rename = "@protocol")] + protocol: String, + #[serde(rename = "@type")] + r#type: Option, + #[serde(rename = "@location")] + location: Option, + #[serde(rename = "@preference")] + preference: usize, + #[serde(rename = "$value")] + url: String, +} +impl Metalink { + pub async fn download_repomd>( + buf: &str, + client: &Client, + file_path: P, + repo_name: &str, + pb: &ProgressBar, + ) -> Result { + let metalink = match from_str::(buf) { + Ok(s) => s, + Err(e) => { + bail!("Failed to parse {:?} {}", file_path.as_ref(), e) + } + }; + let urls = metalink.files.file.resources.url; + let hashs = metalink.files.file.verification.hashs; + let mut urls = urls + .iter() + .filter(|x| x.protocol == "https" || x.protocol == "http") + .collect::>(); + urls.sort_by(|a, b| b.preference.cmp(&a.preference)); + let _msg = repo_name.to_owned() + "-" + REPO_METADATA_FILE_NAME; + // dbg!(msg); + // println!("{}", msg.as_str()); + for url in urls { + // dbg!(repo_name); + // match download_single_file_with_pb(client, &url.url, file_path.as_ref(), &msg, pb).await + // { + // Err(DownloadError::Network(e)) => { + // println!("{}", e); + // println!("try next site"); + // remove_file(file_path.as_ref())?; + // } + // Err(download_error) => { + // return Err(download_error.into()); + // } + // Ok(_) => { + // for hash in &hashs { + // if HashKind::from(hash.r#type.as_str()) + // .checksum(file_path.as_ref(), &hash.hash)? 
/// Splits a repo-file list option into its individual tokens.
///
/// Repo files express list options (e.g. `gpgkey`) as one string whose
/// entries may be separated by commas, semicolons, or any ASCII whitespace
/// (spaces, tabs, newlines). Empty pieces produced by consecutive
/// separators are discarded.
///
/// Returns `None` when the input contains no tokens at all, so callers can
/// leave `Option<Vec<String>>` config fields unset for empty values.
pub fn parse_string_to_vec(s: &str) -> Option<Vec<String>> {
    let tokens: Vec<String> = s
        .split(&[',', ';'])
        // `split_ascii_whitespace` both splits on spaces/tabs/newlines and
        // drops the empty pieces left between consecutive separators, so no
        // separate filtering pass is needed.
        .flat_map(str::split_ascii_whitespace)
        .map(String::from)
        .collect();
    if tokens.is_empty() {
        None
    } else {
        Some(tokens)
    }
}
skip_md_other: false, + } + } +} +pub enum RepoListFilter { + All, + Enabled, + Disabled, +} +impl RepoConfig { + fn create(name: String) -> Self { + Self { + name: Arc::new(name.clone()), + full_name: name, + cache_name: Default::default(), + base: RepoConfigBase::default(), + detail: RepoConfigDetail::default(), + } + } + pub fn read_all(repo_dir: &str) -> Result> { + let pattern = repo_dir.trim_end_matches("/").to_string() + "/*.repo"; + let source = glob::glob(&pattern)? + .map(|p| config::File::from(p.unwrap()).format(config::FileFormat::Ini)) + .collect::>(); + let c = Config::builder().add_source(source).build()?; + let mut repos = Vec::new(); + for (name, table) in c.try_deserialize::>()? { + let mut repo = RepoConfig::create(name); + for (key, value) in table.into_table()? { + if let Some(e) = repo.update_repo(key.as_str(), value).err() { + bail!( + "repo {} : option {} : {}", + repo.name.as_str(), + key.as_str(), + e + ) + } + } + repos.push(repo); + } + repos.sort_by(|a, b| a.base.priority.cmp(&b.base.priority)); + repos.sort_by(|a, b| a.name.cmp(&b.name)); + Ok(repos) + } + pub fn update_repo(&mut self, key: &str, v: config::Value) -> Result<()> { + match key { + "enabled" => self.base.enabled = v.into_bool()?, + + "name" => self.full_name = v.into_string()?, + + "baseurl" => { + self.detail.base_url = Some(v.into_string()?.trim_end_matches("/").to_owned() + "/") + } + + "metalink" => self.detail.meta_link = Some(v.into_string()?), + + "skip_if_unavailable" => self.base.skip_if_unavailable = v.into_bool()?, + + "gpgcheck" => self.base.gpgcheck = v.into_bool()?, + + "gpgkey" => self.detail.url_gpg_keys = parse_string_to_vec(v.into_string()?.as_str()), + + "username" => self.detail.username = Some(v.into_string()?), + + "password" => self.detail.password = Some(v.into_string()?), + + "priority" => self.base.priority = v.into_int()?, + + "timeout" => self.base.timeout = v.into_uint()?, + + "retries" => self.base.retries = v.into_int()?, + + "minrate" => 
self.base.minrate = v.into_uint()?, + + "throttle" => self.base.throttle = v.into_uint()?, + + "sslverify" => self.base.sslverify = v.into_bool()?, + + "sslcacert" => self.detail.ssl_ca_cert = Some(v.into_string()?), + + "sslclientcert" => self.detail.ssl_client_cert = Some(v.into_string()?), + + "sslclientkey" => self.detail.ssl_client_key = Some(v.into_string()?), + + "metadata_expire" => { + let s = v.into_string()?; + if s == "never" { + self.base.lmetadata_expire = -1; + } else { + self.base.lmetadata_expire = match s.parse::() { + Ok(t) => t, + Err(_) => { + let (num, mul) = s.split_at(s.len() - 1); + let n = match num.parse::() { + Ok(n) => n, + Err(_) => { + bail!("should be like 1 or 1d or 1h or 1m or 1s") + } + }; + match mul { + "s" => n, + "m" => 60 * n, + "h" => 60 * 60 * n, + "d" => 60 * 60 * 24 * n, + _ => { + bail!("the unit of time should be d,h,m,s(default)") + } + } + } + } + } + } + + "skip_md_filelists" => self.base.skip_md_filelists = v.into_bool()?, + + "skip_md_updateinfo" => self.base.skip_md_updateinfo = v.into_bool()?, + + "skip_md_other" => self.base.skip_md_other = v.into_bool()?, + + _ => {} + } + Ok(()) + } + fn update_cache_name(&mut self, url: &str) { + let context = self.name.to_string() + url; + let s = uuid::Builder::from_bytes(md5::compute(context).0) + .into_uuid() + .to_string(); + let (a, b) = s.split_once("-").unwrap(); + self.cache_name = Some(self.name.to_string() + "-" + a); + } +} +fn alter_repo_state_enable(repos: &mut Vec, enable: bool, pattern: &str) -> Result<()> { + match Pattern::new(pattern) { + Ok(p) => { + for repo in &mut *repos { + if p.matches(&repo.name) { + repo.base.enabled = enable; + } + } + } + Err(e) => { + bail!("Failed to enablerepo {}, because {}", &pattern, e) + } + } + Ok(()) +} +pub fn finalize_repos(cli: &Cli, config: &RdnfConfig, repos: &mut Vec) -> Result<()> { + if let Some(ref v) = cli.repoid { + alter_repo_state_enable(repos, false, "*")?; + for pattern in v { + 
impl RepoConfig {
    /// Builds a `reqwest` client configured for this repo.
    ///
    /// Adds an HTTP basic-auth `Authorization` header from the repo's own
    /// username/password and a `Proxy-Authorization` header from the main
    /// config's proxy credentials when present, sets the "rdnf" user agent,
    /// and applies the repo's `timeout` option (0 means no explicit timeout).
    pub fn get_req_client(&self, config: &ConfigMain) -> Result<Client> {
        let mut headers = header::HeaderMap::new();
        if let Some(username) = &self.detail.username {
            headers.insert(
                header::AUTHORIZATION,
                basic_auth(username, self.detail.password.clone()),
            );
        }
        if let Some(username) = &config.proxy_username {
            headers.insert(
                header::PROXY_AUTHORIZATION,
                basic_auth(username, config.proxy_password.clone()),
            );
        }
        let mut client_builder = ClientBuilder::new()
            .user_agent("rdnf")
            .default_headers(headers);
        // timeout == 0 is the repo-file convention for "unset"; only install
        // a client-wide timeout when the option was given.
        if self.base.timeout != 0 {
            client_builder = client_builder.timeout(Duration::from_secs(self.base.timeout));
        }
        Ok(client_builder.build()?)
    }
}
/// Encodes `username[:password]` as an HTTP `Basic` authorization header.
///
/// Streams the credentials straight into a base64 encoder writing after the
/// "Basic " prefix, so no intermediate plaintext `String` is built. The
/// resulting header is marked sensitive so its value is hidden from `Debug`
/// output.
fn basic_auth<U, P>(username: U, password: Option<P>) -> HeaderValue
where
    U: std::fmt::Display,
    P: std::fmt::Display,
{
    use base64::prelude::BASE64_STANDARD;
    use base64::write::EncoderWriter;
    use std::io::Write;

    let mut buf = b"Basic ".to_vec();
    {
        // Scope the encoder so it flushes its final base64 block into `buf`
        // before we build the header value from it.
        let mut encoder = EncoderWriter::new(&mut buf, &BASE64_STANDARD);
        let _ = write!(encoder, "{}:", username);
        if let Some(password) = password {
            let _ = write!(encoder, "{}", password);
        }
    }
    let mut header = HeaderValue::from_bytes(&buf).expect("base64 is always valid HeaderValue");
    header.set_sensitive(true);
    header
}
) -> HeaderValue +where + U: std::fmt::Display, + P: std::fmt::Display, +{ + use base64::prelude::BASE64_STANDARD; + use base64::write::EncoderWriter; + use std::io::Write; + + let mut buf = b"Basic ".to_vec(); + { + let mut encoder = EncoderWriter::new(&mut buf, &BASE64_STANDARD); + let _ = write!(encoder, "{}:", username); + if let Some(password) = password { + let _ = write!(encoder, "{}", password); + } + } + let mut header = HeaderValue::from_bytes(&buf).expect("base64 is always valid HeaderValue"); + header.set_sensitive(true); + header +} +#[cfg(test)] +mod tests { + use super::RepoConfig; + use anyhow::Result; + const REPO_DIR: &str = "assest/repos.d"; + #[test] + fn test_read() -> Result<()> { + let repos = RepoConfig::read_all(REPO_DIR)?; + assert_eq!(repos.len(), 2); + Ok(()) + } +} diff --git a/rdnf/src/conf/repomd.rs b/rdnf/src/conf/repomd.rs new file mode 100644 index 0000000000000000000000000000000000000000..2767d9d6f82cb1900f6430843b0f568493383d69 --- /dev/null +++ b/rdnf/src/conf/repomd.rs @@ -0,0 +1,71 @@ +use anyhow::{bail, Result}; +use quick_xml::de::from_str; +use serde::Deserialize; +use std::{fs::File, io::Read, path::Path}; + +#[derive(Deserialize, Default, Debug)] +pub struct Repomd { + revision: u64, + #[serde(rename = "$value")] + datas: Vec, // files: Files, +} +impl Repomd { + pub fn get_primary_location(&self) -> &str { + self.datas + .iter() + .find(|x| x.r#type == "primary") + .unwrap() + .location + .href + .as_str() + } + pub fn get_primary_size(&self) -> u64 { + self.datas + .iter() + .find(|x| x.r#type == "primary") + .unwrap() + .size + } + pub fn parse_from>(path: P) -> Result { + let mut f = File::open(path.as_ref())?; + let mut buf = String::new(); + f.read_to_string(&mut buf)?; + match from_str::(buf.as_str()) { + Ok(repomd) => return Ok(repomd), + Err(e) => { + bail!("Failed to parse {:?} {} \n {}", path.as_ref(), e, buf) + } + } + } +} + +#[derive(Deserialize, Default, Debug)] +struct Data { + #[serde(rename = "@type")] + 
r#type: String, + #[serde(rename = "checksum")] + checksum: CheckSum, + location: Location, + timestamp: u64, + size: u64, +} +#[derive(Deserialize, Default, Debug)] +struct CheckSum { + #[serde(rename = "@type")] + r#type: String, + #[serde(rename = "$value")] + checksum: String, +} +#[derive(Deserialize, Default, Debug)] +struct Location { + #[serde(rename = "@href")] + href: String, +} +#[test] +fn test_repomd() { + let mut f = File::open("assest/repomd.xml").unwrap(); + let mut buf = String::new(); + f.read_to_string(&mut buf).unwrap(); + let repo = from_str::(buf.as_str()).unwrap(); + dbg!(repo); +} diff --git a/rdnf/src/default.rs b/rdnf/src/default.rs index b75d8e2c4ef8004cf6bed2f12a863f18ab1e0548..c7c6bdd80c2dce77c3e3b095a273b3645bc304d9 100644 --- a/rdnf/src/default.rs +++ b/rdnf/src/default.rs @@ -1,9 +1,10 @@ pub const RDNF_NAME: &str = "rdnf"; // pub const RDNF_CONF_FILE: &str = "/etc/rdnf/rdnf.conf"; -pub const RDNF_CONF_FILE:&str="/etc/dnf/dnf.conf"; +pub const RDNF_CONF_FILE: &str = "/etc/dnf/dnf.conf"; // pub const SYSTEM_LIBDIR: &str = "/usr/local/lib64"; pub const DEFAULT_REPO_LOCATION: &str = "/etc/yum.repos.d"; -pub const DEFAULT_CACHE_LOCATION: &str = "/var/cache/rdnf"; +pub const DEFAULT_CACHE_LOCATION: &str = "/var/cache/rdnf/"; +pub const DEFAULT_RPMDB_LOCATION: &str = "/var/lib/rpm/rpmdb.sqlite"; // pub const DEFAULT_DATA_LOCATION: &str = "/var/lib/rdnf"; // pub const HISTORY_DB_FILE:&str="history.db"; pub const DEFAULT_DISTROVERPKG: &str = "system-release"; @@ -11,13 +12,18 @@ pub const DEFAULT_PLUGIN_CONF_PATH: &str = "/etc/tdnf/pluginconf.d"; pub const DEFAULT_PLUGIN_PATH: &str = "/usr/local/lib64/tdnf-plugins"; pub const VAR_RELEASEVER: &str = "$releasever"; pub const VAR_BASEARCH: &str = "$basearch"; +pub const DEFAULT_RELEASE: &str = "37"; +pub const DEFAULT_ARCH: &str = "x86_64"; +pub const INSTALLED: &str = "Installed"; pub const SYSTEM_REPO_NAME: &str = "@System"; pub const CMDLINE_REPO_NAME: &str = "@cmdline"; pub const 
REPO_METADATA_MARKER: &str = "lastrefresh"; pub const REPODATA_DIR_NAME: &str = "repodata"; pub const SOLVCACHE_DIR_NAME: &str = "solvcache"; pub const RPM_CACHE_DIR_NAME: &str = "rpms"; -pub const GPGKEY_CACHE_DIR_NAME:&str="keys"; +pub const GPGKEY_CACHE_DIR_NAME: &str = "keys"; +pub const INSTALLED_CACHE_DIR_NAME: &str = "installed"; +pub const INSTALLED_SEQ: &str = "installed_seq"; pub const REPO_METADATA_FILE_NAME: &str = "repomd.xml"; pub const REPO_METADATA_FILE_PATH: &str = "repodata/repomd.xml"; pub const REPO_METALINK_FILE_NAME: &str = "metalink"; diff --git a/rdnf/src/error.rs b/rdnf/src/error.rs new file mode 100644 index 0000000000000000000000000000000000000000..b84afecc6d43633a41696f3fdafe61a2d4c9e52c --- /dev/null +++ b/rdnf/src/error.rs @@ -0,0 +1,25 @@ +use thiserror::Error; + +#[derive(Error, Debug)] +pub enum SystemError { + #[error("Can't invoke \"uname\" to get arch : {0}")] + ArchError(String), + // bail!("can't create {} lock on {}", lock_path, descr) + #[error("Can't create {des:?} lock on {lock_path:?}")] + FailedCreateLock { lock_path: String, des: String }, + // bail!("Failed to acquire rdnf_instance lock") + #[error("Failed to acquire rdnf_instance lock , maybe an instance already exists")] + InstanceExist, + #[error("Failed to acquire rpmdb.sqlite lock , maybe an instance already exists")] + RpmdbLocked, +} + +#[derive(Error, Debug)] +pub enum DownloadError { + #[error("The network site connection cannot be reached ; {0}")] + Network(String), + #[error("Io error")] + IoError(String), + #[error("Can't get systime")] + TimeError, +} diff --git a/rdnf/src/errors.rs b/rdnf/src/errors.rs deleted file mode 100644 index f5d5420651fee7b2e08b13867f316da10669ee13..0000000000000000000000000000000000000000 --- a/rdnf/src/errors.rs +++ /dev/null @@ -1,166 +0,0 @@ -pub const ERROR_RDNF_INVALID_PARAMETER: &str = "unknown system error,Invalid argument"; -pub const ERROR_RDNF_OUT_OF_MEMORY: &str = "unknown system error , Out of memory "; -pub const 
ERROR_RDNF_NO_DATA: &str = "unknown system error , No data available"; -pub const ERROR_RDNF_NO_MATCH: &str = "No matching packages"; -pub const ERROR_RDNF_ALREADY_EXISTS: &str = "unknown system error , File exists"; -pub const ERROR_RDNF_NO_DISTROVERPKG:&str= "distroverpkg config entry is set to a package that is not installed. Check /etc/tdnf/tdnf.conf"; -pub const ERROR_RDNF_RPM_INIT: &str = "Error initializing rpm config.Check /usr/lib/rpm/rpmrc"; -pub const ERROR_RDNF_DISTROVERPKG_READ: &str = "There was an error reading version of distroverpkg"; -pub const ERROR_RDNF_SOLV_FAILED: &str = "Solv general runtime error"; -pub const ERROR_RDNF_SOLV_IO: &str = "Solv - I/O error"; -pub const ERROR_RDNF_SOLV_CHKSUM: &str = "Solv - Checksum creation failed"; -pub const ERROR_RDNF_REPO_WRITE: &str = "Solv - Failed to write repo"; -pub const ERROR_RDNF_ADD_SOLV: &str = "Solv - Failed to add solv"; -pub const ERROR_RDNF_RPMTS_CREATE_FAILED: &str = "RPM transaction set could not be created"; -pub const ERROR_RDNF_OPERATION_ABORTED: &str = "Operation aborted."; -pub const ERROR_RDNF_REPO_NOT_FOUND: &str = "Repo was not found"; - -pub const ERROR_RDNF_CACHE_REFRESH: &str = r#"rdnf repo cache needs to be refreshed -You cam use one of the below methods to workaround this -1. Login as root and refresh cache -2. Use --config option and create repo cache where you have access -3. Use --cacheonly and use existing cache in the system"#; -pub const ERROR_RDNF_NO_GPGKEY_CONF_ENTRY:&str="gpgkey entry is missing for this repo. 
please add gpgkey in repo file or use --nogpgcheck to ignore."; -pub const ERROR_RDNF_URL_INVALID: &str = "URL is invalid."; -pub const ERROR_RDNF_RPM_CHECK: &str = "rpm check reported errors"; -pub const ERROR_RDNF_INVALID_RESOLVE_ARG: &str = "Invalid argument in resolve"; -pub const ERROR_RDNF_SELF_ERASE: &str = - "The operation would result in removing the protected package : tdnf"; -pub const ERROR_RDNF_NOTHING_TO_DO: &str = "Nothing to do."; -pub const ERROR_RDNF_TRANSACTION_FAILED: &str = "rpm transaction failed"; -pub const ERROR_RDNF_INVALID_PUBKEY_FILE: &str = "public key file is invalid or corrupted"; -pub const ERROR_RDNF_RPMTD_CREATE_FAILED: &str = - "RPM data container could not be created. Use --nogpgcheck to ignore."; - -pub const ERROR_RDNF_RPM_GET_RSAHEADER_FAILED: &str = - "RPM not signed. Use --skipsignature or --nogpgcheck to ignore."; -pub const ERROR_RDNF_RPM_GPG_PARSE_FAILED: &str = - "RPM failed to parse gpg key. Use --nogpgcheck to ignore."; -pub const ERROR_RDNF_RPM_GPG_NO_MATCH: &str = - "RPM is signed but failed to match with known keys. 
Use --nogpgcheck to ignore."; - -// pub const ERROR_RDNF_INVALID_ADDRESS: &str = "unknown system error , Bad address "; -// pub const ERROR_RDNF_CALL_INTERRUPTED: &str = "unknown system error , Interrupted system call"; -// pub const ERROR_RDNF_FILESYS_IO: &str = "unknown system error , I/O error"; -// pub const ERROR_RDNF_SYM_LOOP: &str = "unknown system error , Too many symbolic links encountered "; -// pub const ERROR_RDNF_NAME_TOO_LONG: &str = "unknown system error , File name too long"; -// pub const ERROR_RDNF_CALL_NOT_SUPPORTED: &str = -// "unknown system error , Invalid system call number "; -// pub const ERROR_RDNF_INVALID_DIR: &str = "unknown system error , Not a directory"; -// pub const ERROR_RDNF_OVERFLOW: &str = -// "unknown system error , Value too large for defined data type"; -// pub const ERROR_RDNF_PACKAGE_REQUIRED: &str = "Package name expected but was not provided"; -// pub const ERROR_RDNF_CONF_FILE_LOAD: &str = "Error loading tdnf conf (/etc/tdnf/tdnf.conf)"; -// pub const ERROR_RDNF_REPO_FILE_LOAD: &str = -// "Error loading tdnf repo (normally under /etc/yum.repos.d"; -// pub const ERROR_RDNF_INVALID_REPO_FILE: &str = "Encountered an invalid repo file"; -// pub const ERROR_RDNF_REPO_DIR_OPEN:&str= "Error opening repo dir. Check if the repodir configured in tdnf.conf exists (usually /etc/yum.repos.d)"; -// pub const ERROR_RDNF_SET_PROXY: &str = "There was an error setting the proxy server."; -// pub const ERROR_RDNF_SET_PROXY_USERPASS: &str = -// "There was an error setting the proxy server user and pass"; -// pub const ERROR_RDNF_INVALID_ALLOCSIZE: &str = -// "A memory allocation was requested with an invalid size"; -// pub const ERROR_RDNF_STRING_TOO_LONG: &str = "Requested string allocation size was too long."; -// pub const ERROR_RDNF_NO_ENABLED_REPOS:&str= "There are no enabled repos.\n Run \"tdnf repolist all\" to see the repos you have.\n You can enable repos by\n 1. -// by passing in --enablerepo \n 2. 
editing repo files in your repodir(usually /etc/yum.repos.d)"; -// pub const ERROR_RDNF_PACKAGELIST_EMPTY: &str = "Packagelist was empty"; -// pub const ERROR_RDNF_GOAL_CREATE: &str = "Error creating goal"; - -// pub const ERROR_RDNF_CLEAN_UNSUPPORTED: &str = -// "Clean type specified is not supported in this release. Please try clean all."; -// pub const ERROR_RDNF_SOLV_BASE: &str = "Solv base error"; - -// pub const ERROR_RDNF_SOLV_OP: &str = "Solv client programming error"; -// pub const ERROR_RDNF_SOLV_LIBSOLV: &str = "Solv error propagted from libsolv"; - -// pub const ERROR_RDNF_SOLV_CACHE_WRITE: &str = "Solv - cache write error"; -// pub const ERROR_RDNF_SOLV_QUERY: &str = "Solv - ill formed query"; -// pub const ERROR_RDNF_SOLV_ARCH: &str = "Solv - unknown arch"; -// pub const ERROR_RDNF_SOLV_VALIDATION: &str = "Solv - validation check failed"; -// pub const ERROR_RDNF_SOLV_NO_SOLUTION: &str = "Solv - goal found no solutions"; -// pub const ERROR_RDNF_SOLV_NO_CAPABILITY: &str = "Solv - the capability was not available"; - -// pub const ERROR_RDNF_SOLV_CACHE_NOT_CREATED: &str = "Solv - Solv cache not found"; - -// pub const ERROR_RDNF_REPO_BASE: &str = "Repo error base"; -// pub const ERROR_RDNF_SET_SSL_SETTINGS: &str = -// "There was an error while setting SSL settings for the repo."; -// pub const ERROR_RDNF_REPO_PERFORM: &str = "Error during repo handle execution"; -// pub const ERROR_RDNF_REPO_GETINFO: &str = "Repo during repo result getinfo"; - -// pub const ERROR_RDNF_NO_SEARCH_RESULTS: &str = "No matches found"; -// pub const ERROR_RDNF_RPMRC_NOTFOUND: &str = -// "rpm generic error - not found (possible corrupt rpm file)"; -// pub const ERROR_RDNF_RPMRC_FAIL: &str = "rpm generic failure"; -// pub const ERROR_RDNF_RPMRC_NOTTRUSTED: &str = "rpm signature is OK, but key is not trusted"; -// pub const ERROR_RDNF_RPMRC_NOKEY:&str="public key is unavailable. 
install public key using rpm --import or use --nogpgcheck to ignore."; - -// pub const ERROR_RDNF_KEYURL_UNSUPPORTED: &str = -// "GpgKey Url schemes other than file are not supported"; -// pub const ERROR_RDNF_KEYURL_INVALID: &str = "GpgKey Url is invalid"; -// pub const ERROR_RDNF_RPM_NOT_SIGNED: &str = "RPM not signed. Use --nogpgcheck to ignore."; - -// pub const ERROR_RDNF_AUTOERASE_UNSUPPORTED: &str = "autoerase / autoremove is not supported."; - -// pub const ERROR_RDNF_METADATA_EXPIRE_PARSE: &str = -// "metadata_expire value could not be parsed. Check your repo files."; - -// pub const ERROR_RDNF_DOWNGRADE_NOT_ALLOWED:&str="a downgrade is not allowed below the minimal version. Check 'minversions' in the configuration."; -// pub const ERROR_RDNF_PERM: &str = "Operation not permitted. You have to be root."; -// pub const ERROR_RDNF_OPT_NOT_FOUND: &str = "A required option was not found"; - -// pub const ERROR_RDNF_INVALID_INPUT: &str = "Invalid input."; -// pub const ERROR_RDNF_CACHE_DISABLED: &str = "cache only is set, but no repo data found"; -// pub const ERROR_RDNF_CACHE_DIR_OUT_OF_DISK_SPACE:&str="Insufficient disk space at cache directory /var/cache/tdnf (unless specified differently in config). Try freeing space first."; -// pub const ERROR_RDNF_EVENT_CTXT_ITEM_NOT_FOUND:&str="An event context item was not found. This is usually related to plugin events. Try --noplugins to deactivate all plugins or --disableplugin= to deactivate a specific one. You can permanently deactivate an offending plugin by setting enable=0 in the plugin config file."; -// pub const ERROR_RDNF_EVENT_CTXT_ITEM_INVALID_TYPE:&str="An event item type had a mismatch. This is usually related to plugin events. Try --noplugins to deactivate all plugins or --disableplugin= to deactivate a specific one. 
You can permanently deactivate an offending plugin by setting enable=0 in the plugin config file."; - -// pub const ERROR_RDNF_BASEURL_DOES_NOT_EXISTS: &str = -// "Base URL and Metalink URL not found in the repo file"; -// pub const ERROR_RDNF_CHECKSUM_VALIDATION_FAILED: &str = -// "Checksum Validation failed for the repomd.xml downloaded using URL from metalink"; -// pub const ERROR_RDNF_METALINK_RESOURCE_VALIDATION_FAILED: &str = -// "No Resource present in metalink file for file download"; -// pub const ERROR_RDNF_FIPS_MODE_FORBIDDEN: &str = "API call to digest API forbidden in FIPS mode!"; -// pub const ERROR_RDNF_CURLE_UNSUPPORTED_PROTOCOL: &str = "Curl doesn't Support this protocol"; -// pub const ERROR_RDNF_CURLE_FAILED_INIT: &str = "Curl Init Failed"; -// pub const ERROR_RDNF_CURLE_URL_MALFORMAT: &str = -// "URL seems to be corrupted. Please clean all and makecache"; -// pub const ERROR_RDNF_SYSTEM_BASE: &str = "unknown system error"; -// pub const ERROR_RDNF_ML_PARSER_INVALID_DOC_OBJECT: &str = -// "Failed to parse and create document tree"; -// pub const ERROR_RDNF_ML_PARSER_INVALID_ROOT_ELEMENT: &str = "Root element not found"; -// pub const ERROR_RDNF_ML_PARSER_MISSING_FILE_ATTR: &str = "Missing filename in metalink file"; -// pub const ERROR_RDNF_ML_PARSER_INVALID_FILE_NAME: &str = "Invalid filename present"; -// pub const ERROR_RDNF_ML_PARSER_MISSING_FILE_SIZE: &str = "Missing file size in metalink file"; -// pub const ERROR_RDNF_ML_PARSER_MISSING_HASH_ATTR: &str = "Missing attribute in hash tag"; -// pub const ERROR_RDNF_ML_PARSER_MISSING_HASH_CONTENT: &str = "Missing content in hash tag value"; -// pub const ERROR_RDNF_ML_PARSER_MISSING_URL_ATTR: &str = "Missing attribute in url tag"; -// pub const ERROR_RDNF_ML_PARSER_MISSING_URL_CONTENT: &str = "Missing content in url tag value"; -// pub const ERROR_RDNF_HISTORY_ERROR: &str = "History database error"; -// pub const ERROR_RDNF_HISTORY_NODB: &str = "History database does not exist"; - - - -// pub 
const ERROR_RDNF_BASE: &str = "Generic base error."; -// pub const ERROR_RDNF_INVALID_ARGUMENT: &str = "Invalid argument."; -// pub const ERROR_RDNF_CLEAN_REQUIRES_OPTION: &str = -// "Clean requires an option: packages, metadata, dbcache, plugins, expire-cache, all"; -// pub const ERROR_RDNF_NOT_ENOUGH_ARGS: &str = -// "The command line parser could not continue. Expected at least one argument."; - -// pub const ERROR_RDNF_OPTION_NAME_INVALID: &str = "Command line error: option is invalid."; -// pub const ERROR_RDNF_OPTION_ARG_REQUIRED: &str = "Command line error: expected one argument."; -// pub const ERROR_RDNF_OPTION_ARG_UNEXPECTED: &str = "Command line error: argument was unexpected."; -// pub const ERROR_RDNF_CHECKLOCAL_EXPECT_DIR: &str = -// "check-local requires path to rpm directory as a parameter"; -// pub const ERROR_RDNF_PROVIDES_EXPECT_ARG: &str = "Need an item to match."; -// pub const ERROR_RDNF_SETOPT_NO_EQUALS: &str = -// "Missing equal sign in setopt argument. setopt requires an argument of the form key=value."; -// pub const ERROR_RDNF_NO_SUCH_CMD: &str = "Please check your command"; -// pub const ERROR_RDNF_DOWNLOADDIR_REQUIRES_DOWNLOADONLY: &str = -// "--downloaddir requires --downloadonly"; -// pub const ERROR_RDNF_ONE_DEP_ONLY: &str = "only one dependency allowed"; -// pub const ERROR_RDNF_ALLDEPS_REQUIRES_DOWNLOADONLY: &str = "--alldeps requires --downloadonly"; -// pub const ERROR_RDNF_FILE_NOT_FOUND: &str = "unknown system error , No such file or directory"; -// pub const ERROR_RDNF_ACCESS_DENIED: &str = "unknown system error , Permission denied"; \ No newline at end of file diff --git a/rdnf/src/lock.rs b/rdnf/src/flock.rs similarity index 50% rename from rdnf/src/lock.rs rename to rdnf/src/flock.rs index 46b7550d4b90e45a11b523382ee288a1c0c71186..2aa048584e33b5d6820c49685b86250ff6138c8a 100644 --- a/rdnf/src/lock.rs +++ b/rdnf/src/flock.rs @@ -1,10 +1,14 @@ +use libc::geteuid; use std::{ fs::{File, OpenOptions}, + os::fd::AsRawFd, 
str::FromStr, }; -use anyhow::{bail, Result}; -use rustix::fd::AsRawFd; +use crate::{ + default::{DEFAULT_RPMDB_LOCATION, RDNF_INSTANCE_LOCK_FILE}, + error::SystemError, +}; pub enum RdnfFlockMode { READ, @@ -18,7 +22,7 @@ pub struct Rdnflock { pub descr: String, fdrefs: usize, } -pub fn flock_new(lock_path: &str, descr: &str) -> Result { +pub fn flock_new(lock_path: &str, descr: &str) -> Result { let flock = match OpenOptions::new() .read(true) .write(true) @@ -41,13 +45,17 @@ pub fn flock_new(lock_path: &str, descr: &str) -> Result { fdrefs: 1, }, Err(_) => { - bail!("can't create {} lock on {}", lock_path, descr) + // bail!("can't create {} lock on {}", lock_path, descr) + return Err(SystemError::FailedCreateLock { + lock_path: lock_path.to_string(), + des: descr.to_string(), + }); } }, }; Ok(flock) } -pub fn flock_acquire(lock: &mut Rdnflock, mode: RdnfFlockMode) -> Result { +pub fn flock_acquire(lock: &mut Rdnflock, mode: RdnfFlockMode) -> bool { let mut res: bool = false; if lock.fdrefs > 1 { res = true @@ -75,7 +83,7 @@ pub fn flock_acquire(lock: &mut Rdnflock, mode: RdnfFlockMode) -> Result { } } lock.fdrefs += res as usize; - Ok(res) + res } pub fn flock_release(lock: &mut Rdnflock) { if lock.fdrefs == 2 { @@ -91,12 +99,51 @@ pub fn flock_release(lock: &mut Rdnflock) { } } } -#[cfg(test)] -mod tests { - #[test] - fn test_flock() { - // let c=umask; - // let t=c.to_string(); - // println!("c {}",t); +pub fn is_already_running() -> Result<(), SystemError> { + if unsafe { geteuid() } == 0 { + let mut lock = flock_new(RDNF_INSTANCE_LOCK_FILE, "rdnf_instance")?; + if !flock_acquire(&mut lock, RdnfFlockMode::WriteRead) { + match lock.openmode { + RdnfFlockMode::WriteRead => { + println!("waiting for {} lock on {}", lock.descr, lock.path); + if !flock_acquire(&mut lock, RdnfFlockMode::Wait) { + flock_release(&mut lock); + return Err(SystemError::FailedCreateLock { + lock_path: lock.path, + des: lock.descr, + }); + } + } + _ => { + flock_release(&mut lock); + 
return Err(SystemError::InstanceExist); + } + } + } + } + Ok(()) +} +pub fn lock_rpmdb() -> Result<(), SystemError> { + if unsafe { geteuid() } == 0 { + let mut lock = flock_new(DEFAULT_RPMDB_LOCATION, "rpmdb.sqlite")?; + if !flock_acquire(&mut lock, RdnfFlockMode::WriteRead) { + match lock.openmode { + RdnfFlockMode::WriteRead => { + println!("waiting for {} lock on {}", lock.descr, lock.path); + if !flock_acquire(&mut lock, RdnfFlockMode::Wait) { + flock_release(&mut lock); + return Err(SystemError::FailedCreateLock { + lock_path: lock.path, + des: lock.descr, + }); + } + } + _ => { + flock_release(&mut lock); + return Err(SystemError::RpmdbLocked); + } + } + } } + Ok(()) } diff --git a/rdnf/src/goal.rs b/rdnf/src/goal.rs deleted file mode 100644 index 1ed897cb85a6279561b97eed7820ae5a3c9a7696..0000000000000000000000000000000000000000 --- a/rdnf/src/goal.rs +++ /dev/null @@ -1,413 +0,0 @@ -use std::{ffi::CString, mem::size_of}; - -use crate::{ - c_lib::{ - create_dataiterator_empty, get_queue_element_value, map_set, map_setall, pool_id2solvable, - queue_push, queue_push2, solv_add_flags_to_jobs, - }, - cli::AlterOption, - errors::{ - ERROR_RDNF_ALREADY_EXISTS, ERROR_RDNF_INVALID_PARAMETER, ERROR_RDNF_INVALID_RESOLVE_ARG, - ERROR_RDNF_OUT_OF_MEMORY, - }, - solv::{ - rdnf_pkg::{init_map, solv_add_excludes, SkipProblem}, - rdnf_query::{ - init_queue, - }, - SolvPackageList, - }, - sub_command::{install::{is_glob, AlterType}, info::{PkgInfo, PkgInfoLevel}}, - Rdnf, -}; -use anyhow::{bail, Result}; -use glob::Pattern; - -use solv_sys::ffi::{ - dataiterator_free, dataiterator_init, dataiterator_step, map_grow, map_init, map_subtract, - pool_evrcmp_str, s_Map, s_Solver, solv_knownid_SOLVABLE_EVR, solv_knownid_SOLVABLE_NAME, - solvable_lookup_str, solver_create, solver_create_transaction, solver_get_unneeded, - solver_set_flag, solver_solve, testcase_write, transaction_type, Queue, Repo, Transaction, - EVRCMP_COMPARE, SEARCH_STRING, SOLVER_ERASE, 
SOLVER_FLAG_ALLOW_DOWNGRADE, - SOLVER_FLAG_ALLOW_UNINSTALL, SOLVER_FLAG_ALLOW_VENDORCHANGE, SOLVER_FLAG_BEST_OBEY_POLICY, - SOLVER_FLAG_INSTALL_ALSO_UPDATES, SOLVER_FLAG_KEEP_ORPHANS, SOLVER_FLAG_YUM_OBSOLETES, - SOLVER_FORCEBEST, SOLVER_INSTALL, SOLVER_SOLVABLE, SOLVER_SOLVABLE_ALL, - SOLVER_TRANSACTION_DOWNGRADE, SOLVER_TRANSACTION_ERASE, SOLVER_TRANSACTION_INSTALL, - SOLVER_TRANSACTION_OBSOLETED, SOLVER_TRANSACTION_REINSTALL, SOLVER_TRANSACTION_SHOW_ACTIVE, - SOLVER_TRANSACTION_SHOW_ALL, SOLVER_TRANSACTION_SHOW_OBSOLETES, SOLVER_TRANSACTION_UPGRADE, - SOLVER_UPDATE, TESTCASE_RESULT_PROBLEMS, TESTCASE_RESULT_TRANSACTION, -}; -impl Rdnf { - pub fn get_pkgs_with_specified_type( - &self, - trans: *mut Transaction, - dw_type: i32, - ) -> Result>> { - let mut solved_pkgs = init_queue(); - unsafe { - for i in 0..(*trans).steps.count { - let pkg = get_queue_element_value(&mut (*trans).steps, i as u32); - let pkg_type = if dw_type == SOLVER_TRANSACTION_OBSOLETED as i32 { - transaction_type(trans, pkg, SOLVER_TRANSACTION_SHOW_OBSOLETES as i32) - } else { - transaction_type( - trans, - pkg, - (SOLVER_TRANSACTION_SHOW_ACTIVE | SOLVER_TRANSACTION_SHOW_ALL) as i32, - ) - }; - if dw_type == pkg_type { - queue_push(&mut solved_pkgs, pkg); - } - } - } - let pkg_list = match SolvPackageList::queue_to_pkg_list(&mut solved_pkgs) { - Ok(s) => Some(s), - Err(_) => None, - }; - if pkg_list.is_some() { - if pkg_list.as_ref().unwrap().get_size() > 0 { - let pkg_infos = PkgInfo::populate_pkg_info(&self.rc.sack, &pkg_list.unwrap(),PkgInfoLevel::Details)?; - if pkg_infos.is_empty() { - Ok(None) - } else { - Ok(Some(pkg_infos)) - } - } else { - Ok(None) - } - } else { - Ok(None) - } - } - pub fn goal( - &self, - pkg_list: &mut Queue, - alter_type: AlterType, - aler_args: &AlterOption, - ) -> Result { - let excludes = self.pkgs_to_exclude()?; - let mut queue_job = init_queue(); - if alter_type.is_upgrade_all() { - queue_push2( - &mut queue_job, - (SOLVER_UPDATE | SOLVER_SOLVABLE_ALL) as i32, 
- 0, - ); - } else if alter_type.is_distro_sync() { - } else { - if pkg_list.count == 0 { - bail!(ERROR_RDNF_ALREADY_EXISTS); - } - for i in 0..pkg_list.count { - let id = get_queue_element_value(pkg_list as *const Queue, i as u32); - self.add_goal(alter_type.clone(), &mut queue_job, id, &excludes)?; - } - } - let mut flags = 0; - if aler_args.best { - flags = flags | SOLVER_FORCEBEST; - } - solv_add_flags_to_jobs(&mut queue_job as *mut Queue, flags as i32); - - if !excludes.is_empty() { - solv_add_excludes(self.rc.sack.pool, &excludes); - } - self.solv_add_min_version(); - let solv = unsafe { solver_create(self.rc.sack.pool) }; - if solv.is_null() { - bail!(ERROR_RDNF_OUT_OF_MEMORY); - } - if aler_args.allow_erasing || alter_type.is_erase() || alter_type.is_auto_erase() { - unsafe { - solver_set_flag(solv, SOLVER_FLAG_ALLOW_UNINSTALL as i32, 1); - }; - } - let n_problems = unsafe { - solver_set_flag(solv, SOLVER_FLAG_BEST_OBEY_POLICY as i32, 1); - solver_set_flag(solv, SOLVER_FLAG_ALLOW_VENDORCHANGE as i32, 1); - solver_set_flag(solv, SOLVER_FLAG_KEEP_ORPHANS as i32, 1); - solver_set_flag(solv, SOLVER_FLAG_BEST_OBEY_POLICY as i32, 1); - solver_set_flag(solv, SOLVER_FLAG_YUM_OBSOLETES as i32, 1); - solver_set_flag(solv, SOLVER_FLAG_ALLOW_DOWNGRADE as i32, 1); - solver_set_flag(solv, SOLVER_FLAG_INSTALL_ALSO_UPDATES as i32, 1); - solver_solve(solv, &mut queue_job) - }; - if n_problems > 0 { - let mut skip_problem = self.get_skip_problem_opt(aler_args); - if alter_type.is_upgrade() && !excludes.is_empty() { - skip_problem.disabled = true; - } - self.rc.sack.solv_report_problems(solv, skip_problem)?; - } - let p_trans = unsafe { solver_create_transaction(solv) }; - if p_trans.is_null() { - bail!(ERROR_RDNF_INVALID_PARAMETER); - } - if aler_args.debug_solver { - let result_flags = TESTCASE_RESULT_TRANSACTION | TESTCASE_RESULT_PROBLEMS; - unsafe { - let dir = CString::new("debugdata").unwrap(); - if testcase_write( - solv, - dir.as_ptr(), - result_flags as i32, - 0 as 
*mut i8, - 0 as *mut i8, - ) == 0 - { - println!("Could not write debugdata to folder {}", "debugdata") - } - }; - } - Ok(self.goal_get_all_results_ignore_no_data(alter_type, p_trans, solv)?) - } - pub fn add_goal( - &self, - alter_type: AlterType, - queue_job: &mut Queue, - id: i32, - excludes: &Vec, - ) -> Result<()> { - if !excludes.is_empty() { - let pkg_name = self.rc.sack.solv_get_pkg_name_by_id(id)?; - for ele in excludes { - if is_glob(ele.as_str()) { - let p = Pattern::new(ele.as_str())?; - if p.matches(pkg_name.as_str()) { - return Ok(()); - }; - } else { - if ele == pkg_name.as_str() { - return Ok(()); - } - } - } - } - match alter_type { - AlterType::DownGrade | AlterType::DownGradeAll => { - queue_push2(queue_job, (SOLVER_SOLVABLE | SOLVER_INSTALL) as i32, id); - } - AlterType::AutoErase | AlterType::Erase => { - queue_push2(queue_job, (SOLVER_SOLVABLE | SOLVER_ERASE) as i32, id); - } - AlterType::ReInstall | AlterType::Install | AlterType::Upgrade => { - queue_push2(queue_job, (SOLVER_SOLVABLE | SOLVER_INSTALL) as i32, id); - } - // AlterType::AutoErase => { - // queue_push2( - // queue_job, - // (SOLVER_SOLVABLE | SOLVER_USERINSTALLED) as i32, - // id, - // ); - // } - _ => { - bail!(ERROR_RDNF_INVALID_RESOLVE_ARG) - } - } - Ok(()) - } - pub fn solv_add_min_version(&self) { - if self.rc.conf.minversions.is_some() { - let pool = self.rc.sack.pool; - let mut map_versins = unsafe { init_map((*pool).nsolvables) }; - for ele in self.rc.conf.minversions.as_ref().unwrap() { - let (name, ver) = ele.split_once("=").unwrap(); - let mut di = create_dataiterator_empty(); - unsafe { - let m = CString::new(name).unwrap(); - dataiterator_init( - &mut di, - pool, - 0 as *mut Repo, - 0, - solv_knownid_SOLVABLE_NAME as i32, - m.as_ptr(), - SEARCH_STRING as i32, - ); - while dataiterator_step(&mut di) != 0 { - let solv = pool_id2solvable(pool, di.solvid); - let evr = solvable_lookup_str(solv, solv_knownid_SOLVABLE_EVR as i32); - let evr2 = CString::new(ver).unwrap(); - 
if pool_evrcmp_str(pool, evr, evr2.as_ptr(), EVRCMP_COMPARE as i32) < 0 { - map_set(&mut map_versins, di.solvid); - } - } - dataiterator_free(&mut di); - } - } - unsafe { - if (*pool).considered.is_null() { - (*pool).considered = libc::malloc(size_of::()) as *mut s_Map; - map_init((*pool).considered, (*pool).nsolvables); - } else { - map_grow((*pool).considered, (*pool).nsolvables); - } - map_setall((*pool).considered); - map_subtract((*pool).considered, &mut map_versins); - } - } - } - pub fn get_skip_problem_opt(&self, aler_args: &AlterOption) -> SkipProblem { - let mut skip_problem = SkipProblem { - none: false, - conflicts: false, - obsoletes: false, - disabled: false, - }; - // match self.rc.cli.command { - // crate::cli::Commands::Check => skip_problem.none = true, - // _ => { - if aler_args.skip_confilicts { - skip_problem.conflicts = true; - } - if aler_args.skip_obsolete { - skip_problem.obsoletes = true; - } - // } - // }; - skip_problem - } - pub fn goal_get_all_results_ignore_no_data( - &self, - alter_type: AlterType, - trans: *mut Transaction, - solv: *mut s_Solver, - ) -> Result { - let mut solved_pkg_info = SolvedPkgInfoBase::default(); - solved_pkg_info.to_install = - self.get_pkgs_with_specified_type(trans, SOLVER_TRANSACTION_INSTALL as i32)?; - solved_pkg_info.to_upgrade = - self.get_pkgs_with_specified_type(trans, SOLVER_TRANSACTION_UPGRADE as i32)?; - solved_pkg_info.to_downgrade = - self.get_pkgs_with_specified_type(trans, SOLVER_TRANSACTION_DOWNGRADE as i32)?; - solved_pkg_info.removed_by_downgrade = if solved_pkg_info.to_downgrade.is_some() { - let mut pkg_to_remove = init_queue(); - for pkg_info in solved_pkg_info.to_downgrade.as_ref().unwrap() { - let pkg_id = self - .rc - .sack - .solv_find_installed_pkg_by_name(pkg_info.base.name.as_str())? 
- .get_pkg_id(0); - queue_push(&mut pkg_to_remove, pkg_id); - } - if pkg_to_remove.count > 0 { - let remove_pkg_list = SolvPackageList::queue_to_pkg_list(&mut pkg_to_remove)?; - Some(PkgInfo::populate_pkg_info(&self.rc.sack, &remove_pkg_list,PkgInfoLevel::Details)?) - } else { - None - } - } else { - None - }; - solved_pkg_info.to_remove = - self.get_pkgs_with_specified_type(trans, SOLVER_TRANSACTION_ERASE as i32)?; - solved_pkg_info.un_needed = if alter_type.is_auto_erase() { - let mut queue_result = init_queue(); - unsafe { solver_get_unneeded(solv, &mut queue_result, 0) }; - if queue_result.count > 0 { - let pkg_list = SolvPackageList::queue_to_pkg_list(&mut queue_result)?; - Some(PkgInfo::populate_pkg_info(&self.rc.sack, &pkg_list,PkgInfoLevel::Details)?) - } else { - None - } - } else { - None - }; - solved_pkg_info.to_reinstall = - self.get_pkgs_with_specified_type(trans, SOLVER_TRANSACTION_REINSTALL as i32)?; - solved_pkg_info.obsoleted = - self.get_pkgs_with_specified_type(trans, SOLVER_TRANSACTION_OBSOLETED as i32)?; - Ok(solved_pkg_info) - } -} - -#[derive(Debug)] -pub struct SolvedPkgInfo { - pub need_action: u32, - pub need_download: u32, - pub not_available: Option>, - pub existing: Option>, - pub not_resolved: Vec, - pub not_installed: Option>, - pub base: SolvedPkgInfoBase, -} -#[derive(Debug)] -pub struct SolvedPkgInfoBase { - pub to_install: Option>, - pub to_downgrade: Option>, - pub to_upgrade: Option>, - pub to_remove: Option>, - pub un_needed: Option>, - pub to_reinstall: Option>, - pub obsoleted: Option>, - pub removed_by_downgrade: Option>, -} -impl SolvedPkgInfoBase { - pub fn default() -> Self { - SolvedPkgInfoBase { - to_install: None, - to_downgrade: None, - to_upgrade: None, - to_remove: None, - un_needed: None, - to_reinstall: None, - obsoleted: None, - removed_by_downgrade: None, - } - } - pub fn get_need_action(&self) -> u32 { - let mut action = 0; - if self.to_install.is_some() { - action += 1; - } - if self.to_upgrade.is_some() { - 
action += 1; - } - if self.to_downgrade.is_some() { - action += 1; - } - if self.to_remove.is_some() { - action += 1; - } - if self.un_needed.is_some() { - action += 1; - } - if self.to_reinstall.is_some() { - action += 1; - } - if self.obsoleted.is_some() { - action += 1; - } - action - } - pub fn get_need_download(&self) -> u32 { - let mut download = 0; - if self.to_install.is_some() { - download += 1; - } - if self.to_upgrade.is_some() { - download += 1; - } - if self.to_downgrade.is_some() { - download += 1; - } - if self.to_reinstall.is_some() { - download += 1; - } - download - } -} -impl SolvedPkgInfo { - pub fn default() -> Self { - SolvedPkgInfo { - need_action: 0, - need_download: 0, - not_available: None, - existing: None, - not_resolved: Vec::new(), - not_installed: None, - base: SolvedPkgInfoBase::default(), - } - } -} diff --git a/rdnf/src/gpgcheck.rs b/rdnf/src/gpgcheck.rs deleted file mode 100644 index 68e554e56a5b7b233cd075abe23f790112022427..0000000000000000000000000000000000000000 --- a/rdnf/src/gpgcheck.rs +++ /dev/null @@ -1,187 +0,0 @@ -use std::{ffi::CString, fs::File, io::Read}; - -use crate::{ - errors::{ERROR_RDNF_NO_GPGKEY_CONF_ENTRY, ERROR_RDNF_OPERATION_ABORTED, ERROR_RDNF_INVALID_PUBKEY_FILE, ERROR_RDNF_RPMTS_CREATE_FAILED, ERROR_RDNF_RPMTD_CREATE_FAILED, ERROR_RDNF_RPM_GET_RSAHEADER_FAILED, ERROR_RDNF_RPM_GPG_PARSE_FAILED, ERROR_RDNF_RPM_GPG_NO_MATCH}, - rpm_trans::{RpmTs, RPMVSF_MASK_NOSIGNATURES}, - sub_command::{install::is_remote_url, repo::RepoData}, - Rdnf, cli::AlterOption, c_lib::{char_ptr_offset}, -}; -use anyhow::{bail, Result}; -use dialoguer::{theme::ColorfulTheme, Confirm}; -use rpm_sys::ffi::{ - rpmRC_e_RPMRC_NOKEY, rpmRC_e_RPMRC_NOTTRUSTED, rpmReadPackageFile, Fclose, Fopen, Header, rpmts, pgpParsePkts, pgpArmor_e_PGPARMOR_PUBKEY, rpmtsImportPubkey, rpmtsGetKeyring, rpmKeyring_s, rpmPubkeyNew, rpmPubkeyDig, rpmKeyringLookup, rpmRC_e_RPMRC_OK, rpmKeyringAddKey, rpmtsCreate, rpmtsSetVSFlags, rpmtdNew, headerConvert, 
headerConvOps_e_HEADERCONV_RETROFIT_V3, headerGet, rpmTag_e_RPMTAG_RSAHEADER, headerGetFlags_e_HEADERGET_MINMEM, pgpNewDig, pgpPrtPkts, pgpFreeDig, headerFree, rpmtdFree, rpmtsFree, -}; - -impl Rdnf { - pub fn gpgcheck_pkg(&self, rpm_ts: &mut RpmTs, file_path: &str, repo: &RepoData,alter_args:&AlterOption) -> Result<(Header,bool)> { - let mut gpg_sig_check = false; - let mut url_gpg_keys = None; - if !(alter_args.no_gpg_check || alter_args.skip_signatures) { - if repo.base.gpgcheck { - gpg_sig_check = true; - if repo.details.url_gpg_keys.is_some() { - url_gpg_keys = repo.details.url_gpg_keys.clone(); - } - } - } - let file_path_c = CString::new(file_path).unwrap(); - let fmode = CString::new("r.ufdio").unwrap(); - let fd = unsafe { Fopen(file_path_c.as_ptr(), fmode.as_ptr()) }; - let mut rpm_header = 0 as Header; - let rpm_rc = - unsafe { rpmReadPackageFile(rpm_ts.ts, fd, file_path_c.as_ptr(), &mut rpm_header) }; - if (rpm_rc == rpmRC_e_RPMRC_NOTTRUSTED || rpm_rc == rpmRC_e_RPMRC_NOKEY) && gpg_sig_check { - if url_gpg_keys.is_none() { - bail!(ERROR_RDNF_NO_GPGKEY_CONF_ENTRY); - } - let mut matched=0; - for url in url_gpg_keys.unwrap() { - let prompt = format!("Importing key from {}, is this ok", url.as_str()); - if !Confirm::with_theme(&ColorfulTheme::default()) - .with_prompt(prompt) - .interact() - .unwrap() - { - bail!(ERROR_RDNF_OPERATION_ABORTED); - }; - let key_local_path = if is_remote_url(url.as_str()) { - self.download_key_to_cache(url.as_str(), repo)? 
- } else { - match url.split_once("file://") { - Some((_, rest)) => "/".to_string() + rest.trim_start_matches("/"), - None => url, - } - }; - import_gpgkey_file(rpm_ts.ts,key_local_path.as_str())?; - let key_ring=unsafe{rpmtsGetKeyring(rpm_ts.ts, 0)}; - if gpgcheck(key_ring,key_local_path.as_str(),file_path)?{ - matched+=1; - } - } - if matched==0{ - bail!(ERROR_RDNF_RPM_GPG_NO_MATCH); - } - unsafe { rpmReadPackageFile(rpm_ts.ts, fd, file_path_c.as_ptr(), &mut rpm_header) }; - } - unsafe { Fclose(fd) }; - Ok((rpm_header,gpg_sig_check)) - } -} -pub fn import_gpgkey_file(ts:rpmts,file_path:&str)->Result<()>{ - let mut file=File::open(file_path)?; - let mut buffer=String::new(); - file.read_to_string(&mut buffer)?; - let data_size=buffer.len(); - let mut offset=0; - let buf = CString::new(buffer).unwrap(); - let mut pkt_ptr=0 as *mut u8; - let mut pkt_len=0 as u64; - let mut keys=0; - while (offset as usize) < data_size { - unsafe{ - let buf_ptr=char_ptr_offset(buf.as_ptr(), offset); - let armor_res=pgpParsePkts(buf_ptr,&mut pkt_ptr as *mut *mut u8,&mut pkt_len as *mut u64); - if armor_res==pgpArmor_e_PGPARMOR_PUBKEY{ - if rpmtsImportPubkey(ts, pkt_ptr, pkt_len)!=0{ - bail!(ERROR_RDNF_INVALID_PUBKEY_FILE); - } - keys+=1; - } - offset+=pkt_len as i32; - - }; - } - if keys==0{ - bail!(ERROR_RDNF_INVALID_PUBKEY_FILE); - } - Ok(()) -} -pub fn gpgcheck(key_ring:*mut rpmKeyring_s,key_file_path:&str,pkg_file:&str)->Result{ - add_key_file_to_keyring(key_ring,key_file_path)?; - Ok(verify_rpm_sig(key_ring,pkg_file)?) 
-} -pub fn add_key_file_to_keyring(key_ring:*mut rpmKeyring_s,key_file_path:&str)->Result<()>{ - let mut file=File::open(key_file_path)?; - let mut buffer=String::new(); - file.read_to_string(&mut buffer)?; - let data_size=buffer.len(); - let mut offset=0; - let buf = CString::new(buffer).unwrap(); - let mut pkt_ptr=0 as *mut u8; - let mut pkt_len=0 as u64; - let mut keys=0; - while (offset as usize) < data_size { - unsafe{ - let buf_ptr=char_ptr_offset(buf.as_ptr(), offset); - let armor_res=pgpParsePkts(buf_ptr,&mut pkt_ptr as *mut *mut u8,&mut pkt_len as *mut u64); - if armor_res==pgpArmor_e_PGPARMOR_PUBKEY{ - let pubkey=rpmPubkeyNew(pkt_ptr, pkt_len); - if pubkey.is_null() { - bail!(ERROR_RDNF_INVALID_PUBKEY_FILE) - } - let sig=rpmPubkeyDig(pubkey); - if sig.is_null() { - bail!(ERROR_RDNF_INVALID_PUBKEY_FILE); - } - if rpmKeyringLookup(key_ring, sig) !=rpmRC_e_RPMRC_OK{ - rpmKeyringAddKey(key_ring, pubkey); - }; - keys+=1; - } - offset+=pkt_len as i32; - - }; - } - if keys==0{ - bail!(ERROR_RDNF_INVALID_PUBKEY_FILE); - } - Ok(()) -} -pub fn verify_rpm_sig(key_ring:*mut rpmKeyring_s,pkg_file:&str)->Result{ - let pkg_file=CString::new(pkg_file).unwrap(); - let mode=CString::new("r.fdio").unwrap(); - let ts=unsafe{rpmtsCreate()}; - if ts.is_null() { - bail!(ERROR_RDNF_RPMTS_CREATE_FAILED); - } - unsafe{rpmtsSetVSFlags(ts, RPMVSF_MASK_NOSIGNATURES)}; - let td=unsafe{rpmtdNew()}; - if td.is_null() { - bail!(ERROR_RDNF_RPMTD_CREATE_FAILED); - } - let fd=unsafe{Fopen(pkg_file.as_ptr(), mode.as_ptr())}; - let b=unsafe{ - let mut header=0 as Header; - rpmReadPackageFile(ts, fd, pkg_file.as_ptr(), &mut header); - if headerConvert(header, headerConvOps_e_HEADERCONV_RETROFIT_V3 as i32)==0{ - bail!(ERROR_RDNF_RPM_GET_RSAHEADER_FAILED); - }; - if headerGet(header, rpmTag_e_RPMTAG_RSAHEADER, td, headerGetFlags_e_HEADERGET_MINMEM)==0{ - bail!(ERROR_RDNF_RPM_GET_RSAHEADER_FAILED); - }; - let digest=pgpNewDig(); - if pgpPrtPkts((*td).data as *const u8, (*td).count as u64, digest, 
0) !=0{ - bail!(ERROR_RDNF_RPM_GPG_PARSE_FAILED); - } - let b=rpmKeyringLookup(key_ring, digest)==rpmRC_e_RPMRC_OK; - if !digest.is_null() { - pgpFreeDig(digest); - } - if !header.is_null() { - headerFree(header); - } - b - }; - unsafe{ - if !fd.is_null() { - Fclose(fd); - } - if !td.is_null() { - rpmtdFree(td); - } - if !ts.is_null() { - rpmtsFree(ts); - } - } - Ok(b) -} \ No newline at end of file diff --git a/rdnf/src/hash.rs b/rdnf/src/hash.rs new file mode 100644 index 0000000000000000000000000000000000000000..2f34f5f48ca53c74fdf13fbe39c413dbdc2f3534 --- /dev/null +++ b/rdnf/src/hash.rs @@ -0,0 +1,61 @@ +use anyhow::{bail, Result}; +use sha1::Digest; +use sha1::Sha1; +use sha2::{Sha256, Sha512}; +use std::io::Read; +use std::path::Path; +use std::{fs::File, io}; +pub enum HashKind { + MD5, + SHA1, + SHA256, + SHA512, + Invalid, +} +impl From<&str> for HashKind { + #[inline] + fn from(kind: &str) -> Self { + match kind.as_bytes() { + b"md5" => Self::MD5, + b"sha1" => Self::SHA1, + b"sha256" => Self::SHA256, + b"sha512" => Self::SHA512, + _ => Self::Invalid, + } + } +} +impl HashKind { + #[inline] + pub fn checksum>(&self, file: P, hash: &str) -> Result { + let mut file = File::open(file)?; + let sum = match self { + HashKind::MD5 => { + let mut buf = Vec::new(); + file.read_to_end(&mut buf)?; + hex::encode(md5::compute(buf).0) + } + HashKind::SHA1 => { + let mut hasher = Sha1::new(); + io::copy(&mut file, &mut hasher)?; + hex::encode(hasher.finalize()) + } + HashKind::SHA256 => { + let mut hasher = Sha256::new(); + io::copy(&mut file, &mut hasher)?; + + hex::encode(hasher.finalize()) + } + + HashKind::SHA512 => { + let mut hasher = Sha512::new(); + io::copy(&mut file, &mut hasher)?; + hex::encode(hasher.finalize()) + } + HashKind::Invalid => { + bail!("Invalid hash algorithm") + } + }; + Ok(sum == hash) + } +} + diff --git a/rdnf/src/history.rs b/rdnf/src/history.rs deleted file mode 100644 index 
3f09609357bde5cd4e24f0a965f0245f5317e0bc..0000000000000000000000000000000000000000 --- a/rdnf/src/history.rs +++ /dev/null @@ -1,29 +0,0 @@ -use std::{path::Path, fs::OpenOptions}; -use rusqlite::Connection; - -use crate::{Rdnf, default::{DEFAULT_DATA_LOCATION, HISTORY_DB_FILE}}; - -pub struct HistoryCtx{ - -} -impl HistoryCtx{ - pub fn new(path:&str)->Self{ - let db=Connection::open(path)?; - // "transactions" - - } - -} -impl Rdnf{ - pub fn get_history_ctx(&self)->Result{ - let history_db_path=self.rc.cli.installroot.trim_end_matches("/").to_string() - +DEFAULT_DATA_LOCATION.trim_end_matches("/")+"/"+HISTORY_DB_FILE; - if !Path::new(history_db_path.as_str()).exists(){ - OpenOptions::new().create(true).open(history_db_path.as_str())?; - }; - } - -} -pub fn db_table_exists(db:&Connection){ - -} \ No newline at end of file diff --git a/rdnf/src/i18n.rs b/rdnf/src/i18n.rs index 70f0aff22346991c917ac69b38965ebd934e3a58..07cee5cb7304d003a644ee728b25f3d01877dc79 100644 --- a/rdnf/src/i18n.rs +++ b/rdnf/src/i18n.rs @@ -1,73 +1,64 @@ -#[cfg(feature="en_US")] -pub mod repo_list{ - pub const REPOLIST_REPO_ID:&str="repo id"; - pub const REPOLIST_REPO_NAME:&str="repo name"; - pub const REPOLIST_REPO_STATUS:&str="status"; - pub const REPOLIST_REPO_STATUS_ENABLED:&str="enabled"; - pub const REPOLIST_REPO_STATUS_DISABLED:&str="disabled"; - +#[cfg(feature = "en_US")] +pub mod repo_list { + pub const REPOLIST_REPO_ID: &str = "repo id"; + pub const REPOLIST_REPO_NAME: &str = "repo name"; + pub const REPOLIST_REPO_STATUS: &str = "status"; + pub const REPOLIST_REPO_STATUS_ENABLED: &str = "enabled"; + pub const REPOLIST_REPO_STATUS_DISABLED: &str = "disabled"; } -#[cfg(feature="zh_CN")] -pub mod repo_list{ - pub const REPOLIST_REPO_ID:&str="仓库 id"; - pub const REPOLIST_REPO_NAME:&str="仓库名称"; - pub const REPOLIST_REPO_STATUS:&str="状态"; - pub const REPOLIST_REPO_STATUS_ENABLED:&str="启用"; - pub const REPOLIST_REPO_STATUS_DISABLED:&str="禁用"; +#[cfg(feature = "zh_CN")] +pub mod repo_list { 
+ pub const REPOLIST_REPO_ID: &str = "仓库 id"; + pub const REPOLIST_REPO_NAME: &str = "仓库名称"; + pub const REPOLIST_REPO_STATUS: &str = "状态"; + pub const REPOLIST_REPO_STATUS_ENABLED: &str = "启用"; + pub const REPOLIST_REPO_STATUS_DISABLED: &str = "禁用"; } -#[cfg(feature="en_US")] -pub mod action_alter{ - pub const ACTION_ALTER_INTALL:&str="Installing"; - pub const ACTION_ALTER_UPGRADE:&str="Upgrading"; - pub const ACTION_ALTER_ERASE:&str="Removing"; - pub const ACTION_ALTER_DOWNGRADE:&str="Downgrading"; - pub const ACTION_ALTER_REINSTALL:&str="Reinstalling"; - pub const ACTION_ALTER_OBSOLETED:&str="Obsoleting"; +#[cfg(feature = "en_US")] +pub mod action_alter { + pub const ACTION_ALTER_INTALL: &str = "Installing"; + pub const ACTION_ALTER_UPGRADE: &str = "Upgrading"; + pub const ACTION_ALTER_ERASE: &str = "Removing"; + pub const ACTION_ALTER_DOWNGRADE: &str = "Downgrading"; + pub const ACTION_ALTER_REINSTALL: &str = "Reinstalling"; + pub const ACTION_ALTER_OBSOLETED: &str = "Obsoleting"; } -#[cfg(feature="zh_CN")] -pub mod action_alter{ - pub const ACTION_ALTER_INTALL:&str="安装"; - pub const ACTION_ALTER_UPGRADE:&str="升级"; - pub const ACTION_ALTER_ERASE:&str="卸载"; - pub const ACTION_ALTER_DOWNGRADE:&str="降级"; - pub const ACTION_ALTER_REINSTALL:&str="重新安装"; - pub const ACTION_ALTER_OBSOLETED:&str="废弃"; +#[cfg(feature = "zh_CN")] +pub mod action_alter { + pub const ACTION_ALTER_INTALL: &str = "安装"; + pub const ACTION_ALTER_UPGRADE: &str = "升级"; + pub const ACTION_ALTER_ERASE: &str = "卸载"; + pub const ACTION_ALTER_DOWNGRADE: &str = "降级"; + pub const ACTION_ALTER_REINSTALL: &str = "重新安装"; + pub const ACTION_ALTER_OBSOLETED: &str = "废弃"; } - -#[cfg(feature="en_US")] -pub mod pkg_info{ - pub const PKG_INFO_NAME___:&str="Name "; - pub const PKG_INFO_ARCH___:&str="Arch "; - pub const PKG_INFO_EPOCH__:&str="Epoch "; - pub const PKG_INFO_VERSION:&str="Version "; - pub const PKG_INFO_RELEASE:&str="Release "; - pub const PKG_INFO_SIZE___:&str="Size "; - pub const 
PKG_INFO_REPO___:&str="Repo "; - pub const PKG_INFO_SUMMARY:&str="Summary "; - pub const PKG_INFO_URL____:&str="URL "; - pub const PKG_INFO_LICENSE:&str="License "; - pub const PKG_INFO_DESC___:&str="Description"; +#[cfg(feature = "en_US")] +pub mod pkg_info { + pub const PKG_INFO_NAME___: &str = "Name "; + pub const PKG_INFO_ARCH___: &str = "Arch "; + pub const PKG_INFO_EPOCH__: &str = "Epoch "; + pub const PKG_INFO_VERSION: &str = "Version "; + pub const PKG_INFO_RELEASE: &str = "Release "; + pub const PKG_INFO_SIZE___: &str = "Size "; + pub const PKG_INFO_REPO___: &str = "Repo "; + pub const PKG_INFO_SUMMARY: &str = "Summary "; + pub const PKG_INFO_URL____: &str = "URL "; + pub const PKG_INFO_LICENSE: &str = "License "; + pub const PKG_INFO_DESC___: &str = "Description"; } -#[cfg(feature="zh_CN")] -pub mod pkg_info{ - pub const PKG_INFO_NAME___:&str="名称 "; - pub const PKG_INFO_ARCH___:&str="架构 "; - pub const PKG_INFO_EPOCH__:&str="时期 "; - pub const PKG_INFO_VERSION:&str="版本 "; - pub const PKG_INFO_RELEASE:&str="发布 "; - pub const PKG_INFO_SIZE___:&str="大小 "; - pub const PKG_INFO_REPO___:&str="仓库 "; - pub const PKG_INFO_SUMMARY:&str="概况 "; - pub const PKG_INFO_URL____:&str="URL "; - pub const PKG_INFO_LICENSE:&str="协议 "; - pub const PKG_INFO_DESC___:&str="描述 "; +#[cfg(feature = "zh_CN")] +pub mod pkg_info { + pub const PKG_INFO_NAME___: &str = "名称 "; + pub const PKG_INFO_ARCH___: &str = "架构 "; + pub const PKG_INFO_EPOCH__: &str = "时期 "; + pub const PKG_INFO_VERSION: &str = "版本 "; + pub const PKG_INFO_RELEASE: &str = "发布 "; + pub const PKG_INFO_SIZE___: &str = "大小 "; + pub const PKG_INFO_REPO___: &str = "仓库 "; + pub const PKG_INFO_SUMMARY: &str = "概况 "; + pub const PKG_INFO_URL____: &str = "URL "; + pub const PKG_INFO_LICENSE: &str = "协议 "; + pub const PKG_INFO_DESC___: &str = "描述 "; } - - - - - - - diff --git a/rdnf/src/main.rs b/rdnf/src/main.rs index c7132dc4e24a0274e82286678cae31e66652684d..2fa8e3fdb3cad67a1d1f9f4b2e99978f99b53f15 100644 --- a/rdnf/src/main.rs 
+++ b/rdnf/src/main.rs @@ -1,141 +1,102 @@ use anyhow::Result; use clap::Parser; -use console::Term; -use indicatif::MultiProgress; -use solv_sys::ffi::Repo; - -use cli::{rpm_init, Cli, Commands}; -use conf::ConfigMain; - -use solv::sack::Solvsack; -use sub_command::{ - install::AlterType, - repo::{init_cmdline_repo, load_repo_data, repo_list_finalize, RepoData, RepoListFilter}, +use cli::{Cli, Commands}; +use command::alter::AlterType; +use conf::{ + config_main::RdnfConfig, + repo_conf::{finalize_repos, RepoConfig}, }; -use utils::is_already_running; - -use crate::utils::check_root; - -mod sub_command; +use console::Term; +use flock::{is_already_running, lock_rpmdb}; -mod c_lib; +use utils::check_root; +pub mod cache; mod cli; +mod command; mod conf; mod default; -mod errors; -mod goal; -mod gpgcheck; +mod error; +mod flock; +mod hash; mod i18n; -mod lock; -mod metalink; -mod output; -mod pkgutils; -mod repomd; -mod rpm_trans; -mod solv; +pub mod repo; +pub mod solve; mod utils; -#[derive(Debug, Clone)] pub struct Rdnf { - rc: RdnfContext, - repos: Vec, - solv_cmdline_repo: *mut Repo, -} -#[derive(Debug, Clone)] -pub struct RdnfContext { - sack: Solvsack, cli: Cli, - conf: ConfigMain, - multi_process: MultiProgress, + conf: RdnfConfig, + repo_confs: Vec, term: Term, } impl Rdnf { pub fn new() -> Result { is_already_running()?; - rpm_init()?; - let mut cli = Cli::parse().init()?; - let conf = ConfigMain::from(&mut cli)?; - let mut sack = Solvsack::from(&conf, &cli)?; - let mut repos = load_repo_data(&conf, RepoListFilter::All)?; - repos.sort_by(|a, b| a.base.priority.cmp(&(b.base.priority))); - repos.sort_by(|a, b| a.psz_id.cmp(&b.psz_id)); - repo_list_finalize(&mut cli, &conf, &mut repos)?; - let solv_cmdline_repo = init_cmdline_repo(&mut sack)?; + lock_rpmdb()?; + let cli = Cli::parse().init(); + let conf = RdnfConfig::read(&cli.config_file_path)?; + let mut repo_confs = RepoConfig::read_all(&conf.config_main.repodir)?; + finalize_repos(&cli, &conf, &mut 
repo_confs)?; let term = Term::stdout(); - let multi_process = MultiProgress::new(); - let rc = RdnfContext { - sack, + Ok(Rdnf { cli, conf, - multi_process, + repo_confs, term, - }; - let rdnf = Rdnf { - rc, - repos, - solv_cmdline_repo, - }; - Ok(rdnf) - } - pub fn refresh_sack() -> Result<()> { - Ok(()) + }) } } - -impl AsRef for Rdnf { - #[inline] - fn as_ref(&self) -> &Self { - self - } -} -// #[tokio::main(flavor = "current_thread")] -fn main() -> Result<()> { +// #[tokio::main(flavor="current_thread")] +#[tokio::main] +async fn main() -> Result<()> { let mut rdnf = Rdnf::new()?; - match &rdnf.rc.cli.command { - Commands::Repolist(_) => { - rdnf.repo_list()?; - } + match rdnf.cli.command { + Commands::Repolist(_) => rdnf.repo_list()?, Commands::Makecache => { - rdnf.rc.cli.refresh = true; - rdnf.make_cache()?; - } - Commands::Search { pkgs } => { - let pkgs = pkgs.clone(); - rdnf.search_pkg(pkgs)?; + rdnf.cli.refresh = true; + let _repos = + Rdnf::make_cache(rdnf.repo_confs, rdnf.conf.config_main, rdnf.cli, rdnf.term) + .await?; } - Commands::Install(alter) => { + Commands::Install(ref alter) => { check_root()?; let pkgs = alter.pkgs.clone(); - let alter = alter.clone(); - rdnf.alter_command(pkgs, AlterType::Install, &alter)?; + let alter_arg = alter.clone(); + Rdnf::alter_command(rdnf, pkgs, AlterType::Install, alter_arg).await?; } - Commands::Remove(alter)=>{ - check_root()?; - let pkgs=alter.pkgs.clone(); - let alter=alter.clone(); - rdnf.alter_command(pkgs, AlterType::Erase, &alter)?; - } - Commands::Reinstall(alter) => { - check_root()?; - let pkgs=alter.pkgs.clone(); - let alter=alter.clone(); - rdnf.alter_command(pkgs, AlterType::ReInstall, &alter)?; - - }, - Commands::Update(alter) => { - check_root()?; - let pkgs = alter.pkgs.clone(); - let alter=alter.clone(); - if pkgs.is_empty(){ - rdnf.alter_command(pkgs, AlterType::UpgradeAll, &alter)?; - }else{ - rdnf.alter_command(pkgs, AlterType::Upgrade, &alter)?; - } - }, - Commands::Info(info_opt) => { - 
rdnf.info_command(info_opt.clone())?; - - }, - + // Commands::Makecache()=>rdnf.make_cache()?, + _ => {} } Ok(()) + // println!("Hello, world!"); +} +#[test] +fn verify_cli() { + use clap::CommandFactory; + Cli::command().debug_assert(); +} + +#[cfg(test)] +mod tests { + use anyhow::Result; + use assert_cmd::Command; + fn get_cmd() -> Result { + Ok(Command::cargo_bin("rdnf")?) + } + #[test] + fn test_repo_list() -> Result<()> { + let mut cmd = get_cmd()?; + cmd.arg("install"); + cmd.arg("389-ds-base"); + // cmd.arg("python3-ldap"); + // cmd.arg("vim-enhanced"); + // dbg!(cmd.ok()); + let p = String::from_utf8(cmd.output().unwrap().stderr).unwrap(); + println!( + "{} + ", + p + ); + Ok(()) + } } +//ncurses-libs diff --git a/rdnf/src/metalink.rs b/rdnf/src/metalink.rs deleted file mode 100644 index e3f0a21072147dd1a69615b957754559a9f5bfe7..0000000000000000000000000000000000000000 --- a/rdnf/src/metalink.rs +++ /dev/null @@ -1,171 +0,0 @@ -use std::{fs::File, io::Read}; - -use anyhow::{bail, Result}; -use quick_xml::{events::Event, Reader}; - -use crate::sub_command::repoutils::HashKind; -#[derive(Debug, Clone)] -pub struct MetalinkHashInfo { - pub kind: HashKind, - pub value: String, -} -#[derive(Debug, Clone)] -pub struct MetalinkUrlInfo { - pub protocol: String, - pub kind: String, - pub location: String, - pub preference: i32, - pub url: String, -} -#[derive(Debug, Clone)] -pub struct MetalinkContext { - pub filename: String, - pub timestamp: u128, - pub size: u128, - pub hashs: Vec, - pub urls: Vec, -} -impl MetalinkContext { - pub fn from(path: &str) -> Result> { - let mut buffer = String::new(); - File::open(path).unwrap().read_to_string(&mut buffer)?; - let mut reader = Reader::from_str(buffer.as_str()); - reader.trim_text(true); - let mut files = Vec::new(); - loop { - match reader.read_event() { - Ok(Event::Start(e)) => { - if e.name().as_ref() == b"file" { - for attr in e.attributes() { - let attr = attr?; - if String::from_utf8_lossy(attr.key.as_ref()) == 
"name" { - let mut mc = MetalinkContext { - filename: String::from_utf8_lossy(attr.value.as_ref()) - .to_string(), - timestamp: 0, - size: 0, - hashs: Vec::new(), - urls: Vec::new(), - }; - loop { - match reader.read_event()? { - Event::Start(ele) => match ele.name().as_ref() { - b"size" => { - mc.size = reader - .read_text(ele.name())? - .parse::()?; - } - b"hash" => { - for attr in ele.attributes() { - let attr = attr?; - if String::from_utf8_lossy(attr.key.as_ref()) - == "type" - { - let hash_info = MetalinkHashInfo { - kind: HashKind::from( - String::from_utf8_lossy( - attr.value.as_ref(), - ) - .to_string() - .as_str(), - ), - value: reader - .read_text(ele.name())? - .to_string(), - }; - mc.hashs.push(hash_info); - } - } - } - b"url" => { - let mut protocol = String::from("https"); - let mut kind = String::from("https"); - let mut location = String::from("US"); - let mut preference = 100; - for attr in ele.attributes() { - let attr = attr?; - let key = - String::from_utf8_lossy(attr.key.as_ref()); - let value = String::from_utf8_lossy( - attr.value.as_ref(), - ); - if key == "protocol" { - protocol = value.to_string(); - } else if key == "type" { - kind = value.to_string(); - } else if key == "location" { - location = value.to_string(); - } else if key == "preference" { - preference = value.parse::()?; - } - } - let url = reader.read_text(ele.name())?.to_string(); - let url_info = MetalinkUrlInfo { - protocol, - kind, - location, - preference, - url, - }; - if url_info - .protocol - .matches("http") - .collect::>() - .len() - >= 1 - { - mc.urls.push(url_info); - } - } - _ => { - if String::from_utf8_lossy(ele.name().as_ref()) - .matches("timestamp") - .collect::>() - .len() - >= 1 - { - mc.timestamp = reader - .read_text(ele.name())? 
- .parse::()?; - } - } - }, - Event::End(ele) => { - if ele.name().as_ref() == b"file" { - break; - } - } - _ => {} - } - } - mc.urls.sort_by(|a, b| b.preference.cmp(&a.preference)); - files.push(mc); - } - } - } - } - Ok(Event::Eof) => break, // exits the loop when reaching end of file - Err(e) => panic!("Error at position {}: {:?}", reader.buffer_position(), e), - _ => (), // There are several other `Event`s we do not consider here - } - } - Ok(files) - } - pub fn from_with_filename(path: &str, name: &str) -> Result { - let vec = Self::from(path)?; - let mut t = None; - for ele in vec { - if ele.filename == name { - t = Some(ele); - break; - } - } - let t = match t { - Some(t) => t, - None => { - bail!("check metalink url") - } - }; - Ok(t) - } -} diff --git a/rdnf/src/output.rs b/rdnf/src/output.rs deleted file mode 100644 index c40390e4bd692c7a06d24d9c8b98cb038aceb468..0000000000000000000000000000000000000000 --- a/rdnf/src/output.rs +++ /dev/null @@ -1,149 +0,0 @@ -use console::{style, Term}; - -use crate::{ - errors::ERROR_RDNF_INVALID_PARAMETER, - goal::{SolvedPkgInfo}, - i18n::action_alter::{ - ACTION_ALTER_DOWNGRADE, ACTION_ALTER_ERASE, ACTION_ALTER_INTALL, ACTION_ALTER_OBSOLETED, - ACTION_ALTER_REINSTALL, ACTION_ALTER_UPGRADE, - }, - sub_command::{install::AlterType, info::PkgInfo}, - utils::format_size, -}; -use anyhow::{bail, Result}; -impl SolvedPkgInfo { - pub fn print(&self, term: &Term) -> Result<()> { - if self.existing.is_some() { - for pkg_info in self.existing.as_ref().unwrap() { - let base = &pkg_info.base; - let name = format!( - "{}-{}-{}.{}", - style(base.name.as_str()).green(), - base.version, - base.release, - base.arch - ); - term.write_line(&format!("Package {} is already installed, skipping", name))?; - } - } - if self.not_available.is_some() { - for pkg_info in self.not_available.as_ref().unwrap() { - term.write_line(&format!( - "No package {} available.", - style(pkg_info.base.name.as_str()).red() - ))?; - } - } - if 
self.base.to_install.is_some() { - PkgInfo::print_action( - self.base.to_install.as_ref().unwrap(), - term, - AlterType::Install, - )?; - } - if self.base.to_upgrade.is_some() { - PkgInfo::print_action( - self.base.to_upgrade.as_ref().unwrap(), - term, - AlterType::Upgrade, - )?; - } - if self.base.to_downgrade.is_some() { - PkgInfo::print_action( - self.base.to_downgrade.as_ref().unwrap(), - term, - AlterType::DownGrade, - )?; - } - if self.base.to_remove.is_some() { - PkgInfo::print_action( - self.base.to_remove.as_ref().unwrap(), - term, - AlterType::Erase, - )?; - } - if self.base.un_needed.is_some() { - PkgInfo::print_action( - self.base.un_needed.as_ref().unwrap(), - term, - AlterType::Erase, - )?; - } - if self.base.to_reinstall.is_some() { - PkgInfo::print_action( - self.base.to_reinstall.as_ref().unwrap(), - term, - AlterType::ReInstall, - )?; - } - if self.base.obsoleted.is_some() { - PkgInfo::print_action( - self.base.obsoleted.as_ref().unwrap(), - term, - AlterType::Obsoleted, - )?; - } - Ok(()) - } -} -impl PkgInfo { - pub fn print_action(pkg_infos: &Vec, term: &Term, alter_type: AlterType) -> Result<()> { - term.write_line((alter_type.to_str()?.to_string() + ":").as_str())?; - let (_, width) = term.size(); - let width_float = width as f32; - let name_col = (width_float * 0.3) as usize; - let arch_col = (width_float * 0.15) as usize; - let evr_col = (width_float * 0.25) as usize; - let repo_col = (width_float * 0.15) as usize; - let install_size_col = (width_float * 0.15) as usize; - let mut total_size = 0; - for pkg_info in pkg_infos { - let base = &pkg_info.base; - let evr = if base.epoch == 0 { - format!("{}-{}", base.version, base.release) - } else { - format!("{}:{}-{}", base.epoch, base.version, base.release) - }; - total_size += pkg_info.details.install_size; - let item = format!( - "{: Result<&str> { - let p = match self { - Self::Install => ACTION_ALTER_INTALL, - Self::Upgrade => ACTION_ALTER_UPGRADE, - Self::Erase => ACTION_ALTER_ERASE, - 
Self::DownGrade => ACTION_ALTER_DOWNGRADE, - Self::ReInstall => ACTION_ALTER_REINSTALL, - Self::Obsoleted => ACTION_ALTER_OBSOLETED, - _ => { - bail!(ERROR_RDNF_INVALID_PARAMETER) - } - }; - Ok(p) - } -} diff --git a/rdnf/src/pkgutils.rs b/rdnf/src/pkgutils.rs deleted file mode 100644 index fd3772aae1372595a05fd7a216423ffddb54b908..0000000000000000000000000000000000000000 --- a/rdnf/src/pkgutils.rs +++ /dev/null @@ -1,176 +0,0 @@ -use std::ffi::CStr; - -use anyhow::{bail, Result}; -use solv_sys::ffi::{Queue}; - -use crate::{ - c_lib::{pool_id2solvable, queue_push}, - default::RDNF_NAME, - errors::{ERROR_RDNF_NO_DATA, ERROR_RDNF_NO_MATCH, ERROR_RDNF_SELF_ERASE}, - goal::{SolvedPkgInfoBase}, - solv::{sack::Solvsack, SolvPackageList}, - Rdnf, sub_command::info::{PkgInfo, PkgInfoLevel}, -}; - -impl Solvsack { - pub fn get_glob_pkgs(&self,pkg_glob:&str,queue_goal: *mut Queue)->Result<()>{ - let pkg_list=self.solv_find_available_pkg_by_name(pkg_glob)?; - if pkg_list.get_size() >0{ - for index in 0..pkg_list.get_size(){ - let id=pkg_list.get_pkg_id(index); - queue_push(queue_goal, id); - } - } - Ok(()) - } - pub fn add_pkgs_for_install(&self, queue_goal: *mut Queue, pkg_name: &str) -> Result { - let highest_id = self.solv_find_highest_available(pkg_name)?; - if self.verify_install_pkg(highest_id)? 
{ - queue_push(queue_goal, highest_id); - Ok(true) - } else { - Ok(false) - } - } - pub fn add_pkgs_for_upgrade(&self,queue_goal: *mut Queue,pkg_name: &str)->Result{ - let highest_id=self.solv_find_highest_available(pkg_name)?; - if self.verify_upgrade_pkg(highest_id)?{ - queue_push(queue_goal, highest_id); - Ok(true) - }else{ - Ok(false) - } - } - pub fn add_pkgs_for_erase(&self,queue_goal: *mut Queue,pkg_name: &str)->Result{ - match self.solv_find_installed_pkg_by_name(pkg_name) { - Ok(install_pkg_list) => { - let count = install_pkg_list.get_size(); - for index in 0..count{ - let id=install_pkg_list.get_pkg_id(index); - queue_push(queue_goal, id); - } - }, - Err(_) => {bail!("Package {} don't be installed,can't be removed",pkg_name)}, - }; - Ok(true) - } - pub fn add_pkgs_for_reinstall(&self,queue_goal: *mut Queue,pkg_name: &str)->Result<()>{ - match self.solv_find_installed_pkg_by_name(pkg_name) { - Ok(install_pkg_list) => { - let installed_id=install_pkg_list.get_pkg_id(0); - let nevr=self.solv_get_pkg_nevr_by_id(installed_id)?; - let available_pkg_list=self.solv_find_available_pkg_by_name(nevr.as_str())?; - let available_pkg_id=available_pkg_list.get_pkg_id(0); - queue_push(queue_goal, available_pkg_id); - }, - Err(_) => {bail!("Package {} don't be installed,can't be reinstalled",pkg_name)}, - }; - Ok(()) - } - - pub fn verify_install_pkg(&self, id: i32) -> Result { - let pkg_name = self.solv_get_pkg_name_by_id(id)?; - let installed_id = match self.solv_find_highest_installed(pkg_name.as_str()) { - Ok(s) => s, - Err(_) => { - return Ok(true); - } - }; - let evr_cmp = self.solv_cmp_evr(id, installed_id)?; - Ok(evr_cmp != 0) - } - pub fn verify_upgrade_pkg(&self,id:i32)->Result{ - let pkg_name=self.solv_get_pkg_name_by_id(id)?; - let intalled_id=match self.solv_find_highest_installed(pkg_name.as_str()) { - Ok(s) => {s}, - Err(_) => {return Ok(true);}, - }; - let result=match self.solv_cmp_evr(id, intalled_id) { - Ok(evr) => {evr >0}, - Err(_) => {true}, - }; - 
Ok(result) - } - pub fn solv_get_pkg_reponame_by_id(&self, pkg_id: i32) -> Result<&str> { - let solv = pool_id2solvable(self.pool, pkg_id); - if solv.is_null() { - bail!(ERROR_RDNF_NO_DATA); - }; - let name_ptr = unsafe { (*(*solv).repo).name }; - if name_ptr.is_null() { - bail!(ERROR_RDNF_NO_DATA); - }; - Ok(unsafe { CStr::from_ptr(name_ptr).to_str()? }) - } -} -impl PkgInfo { - pub fn populate_pkg_info(sack: &Solvsack, pkg_list: &SolvPackageList,level:PkgInfoLevel) -> Result> { - let count = pkg_list.get_size(); - if count == 0 { - bail!(ERROR_RDNF_NO_MATCH); - }; - let mut pkginfos = Vec::new(); - for index in 0..count { - let pkg_id = pkg_list.get_pkg_id(index); - match level { - _ => {}, - } - let base = sack.solv_get_pkginfo_base_by_id(pkg_id)?; - let details = sack.solv_get_pkginfo_details_by_id(pkg_id)?; - let other=match level { - PkgInfoLevel::Other => { - Some(sack.solv_get_pkginfo_other_by_id(pkg_id)?) - }, - _=>{None} - }; - pkginfos.push(PkgInfo { - base, - details, - other, - }); - } - Ok(pkginfos) - } -} -impl Rdnf { - pub fn pkgs_to_exclude(&self) -> Result> { - let mut count = 0; - let mut exclude_pkgs = Vec::new(); - if !self.rc.cli.disable_excludes && self.rc.conf.excludepkgs.is_some() { - println!("Warning: The following packages are excluded from rdnf.conf"); - for ele in self.rc.conf.excludepkgs.as_ref().unwrap() { - print!("{}/t", ele); - exclude_pkgs.push(ele.clone()); - count += 1; - if count % 3 == 0 { - print!("\n"); - } - } - } - if !self.rc.cli.disable_excludes && self.rc.cli.exclude.is_some() { - for ele in self.rc.cli.exclude.as_ref().unwrap() { - exclude_pkgs.push(ele.clone()); - } - } - Ok(exclude_pkgs) - } -} -impl SolvedPkgInfoBase { - pub fn check_protected_pkgs(&self) -> Result<()> { - if self.to_remove.is_some() { - for pkg_info in self.to_remove.as_ref().unwrap() { - if pkg_info.base.name == RDNF_NAME { - bail!(ERROR_RDNF_SELF_ERASE) - } - } - } - if self.obsoleted.is_some() { - for pkg_info in 
self.obsoleted.as_ref().unwrap() { - if pkg_info.base.name == RDNF_NAME { - bail!(ERROR_RDNF_SELF_ERASE) - } - } - } - Ok(()) - } -} diff --git a/rdnf/src/repo.rs b/rdnf/src/repo.rs new file mode 100644 index 0000000000000000000000000000000000000000..48a0f6d348025fa539fc0b7198760eeec6309fac --- /dev/null +++ b/rdnf/src/repo.rs @@ -0,0 +1,139 @@ +use std::{fmt::Display, ops::Deref, path::Path, sync::Arc}; + +use indradb::{MemoryDatastore, RocksdbDatastore}; +use rpm::{RPMPackage, RPMPackageMetadata}; +use uuid::Uuid; + +use crate::{ + cache::{ + parse_rpm::read_metadata_from_rpm_headers, + parse_xml::{PkgDetailState, XmlState}, + repo_data::RepoData, + }, + conf::repo_conf::RepoConfig, +}; +use anyhow::{bail, Result}; +pub struct Repo { + pub data: RepoData, + pub config: Arc, +} +impl Deref for Repo { + type Target = RepoData; + + fn deref(&self) -> &Self::Target { + &self.data + } +} +impl Clone for Repo { + fn clone(&self) -> Self { + Self { + data: self.data.clone(), + config: self.config.clone(), + } + } +} +pub struct CmdRepo { + pub data: RepoData, +} +impl Clone for CmdRepo { + fn clone(&self) -> Self { + Self { + data: self.data.clone(), + } + } +} +impl Deref for CmdRepo { + type Target = RepoData; + + fn deref(&self) -> &Self::Target { + &self.data + } +} +impl CmdRepo { + pub async fn new + Display>( + paths: Vec

, + ) -> Result<(Self, Vec<(Uuid, String)>)> { + let db = Arc::new(MemoryDatastore::new_db()); + db.index_property(PkgDetailState::Name.as_identifier()) + .unwrap(); + db.index_property(XmlState::Entry.as_identifier().unwrap()) + .unwrap(); + let mut headers = Vec::new(); + let mut pkg_infos = Vec::new(); + for path in paths.iter() { + match RPMPackage::open(path) { + Ok(rpm) => { + let pkg_uuid = get_uuid(&rpm.metadata); + let pkg_name = rpm.metadata.get_name().unwrap().to_string(); + headers.push((pkg_uuid, rpm.metadata.header)); + pkg_infos.push((pkg_uuid, pkg_name)); + } + Err(e) => { + bail!( + "Can't parse rpm file {} to get headers , because {}", + path, + e + ) + } + }; + } + read_metadata_from_rpm_headers(db.clone(), 1, headers).await?; + RepoData::new(&db); + Ok(( + Self { + data: RepoData::new(&db), + }, + pkg_infos, + )) + } +} +fn get_uuid(metadata: &RPMPackageMetadata) -> Uuid { + match metadata.get_md5() { + Ok(b) => { + match Uuid::from_slice(b) { + Ok(s) => { + return s; + } + Err(_) => {} + }; + } + Err(_) => {} + } + return *uuid::Builder::from_md5_bytes(md5::compute(&metadata.header).0).as_uuid(); +} +#[tokio::test] +async fn test_rpm() { + // dbg!(CmdRepo::new(vec!["tests/389-ds-base-2.2.3-1.fc37.x86_64.rpm"]).await.unwrap()); + let (cmd_repo, _pkg_uuids) = CmdRepo::new(vec!["tests/389-ds-base-2.2.3-1.fc37.x86_64.rpm"]) + .await + .unwrap(); + // dbg!(cmd_repo.data.count_pkg().await); + let _detail = cmd_repo.data.get_pkg_detail_by_name("389-ds-base").await; + let pkg_uuid = cmd_repo + .data + .clone() + .get_pkg_uuid_by_pkg_name(Arc::new("389-ds-base".to_string())) + .unwrap(); + let conflicts = cmd_repo.data.get_pkg_conflicts_by_uuid(pkg_uuid).await; + // dbg!(detail); + dbg!(conflicts); +} +#[tokio::test] +async fn test_rpma() { + // dbg!(CmdRepo::new(vec!["tests/389-ds-base-2.2.3-1.fc37.x86_64.rpm"]).await.unwrap()); + let (cmd_repo, _pkg_uuids) = CmdRepo::new(vec!["tests/kdevelop-22.08.1-2.fc37.x86_64.rpm"]) + .await + .unwrap(); + // 
dbg!(cmd_repo.data.count_pkg().await); + let detail = cmd_repo.data.get_pkg_detail_by_name("kdevelop").await; + dbg!(detail); + // let pkg_uuid = cmd_repo + // .data + // .clone() + // .get_pkg_uuid_by_pkg_name(Arc::new("389-ds-base".to_string())) + // .await + // .unwrap(); + // let conflicts = cmd_repo.data.get_pkg_conflicts_by_uuid(pkg_uuid).await; + // // dbg!(detail); + // dbg!(conflicts); +} diff --git a/rdnf/src/repomd.rs b/rdnf/src/repomd.rs deleted file mode 100644 index 7def6e0ce63f8d88031e99ce94d48e57ee01961f..0000000000000000000000000000000000000000 --- a/rdnf/src/repomd.rs +++ /dev/null @@ -1,199 +0,0 @@ -use anyhow::{bail, Result}; -use quick_xml::{events::Event, Reader}; -use std::{ - fs::{metadata, File}, - io::Read, - path::Path, -}; - -use crate::{ - sub_command::{ - repo::RepoData, - repoutils::{download_file, HashKind}, - }, - RdnfContext, -}; -#[derive(Debug)] -pub struct RepoMd { - pub primary: Option, - pub filelists: Option, - pub updateinfo: Option, - pub other: Option, -} -#[derive(Debug)] -pub struct RepoMdItem { - pub checksum: (HashKind, String), - pub location: String, - pub size: u64, -} -impl RepoMdItem { - #[inline] - pub fn ensure_exists( - mut self, - rc: &RdnfContext, - repo: &RepoData, - prefix_path: &str, - ) -> Result { - let file_path = prefix_path.to_string() + self.location.as_str(); - if !Path::new(file_path.as_str()).exists() { - let url = repo.details.base_url.clone().unwrap()+"/" + self.location.as_str(); - let mut flag = false; - for _i in 1..10 { - download_file(rc, repo, url.as_str(), file_path.as_str(), &repo.psz_id)?; - if metadata(file_path.as_str())?.len() == self.size { - if self - .checksum - .0 - .clone() - .checksum(file_path.as_str(), self.checksum.1.as_str())? 
- { - flag = true; - break; - }; - } - } - if !flag { - bail!( - "Failed to download file {},or source file corrupted ", - url.as_str() - ); - } - } - self.location = file_path; - Ok(self) - } -} -impl RepoMd { - pub fn parse_from(path: &str) -> Result { - let mut buf = String::new(); - File::open(path)?.read_to_string(&mut buf)?; - let mut reader = Reader::from_str(buf.as_str()); - reader.trim_text(true); - let mut repomd = RepoMd { - primary: None, - filelists: None, - updateinfo: None, - other: None, - }; - loop { - match reader.read_event() { - Ok(Event::Start(data)) => match data.name().as_ref() { - b"data" => { - let mut checksum = (HashKind::Invalid, String::new()); - let mut location = String::new(); - let mut size = 0; - loop { - match reader.read_event()? { - Event::Empty(ele) => match ele.name().as_ref() { - b"location" => { - for attr in ele.attributes() { - let attr = attr?; - if String::from_utf8_lossy(attr.key.as_ref()) == "href" - { - location = - String::from_utf8_lossy(attr.value.as_ref()) - .to_string(); - } - } - } - _ => {} - }, - Event::Start(ele) => match ele.name().as_ref() { - b"checksum" => { - for attr in ele.attributes() { - let attr = attr?; - if String::from_utf8_lossy(attr.key.as_ref()) == "type" - { - let kind = - String::from_utf8_lossy(attr.value.as_ref()) - .to_string(); - checksum.0 = HashKind::from(kind.as_str()); - checksum.1 = - reader.read_text(ele.name())?.to_string(); - } - } - } - b"size" => { - size = reader.read_text(ele.name())?.parse::()?; - } - _ => {} - }, - Event::End(ele) => { - if ele.name().as_ref() == b"data" { - break; - } - } - _ => {} - } - } - let repo_md_item = RepoMdItem { - checksum, - location, - size, - }; - for attr_data in data.attributes() { - let attr_data = attr_data?; - if String::from_utf8_lossy(attr_data.key.as_ref()) == "type" { - match String::from_utf8_lossy(attr_data.value.as_ref()).as_bytes() { - b"primary" => repomd.primary = Some(repo_md_item), - b"filelists" => repomd.filelists = 
Some(repo_md_item), - b"updateinfo" => repomd.updateinfo = Some(repo_md_item), - b"other" => repomd.other = Some(repo_md_item), - _ => {} - } - break; - } - } - } - _ => {} - }, - Ok(Event::Eof) => break, - Err(e) => { - bail!( - "Failed to parse {} at position {}: {:?}", - path, - reader.buffer_position(), - e - ) - } - _ => {} - } - } - Ok(repomd) - } - pub fn ensure_repo_md_parts( - self, - rc: &RdnfContext, - repo: &RepoData, - cache_dir: String, - ) -> Result { - let mut repo_md = RepoMd { - primary: None, - filelists: None, - updateinfo: None, - other: None, - }; - - if let Some(primary) = self.primary { - repo_md.primary = Some(primary.ensure_exists(rc, repo, cache_dir.as_str())?); - } - - if !repo.base.skip_md_filelists { - if let Some(file_lists) = self.filelists { - repo_md.filelists = Some(file_lists.ensure_exists(rc, repo, cache_dir.as_str())?); - } - } - if !repo.base.skip_md_updateinfo { - if let Some(update_info) = self.updateinfo { - repo_md.updateinfo = - Some(update_info.ensure_exists(rc, repo, cache_dir.as_str())?); - } - } - if !repo.base.skip_md_other { - if let Some(other) = self.other { - repo_md.other = Some(other.ensure_exists(rc, repo, cache_dir.as_str())?); - } - } - Ok(repo_md) - } -} diff --git a/rdnf/src/rpm_trans.rs b/rdnf/src/rpm_trans.rs deleted file mode 100644 index e3e54332dc9e49db6008fe7fb415aec0234374df..0000000000000000000000000000000000000000 --- a/rdnf/src/rpm_trans.rs +++ /dev/null @@ -1,484 +0,0 @@ -use std::ffi::CStr; -use std::ffi::CString; -use std::path::Path; - -use anyhow::bail; -use anyhow::Result; -use console::style; -use console::Term; - -use libc::c_void; -use rpm_sys::ffi::fnpyKey; -use rpm_sys::ffi::rpmDbiTag_e_RPMDBI_LABEL; -use rpm_sys::ffi::rpmProblemFree; -use rpm_sys::ffi::rpmProblemGetStr; -use rpm_sys::ffi::rpmProblemGetType; -use rpm_sys::ffi::rpmProblemString; -use rpm_sys::ffi::rpmProblemType_e_RPMPROB_REQUIRES; -use rpm_sys::ffi::rpmRelocation; -use rpm_sys::ffi::rpmTag; -use 
rpm_sys::ffi::rpmVSFlags_e_RPMVSF_NODSA; -use rpm_sys::ffi::rpmVSFlags_e_RPMVSF_NODSAHEADER; -use rpm_sys::ffi::rpmVSFlags_e_RPMVSF_NOMD5; -use rpm_sys::ffi::rpmVSFlags_e_RPMVSF_NOPAYLOAD; -use rpm_sys::ffi::rpmVSFlags_e_RPMVSF_NORSA; -use rpm_sys::ffi::rpmVSFlags_e_RPMVSF_NORSAHEADER; -use rpm_sys::ffi::rpmVSFlags_e_RPMVSF_NOSHA1HEADER; -use rpm_sys::ffi::rpmVSFlags_e_RPMVSF_NOSHA256HEADER; -use rpm_sys::ffi::rpmdbFreeIterator; -use rpm_sys::ffi::rpmdbGetIteratorOffset; -use rpm_sys::ffi::rpmdbNextIterator; -use rpm_sys::ffi::rpmlogLvl_e_RPMLOG_ALERT; -use rpm_sys::ffi::rpmlogLvl_e_RPMLOG_CRIT; -use rpm_sys::ffi::rpmlogLvl_e_RPMLOG_DEBUG; -use rpm_sys::ffi::rpmlogLvl_e_RPMLOG_EMERG; -use rpm_sys::ffi::rpmlogLvl_e_RPMLOG_ERR; -use rpm_sys::ffi::rpmlogLvl_e_RPMLOG_INFO; -use rpm_sys::ffi::rpmlogLvl_e_RPMLOG_NOTICE; -use rpm_sys::ffi::rpmlogLvl_e_RPMLOG_WARNING; -use rpm_sys::ffi::rpmlogSetMask; -use rpm_sys::ffi::rpmprobFilterFlags_e_RPMPROB_FILTER_OLDPACKAGE; -use rpm_sys::ffi::rpmprobFilterFlags_e_RPMPROB_FILTER_REPLACEPKG; -use rpm_sys::ffi::rpmps; -use rpm_sys::ffi::rpmpsFreeIterator; -use rpm_sys::ffi::rpmpsGetProblem; -use rpm_sys::ffi::rpmpsInitIterator; -use rpm_sys::ffi::rpmpsNextIterator; -use rpm_sys::ffi::rpmpsNumProblems; -use rpm_sys::ffi::rpmteEVR; -use rpm_sys::ffi::rpmteN; -use rpm_sys::ffi::rpmtransFlags_e_RPMTRANS_FLAG_NONE; -use rpm_sys::ffi::rpmtransFlags_e_RPMTRANS_FLAG_NOSCRIPTS; -use rpm_sys::ffi::rpmtransFlags_e_RPMTRANS_FLAG_TEST; -use rpm_sys::ffi::rpmts; -use rpm_sys::ffi::rpmtsAddEraseElement; -use rpm_sys::ffi::rpmtsAddInstallElement; -use rpm_sys::ffi::rpmtsCheck; -use rpm_sys::ffi::rpmtsClean; -use rpm_sys::ffi::rpmtsCreate; -use rpm_sys::ffi::rpmtsInitIterator; -use rpm_sys::ffi::rpmtsOrder; -use rpm_sys::ffi::rpmtsProblems; -use rpm_sys::ffi::rpmtsRun; -use rpm_sys::ffi::rpmtsSetFlags; -use rpm_sys::ffi::rpmtsSetRootDir; -use rpm_sys::ffi::rpmtsSetVSFlags; -use rpm_sys::ffi::rpmtsSetVfyLevel; -use rpm_sys::ffi::rpmtsVSFlags; -use 
rpm_sys::ffi::rpmtsiInit; -use rpm_sys::ffi::rpmtsiNext; -use rpm_sys::ffi::rpmvercmp; -use rpm_sys::ffi::RPMLOG_PRIMASK; -use rpm_sys::ffi::RPMSIG_DIGEST_TYPE; -use rpm_sys::ffi::RPMSIG_SIGNATURE_TYPE; - -use crate::c_lib::set_callbackfunction; -use crate::cli::AlterOption; -use crate::default::RPM_CACHE_DIR_NAME; -use crate::errors::ERROR_RDNF_INVALID_PARAMETER; -use crate::errors::ERROR_RDNF_REPO_NOT_FOUND; -use crate::errors::ERROR_RDNF_RPMTS_CREATE_FAILED; -use crate::errors::ERROR_RDNF_RPM_CHECK; -use crate::errors::ERROR_RDNF_TRANSACTION_FAILED; -use crate::goal::SolvedPkgInfo; -use crate::sub_command::info::PkgInfo; -use crate::sub_command::install::AlterType; -use crate::Rdnf; -pub const RPMVSF_MASK_NODIGESTS: u32 = rpmVSFlags_e_RPMVSF_NOSHA1HEADER - | rpmVSFlags_e_RPMVSF_NOSHA256HEADER - | rpmVSFlags_e_RPMVSF_NOPAYLOAD - | rpmVSFlags_e_RPMVSF_NOMD5; - -pub const RPMVSF_MASK_NOSIGNATURES: u32 = rpmVSFlags_e_RPMVSF_NODSAHEADER - | rpmVSFlags_e_RPMVSF_NORSAHEADER - | rpmVSFlags_e_RPMVSF_NODSA - | rpmVSFlags_e_RPMVSF_NORSA; -pub const RPMSIG_VERIFIABLE_TYPE: u32 = RPMSIG_DIGEST_TYPE | RPMSIG_SIGNATURE_TYPE; -pub struct RpmTs { - pub cached_rpms: Vec, - pub trans_flags: i32, - pub prob_filter_flags: u32, - pub ts: rpmts, -} -impl Rdnf { - pub fn parse_rpm_verbosity(&self) -> u32 { - match self.rc.cli.rpm_verbosity.as_str() { - "emergency" => rpmlogLvl_e_RPMLOG_EMERG, - "alert" => rpmlogLvl_e_RPMLOG_ALERT, - "critical" => rpmlogLvl_e_RPMLOG_CRIT, - "error" => rpmlogLvl_e_RPMLOG_ERR, - "warning" => rpmlogLvl_e_RPMLOG_WARNING, - "notice" => rpmlogLvl_e_RPMLOG_NOTICE, - "info" => rpmlogLvl_e_RPMLOG_INFO, - "debug" => rpmlogLvl_e_RPMLOG_DEBUG, - _ => rpmlogLvl_e_RPMLOG_ERR, - } - } - pub fn rpm_exec_transaction( - &self, - solved_pkg_info: &SolvedPkgInfo, - alter_type: &AlterType, - alter_args: &AlterOption, - ) -> Result<()> { - unsafe { - let p = self.parse_rpm_verbosity(); - let pri = (1 << ((p & RPMLOG_PRIMASK) + 1)) - 1; - rpmlogSetMask(pri) - }; - let mut 
prob_filter_flags = rpmprobFilterFlags_e_RPMPROB_FILTER_OLDPACKAGE; - if alter_type.is_reinstall() { - prob_filter_flags = prob_filter_flags | rpmprobFilterFlags_e_RPMPROB_FILTER_REPLACEPKG; - } - let ts = unsafe { rpmtsCreate() }; - if ts.is_null() { - bail!(ERROR_RDNF_RPMTS_CREATE_FAILED); - }; - let mut trans_flags = rpmtransFlags_e_RPMTRANS_FLAG_NONE; - if alter_args.tsflags_noscripts { - trans_flags |= rpmtransFlags_e_RPMTRANS_FLAG_NOSCRIPTS; - } - let root_ptr = - CString::new(self.rc.cli.installroot.clone()).unwrap_or(CString::new("/").unwrap()); - unsafe { rpmtsSetRootDir(ts, root_ptr.as_ptr()) }; - let mut rpm_ts = RpmTs { - cached_rpms: Vec::new(), - trans_flags, - prob_filter_flags, - ts, - }; - let (_, width) = self.rc.term.size(); - set_callbackfunction(rpm_ts.ts, alter_args.quiet, width); - self.populate_transaction(&mut rpm_ts, solved_pkg_info, alter_args)?; - self.run_transaction(&mut rpm_ts, &self.rc.term, alter_args)?; - Ok(()) - } - pub fn populate_transaction( - &self, - rpm_ts: &mut RpmTs, - solved_pkg_info: &SolvedPkgInfo, - alter_args: &AlterOption, - ) -> Result<()> { - if solved_pkg_info.base.to_install.is_some() { - let pkg_infos = solved_pkg_info.base.to_install.as_ref().unwrap(); - self.trans_add_install_pkgs(rpm_ts, pkg_infos, 0, alter_args)?; - } - if solved_pkg_info.base.to_reinstall.is_some() { - let pkg_infos = solved_pkg_info.base.to_reinstall.as_ref().unwrap(); - self.trans_add_install_pkgs(rpm_ts, pkg_infos, 0, alter_args)?; - } - if solved_pkg_info.base.to_upgrade.is_some() { - let pkg_infos = solved_pkg_info.base.to_upgrade.as_ref().unwrap(); - self.trans_add_install_pkgs(rpm_ts, pkg_infos, 1, alter_args)?; - } - if solved_pkg_info.base.to_remove.is_some() { - let pkg_infos = solved_pkg_info.base.to_remove.as_ref().unwrap(); - self.trans_add_erase_pkg(rpm_ts, pkg_infos); - } - if solved_pkg_info.base.obsoleted.is_some() { - let pkg_infos = solved_pkg_info.base.obsoleted.as_ref().unwrap(); - self.trans_add_erase_pkg(rpm_ts, 
pkg_infos); - } - if solved_pkg_info.base.to_downgrade.is_some() { - let pkg_infos = solved_pkg_info.base.to_downgrade.as_ref().unwrap(); - self.trans_add_install_pkgs(rpm_ts, pkg_infos, 0, alter_args)?; - if solved_pkg_info.base.removed_by_downgrade.is_some() { - let pkg_infos = solved_pkg_info.base.removed_by_downgrade.as_ref().unwrap(); - self.trans_add_erase_pkg(rpm_ts, pkg_infos); - } - } - Ok(()) - } - pub fn trans_add_install_pkgs( - &self, - rpm_ts: &mut RpmTs, - pkg_infos: &Vec, - upgrade: i32, - alter_args: &AlterOption, - ) -> Result<()> { - for pkg_info in pkg_infos { - let mut location = pkg_info.details.location.clone().unwrap(); - let pkg_name = pkg_info.base.name.as_str(); - let repo_name = pkg_info.base.repo_name.as_str(); - let repo = match self.repos.iter().find(|x| x.psz_id == repo_name) { - Some(repo) => repo, - None => { - bail!(ERROR_RDNF_REPO_NOT_FOUND) - } - }; - if !Path::new(location.as_str()).exists() { - location = match repo.details.base_url.as_ref() { - Some(base_url) => { - let url = - base_url.trim_end_matches("/").to_string() + "/" + location.as_str(); - self.download_pkg_to_cache( - url.as_str(), - pkg_name, - repo, - RPM_CACHE_DIR_NAME, - )? 
- } - None => { - bail!(ERROR_RDNF_REPO_NOT_FOUND) - } - }; - }; - let (header, gpg_check) = - self.gpgcheck_pkg(rpm_ts, location.as_str(), repo, alter_args)?; - if !gpg_check { - unsafe { - rpmtsSetVSFlags( - rpm_ts.ts, - rpmtsVSFlags(rpm_ts.ts) | RPMVSF_MASK_NODIGESTS | RPMVSF_MASK_NOSIGNATURES, - ); - rpmtsSetVfyLevel(rpm_ts.ts, !RPMSIG_VERIFIABLE_TYPE as i32); - } - } - unsafe { - let file_ptr = CString::new(location.as_str()).unwrap().into_raw(); - rpmtsAddInstallElement( - rpm_ts.ts, - header, - file_ptr as fnpyKey, - upgrade, - 0 as *mut rpmRelocation, - ) - }; - rpm_ts.cached_rpms.push(location); - } - Ok(()) - } - pub fn trans_add_erase_pkg(&self, rpm_ts: &mut RpmTs, pkg_infos: &Vec) { - for pkg_info in pkg_infos { - let pkg_name = CString::new(pkg_info.base.name.as_str()).unwrap(); - let iter = unsafe { - rpmtsInitIterator( - rpm_ts.ts, - rpmDbiTag_e_RPMDBI_LABEL as rpmTag, - pkg_name.as_ptr() as *const c_void, - 0, - ) - }; - loop { - let rpm_header = unsafe { rpmdbNextIterator(iter) }; - if rpm_header.is_null() { - break; - } - let offset = unsafe { rpmdbGetIteratorOffset(iter) }; - if offset > 0 { - unsafe { rpmtsAddEraseElement(rpm_ts.ts, rpm_header, offset as i32) }; - } - } - if !iter.is_null() { - unsafe { rpmdbFreeIterator(iter) }; - } - } - } - pub fn run_transaction( - &self, - rpm_ts: &mut RpmTs, - term: &Term, - alter_args: &AlterOption, - ) -> Result<()> { - let mut rpm_vfy_level_mask = 0; - unsafe { rpmtsOrder(rpm_ts.ts) }; - rpm_ts.do_check(term)?; - unsafe { rpmtsClean(rpm_ts.ts) }; - if alter_args.no_gpg_check { - unsafe { - rpmtsSetVSFlags( - rpm_ts.ts, - rpmtsVSFlags(rpm_ts.ts) | RPMVSF_MASK_NODIGESTS | RPMVSF_MASK_NOSIGNATURES, - ); - rpmtsSetVSFlags(rpm_ts.ts, !RPMSIG_VERIFIABLE_TYPE); - } - } else if alter_args.skip_signatures || alter_args.skip_digest { - if alter_args.skip_signatures { - unsafe { - rpmtsSetVSFlags( - rpm_ts.ts, - rpmtsVSFlags(rpm_ts.ts) | RPMVSF_MASK_NOSIGNATURES, - ); - rpm_vfy_level_mask |= RPMSIG_SIGNATURE_TYPE; 
- } - } - if alter_args.skip_digest { - unsafe { - rpmtsSetVSFlags(rpm_ts.ts, rpmtsVSFlags(rpm_ts.ts) | RPMVSF_MASK_NODIGESTS); - rpm_vfy_level_mask |= RPMSIG_DIGEST_TYPE; - } - } - unsafe { - rpmtsSetVfyLevel(rpm_ts.ts, !rpm_vfy_level_mask as i32); - } - } - let rc = unsafe { - rpmtsSetFlags(rpm_ts.ts, rpmtransFlags_e_RPMTRANS_FLAG_TEST as u32); - rpmtsRun(rpm_ts.ts, 0 as rpmps, rpm_ts.prob_filter_flags) - }; - if rc != 0 { - println!("a"); - bail!(ERROR_RDNF_TRANSACTION_FAILED); - } - term.write_line("Running transaction")?; - unsafe { rpmtsSetFlags(rpm_ts.ts, rpm_ts.trans_flags as u32) }; - let rc = unsafe { rpmtsRun(rpm_ts.ts, 0 as rpmps, rpm_ts.prob_filter_flags) }; - if rc != 0 { - println!("b"); - bail!(ERROR_RDNF_TRANSACTION_FAILED); - } - Ok(()) - } -} -// pub fn rdnf_rpm_cb( -// ) -> unsafe extern "C" fn(*const c_void, u32, u64, u64, *const c_void, *mut c_void) -> *mut c_void { -// unsafe extern "C" fn cb( -// arg: *const c_void, -// what: u32, -// amount: u64, -// total: u64, -// key: *const c_void, -// data: *mut c_void, -// ) -> *mut c_void { -// let pkg_header = arg as Header; -// let mut callback_data = Box::from_raw(data as *mut RpmTsCallback); -// let file_name_ptr = key as *const c_char; -// let nevra = CStr::from_ptr(headerGetAsString(pkg_header, rpmTag_e_RPMTAG_NEVRA)) -// .to_str() -// .unwrap(); -// match what { -// rpmCallbackType_e_RPMCALLBACK_INST_OPEN_FILE => { -// if file_name_ptr.is_null() { -// println!("rpmcallback_inst_open_file null "); -// return 0 as *mut c_void; -// } else { -// println!( -// "rpmcallback_inst_open_file {}", -// CStr::from_ptr(file_name_ptr).to_str().unwrap() -// ); -// let mode = CString::new("r.ufdio").unwrap(); -// let fd = Fopen(file_name_ptr, mode.as_ptr()); -// callback_data.fs = Some(fd); -// } -// } -// rpmCallbackType_e_RPMCALLBACK_INST_CLOSE_FILE => { -// if callback_data.fs.is_some() { -// let fs_ptr = callback_data.fs.unwrap(); -// if !fs_ptr.is_null() { -// println!( -// "rpmcallback_inst_close_file 
{}", -// CStr::from_ptr(file_name_ptr).to_str().unwrap() -// ); -// Fclose(fs_ptr); -// callback_data.fs = None; -// } -// } -// } -// rpmCallbackType_e_RPMCALLBACK_INST_START => { -// if !callback_data.quiet { -// println!("{:<20}{}", ACTION_ALTER_INTALL, nevra); -// } -// } -// rpmCallbackType_e_RPMCALLBACK_UNINST_START => { -// if !callback_data.quiet { -// println!("{:<20}{}", ACTION_ALTER_ERASE, nevra); -// } -// } -// rpmCallbackType_e_RPMCALLBACK_SCRIPT_ERROR => { -// let script = match amount as i32 { -// rpmTag_e_RPMTAG_PREIN => "%prein", -// rpmTag_e_RPMTAG_POSTIN => "%postin", -// rpmTag_e_RPMTAG_PREUN => "%preun", -// rpmTag_e_RPMTAG_POSTUN => "%postun", -// _ => "(unkown)", -// }; -// let flag = if total == rpmRC_e_RPMRC_OK as u64 { -// "warning" -// } else { -// "error" -// }; -// println!("package {}: script {} in {}", nevra, flag, script); -// } -// _ => {} -// } - -// let _ = Box::into_raw(callback_data); -// 0 as *mut c_void -// } -// cb -// } -impl RpmTs { - pub fn do_check(&self, term: &Term) -> Result<()> { - let _nresult = unsafe { rpmtsCheck(self.ts) }; - let ps = unsafe { rpmtsProblems(self.ts) }; - if !ps.is_null() { - let n_probs = unsafe { rpmpsNumProblems(ps) }; - if n_probs > 0 { - term.write_line(format!("Found {} problems", n_probs).as_str())?; - let psi = unsafe { rpmpsInitIterator(ps) }; - while unsafe { rpmpsNextIterator(psi) } >= 0 { - let prob = unsafe { rpmpsGetProblem(psi) }; - let msg_ptr = unsafe { rpmProblemString(prob) }; - let msg = unsafe { CStr::from_ptr(msg_ptr).to_str()? }; - if msg.matches("no digest").collect::>().len() >= 1 { - let info = - format!("{}. Use {} to ignore", msg, style("--skipdigest").red()); - term.write_line(info.as_str())?; - } else { - term.write_line(msg)?; - if unsafe { rpmProblemGetType(prob) } == rpmProblemType_e_RPMPROB_REQUIRES { - let error_str = - unsafe { CStr::from_ptr(rpmProblemGetStr(prob)).to_str()? 
}; - // Error str has the format: - let token = error_str.split(' ').collect::>(); - if token.len() != 3 { - term.write_line("RPM problem string format unsupported")?; - bail!(ERROR_RDNF_INVALID_PARAMETER); - } - let pkg_name = token[0]; - let pkg_symbol = token[1]; - let pkg_version = token[2]; - let pkg_version_c = CString::new(pkg_version).unwrap(); - let pi = unsafe { rpmtsiInit(self.ts) }; - loop { - let pte = unsafe { rpmtsiNext(pi, 0) }; - if pte.is_null() { - break; - } - let cached_pkg_name = - unsafe { CStr::from_ptr(rpmteN(pte)).to_str()? }; - let cached_pkg_evr_ptr = unsafe { rpmteEVR(pte) }; - if cached_pkg_name == token[0] { - let more = pkg_symbol.find(">").is_some() - && unsafe { - rpmvercmp(cached_pkg_evr_ptr, pkg_version_c.as_ptr()) - > 0 - }; - let less = pkg_symbol.find("<").is_some() - && unsafe { - rpmvercmp(cached_pkg_evr_ptr, pkg_version_c.as_ptr()) - < 0 - }; - let equal = pkg_symbol.find("=").is_some() - && unsafe { - rpmvercmp(cached_pkg_evr_ptr, pkg_version_c.as_ptr()) - == 0 - }; - if more || less || equal { - let item=format!("Detected rpm pre-transaction dependency errors. 
Install {} {} {} first to resolve this failure", - pkg_name,pkg_symbol,pkg_version); - term.write_line(item.as_str())?; - break; - } - } - } - } - } - unsafe { rpmProblemFree(prob) }; - } - unsafe { rpmpsFreeIterator(psi) }; - bail!(ERROR_RDNF_RPM_CHECK); - } - } - Ok(()) - } -} diff --git a/rdnf/src/solv/mod.rs b/rdnf/src/solv/mod.rs deleted file mode 100644 index 0ee146e54abdd89e3c58d77a4476008424857352..0000000000000000000000000000000000000000 --- a/rdnf/src/solv/mod.rs +++ /dev/null @@ -1,25 +0,0 @@ -use solv_sys::ffi::{Queue, Repo }; - - - -pub mod rdnf_pkg; -pub mod rdnf_query; -pub mod rdnf_repo; -pub mod sack; -#[derive(Debug, Clone)] -pub struct SolvRepoInfoInernal { - pub repo: *mut Repo, - pub cookie: Option<[u8; 32]>, - pub n_cookie_set: Option, - pub repo_cache_dir: Option, -} -pub struct SolvPackageList { - pub pkgs: Queue, -} -impl SolvPackageList { - pub fn get_size(&self) -> u32 { - self.pkgs.count as u32 - } -} - - diff --git a/rdnf/src/solv/rdnf_pkg.rs b/rdnf/src/solv/rdnf_pkg.rs deleted file mode 100644 index 43687026825497ae490e8e00e8d372fc97e61704..0000000000000000000000000000000000000000 --- a/rdnf/src/solv/rdnf_pkg.rs +++ /dev/null @@ -1,441 +0,0 @@ -use std::{ - ffi::{CStr, CString}, - mem, -}; - -use crate::{ - c_lib::{ - create_dataiterator_empty, get_queue_element_value, map_set, map_setall, - pool_disabled_solvable, pool_id2solvable, - }, - errors::{ - ERROR_RDNF_INVALID_PARAMETER, ERROR_RDNF_NO_DATA, ERROR_RDNF_NO_MATCH, - ERROR_RDNF_SOLV_FAILED, - }, - sub_command::{ - info::{PkgInfoBase, PkgInfoDetails, PkgInfoOther}, - install::is_glob, - }, - utils::{c_str_ptr_to_rust_string, format_size}, -}; - -use super::{ - rdnf_query::{init_queue, SolvQuery}, - sack::Solvsack, - SolvPackageList, -}; - -use anyhow::{bail, Result}; -use libc::strtol; - -use solv_sys::ffi::{ - dataiterator_init, dataiterator_step, map_grow, map_init, map_subtract, pool_evrcmp_str, - pool_id2str, pool_solvable2str, queue_insertn, solv_knownid_SOLVABLE_ARCH, - 
solv_knownid_SOLVABLE_EVR, solv_knownid_SOLVABLE_INSTALLSIZE, solv_knownid_SOLVABLE_NAME, - solv_knownid_SOLVABLE_SUMMARY, solv_knownid_SOLVABLE_URL, solvable_get_location, - solvable_lookup_num, solvable_lookup_str, solver_findproblemrule, solver_problem_count, - solver_problemruleinfo2str, solver_ruleinfo, Dataiterator, Map, Pool, Queue, Repo, Solver, - SolverRuleinfo_SOLVER_RULE_PKG_CONFLICTS, SolverRuleinfo_SOLVER_RULE_PKG_IMPLICIT_OBSOLETES, - SolverRuleinfo_SOLVER_RULE_PKG_INSTALLED_OBSOLETES, - SolverRuleinfo_SOLVER_RULE_PKG_NOT_INSTALLABLE, SolverRuleinfo_SOLVER_RULE_PKG_OBSOLETES, - SolverRuleinfo_SOLVER_RULE_PKG_REQUIRES, SolverRuleinfo_SOLVER_RULE_PKG_SELF_CONFLICT, - EVRCMP_COMPARE, SEARCH_GLOB, SEARCH_STRING, solv_knownid_SOLVABLE_LICENSE, solv_knownid_SOLVABLE_DESCRIPTION, -}; -#[derive(Debug, Clone, Copy)] -pub struct SkipProblem { - pub none: bool, - pub conflicts: bool, - pub obsoletes: bool, - pub disabled: bool, -} -pub fn skip_based_on_type( - solv: *mut Solver, - rule_type: u32, - source: i32, - skip_problem: SkipProblem, -) -> bool { - let mut result = false; - if skip_problem.conflicts { - result = result - || rule_type == SolverRuleinfo_SOLVER_RULE_PKG_CONFLICTS - || rule_type == SolverRuleinfo_SOLVER_RULE_PKG_SELF_CONFLICT; - } - if skip_problem.obsoletes { - result = result - || rule_type == SolverRuleinfo_SOLVER_RULE_PKG_OBSOLETES - || rule_type == SolverRuleinfo_SOLVER_RULE_PKG_IMPLICIT_OBSOLETES - || rule_type == SolverRuleinfo_SOLVER_RULE_PKG_INSTALLED_OBSOLETES; - } - if skip_problem.disabled { - if rule_type == SolverRuleinfo_SOLVER_RULE_PKG_NOT_INSTALLABLE { - let s = unsafe { pool_id2solvable((*solv).pool, source) }; - if unsafe { pool_disabled_solvable((*solv).pool, s) } { - result = true; - }; - } - } - result -} - -impl Solvsack { - pub fn solv_count_pkg_by_name(&self, pkg: &str) -> Result { - let mut p_query = SolvQuery::default(self.clone()); - p_query.solv_apply_single_pkg_filter(pkg)?; - p_query.solv_apply_list_query()?; - 
let pkg_list = p_query.solv_get_query_result()?; - Ok(pkg_list.get_size()) - } - pub fn solv_find_all_installed(&self) -> Result { - let mut p_query = SolvQuery::default(self.clone()); - p_query.solv_add_system_repo_filter()?; - p_query.solv_apply_list_query()?; - let pkgs = p_query.solv_get_query_result()?; - Ok(pkgs) - } - pub fn solv_find_installed_pkg_by_multiple_names( - &self, - pkg_names: Vec, - ) -> Result { - let mut p_query = SolvQuery::default(self.clone()); - p_query.solv_add_system_repo_filter()?; - p_query.package_names = Some(pkg_names.clone()); - p_query.solv_apply_list_query()?; - let pkgs = p_query.solv_get_query_result()?; - Ok(pkgs) - } - pub fn solv_find_installed_pkg_by_name(&self, pkg_name: &str) -> Result { - let mut p_query = SolvQuery::default(self.clone()); - p_query.solv_add_system_repo_filter()?; - p_query.solv_apply_single_pkg_filter(pkg_name)?; - p_query.solv_apply_list_query()?; - let pkgs = p_query.solv_get_query_result()?; - Ok(pkgs) - } - pub fn solv_get_pkg_name_by_id(&self, id: i32) -> Result { - let p_solv = pool_id2solvable(self.pool, id); - if p_solv.is_null() { - bail!(ERROR_RDNF_NO_DATA); - } - unsafe { - let psz_temp = pool_id2str(self.pool, (*p_solv).name); - if psz_temp.is_null() { - bail!(ERROR_RDNF_NO_DATA) - } - Ok(CStr::from_ptr(psz_temp).to_str().unwrap().to_string()) - } - } - pub fn solv_get_pkg_nevr_by_id(&self, id: i32) -> Result { - let p_solv = pool_id2solvable(self.pool, id); - if p_solv.is_null() { - bail!(ERROR_RDNF_NO_DATA); - } - unsafe { - let psz_temp = pool_solvable2str(self.pool, p_solv); - if psz_temp.is_null() { - bail!(ERROR_RDNF_NO_DATA) - } - Ok(CStr::from_ptr(psz_temp).to_str().unwrap().to_string()) - } - } - pub fn solv_find_available_pkg_by_name(&self, pkg_name: &str) -> Result { - let mut p_query = SolvQuery::default(self.clone()); - p_query.solv_add_available_repo_filter()?; - p_query.solv_apply_single_pkg_filter(pkg_name)?; - p_query.solv_apply_list_query()?; - 
Ok(p_query.solv_get_query_result()?) - } - pub fn solv_find_highest_available(&self, pkg_name: &str) -> Result { - let pkg_list = self.solv_find_available_pkg_by_name(pkg_name)?; - let mut highest_available = pkg_list.get_pkg_id(0); - let count = pkg_list.get_size(); - for index in 1..count { - let id = pkg_list.get_pkg_id(index); - if self.solv_cmp_evr(id, highest_available)? > 0 { - highest_available = id; - }; - } - Ok(highest_available) - } - pub fn solv_find_highest_or_lowest_installed( - &self, - pkg_name: &str, - is_higher: bool, - ) -> Result { - let installed_pkg_list = self.solv_find_installed_pkg_by_name(pkg_name)?; - let mut high_or_low = installed_pkg_list.get_pkg_id(0); - if high_or_low != 0 { - let count = installed_pkg_list.get_size(); - for index in 1..count { - let id = installed_pkg_list.get_pkg_id(index); - let cmp = self.solv_cmp_evr(id, high_or_low)?; - match is_higher { - true => { - if cmp > 0 { - high_or_low = id; - } - } - false => { - if cmp < 0 { - high_or_low = id; - } - } - } - } - } - Ok(high_or_low) - } - pub fn solv_find_highest_installed(&self, pkg_name: &str) -> Result { - Ok(self.solv_find_highest_or_lowest_installed(pkg_name, true)?) - } - pub fn solv_find_lowest_installed(&self, pkg_name: &str) -> Result { - Ok(self.solv_find_highest_or_lowest_installed(pkg_name, false)?) 
- } - pub fn solv_cmp_evr(&self, id1: i32, id2: i32) -> Result { - let pool = self.pool; - let solv1 = pool_id2solvable(pool, id1); - let solv2 = pool_id2solvable(pool, id2); - if solv1.is_null() || solv2.is_null() { - bail!(ERROR_RDNF_INVALID_PARAMETER); - } - unsafe { - let evr1 = solvable_lookup_str(solv1, solv_knownid_SOLVABLE_EVR as i32); - let evr2 = solvable_lookup_str(solv2, solv_knownid_SOLVABLE_EVR as i32); - let p = pool_evrcmp_str(pool, evr1, evr2, EVRCMP_COMPARE as i32); - Ok(p) - } - } - pub fn solv_report_problems(&self, solv: *mut Solver, skip_problem: SkipProblem) -> Result<()> { - let mut count = unsafe { solver_problem_count(solv) }; - let mut source = 0; - let mut target = 0; - let mut dep = 0; - let mut prv_pkg_name = ""; - let mut error = ""; - let mut total_problems = 0; - while count > 0 { - let problem_id = unsafe { solver_findproblemrule(solv, count as i32) }; - let rule_type = - unsafe { solver_ruleinfo(solv, problem_id, &mut source, &mut target, &mut dep) }; - if skip_based_on_type(solv, rule_type, source, skip_problem) { - count -= 1; - continue; - }; - let psz_problem = - unsafe { solver_problemruleinfo2str(solv, rule_type, source, target, dep) }; - let problem = unsafe { CStr::from_ptr(psz_problem).to_str().unwrap() }; - if !skip_problem.none && rule_type == SolverRuleinfo_SOLVER_RULE_PKG_REQUIRES { - let (_, beg) = problem.split_once("requires").unwrap(); - let (beg, _) = beg.split_once(",").unwrap(); - let pkg_name = beg.trim_start().trim_end(); - if pkg_name == prv_pkg_name { - continue; - } - prv_pkg_name = pkg_name; - match self.solv_find_available_pkg_by_name(pkg_name) { - Ok(_) => { - continue; - } - Err(_) => {} - }; - } - error = ERROR_RDNF_SOLV_FAILED; - total_problems += 1; - println!("{}. 
{}", total_problems, problem); - } - if error != "" { - bail!("Found {} problem(s) while resolving", total_problems); - } - Ok(()) - } - pub fn solv_get_pkginfo_base_by_id(&self, pkg_id: i32) -> Result { - let solv = pool_id2solvable(self.pool, pkg_id); - if solv.is_null() { - bail!(ERROR_RDNF_NO_DATA); - } - let name = c_str_ptr_to_rust_string(unsafe { - solvable_lookup_str(solv, solv_knownid_SOLVABLE_NAME as i32) - }) - .unwrap_or("".to_string()); - let arch = c_str_ptr_to_rust_string(unsafe { - solvable_lookup_str(solv, solv_knownid_SOLVABLE_ARCH as i32) - }) - .unwrap_or("".to_string()); - let evr = c_str_ptr_to_rust_string(unsafe { - solvable_lookup_str(solv, solv_knownid_SOLVABLE_EVR as i32) - }) - .unwrap_or("".to_string()); - let (epoch, version, release) = solv_split_evr(evr.as_str()); - let mut dw_epoch = 0; - if epoch != "" { - unsafe { - let epoch_c = CString::new(epoch).unwrap(); - dw_epoch = strtol(epoch_c.as_ptr(), 0 as *mut *mut i8, 10); - } - }; - let repo_name_ptr = unsafe { (*(*solv).repo).name }; - if repo_name_ptr.is_null() { - bail!(ERROR_RDNF_NO_DATA); - }; - let repo_name = unsafe { CStr::from_ptr(repo_name_ptr).to_str()? 
}.to_string(); - Ok(PkgInfoBase { - epoch: dw_epoch as u32, - name: name.to_string(), - version: version.to_string(), - release: release.to_string(), - arch, - evr, - repo_name, - }) - } - pub fn solv_get_pkginfo_details_by_id(&self, pkg_id: i32) -> Result { - let solv = pool_id2solvable(self.pool, pkg_id); - if solv.is_null() { - bail!(ERROR_RDNF_NO_DATA); - }; - let summary = c_str_ptr_to_rust_string(unsafe { - solvable_lookup_str(solv, solv_knownid_SOLVABLE_SUMMARY as i32) - }) - .unwrap_or("".to_string()); - let location = - c_str_ptr_to_rust_string(unsafe { solvable_get_location(solv, 0 as *mut u32) }); - let install_size = - unsafe { solvable_lookup_num(solv, solv_knownid_SOLVABLE_INSTALLSIZE as i32, 0) }; - let formatted_size = format_size(install_size); - Ok(PkgInfoDetails { - install_size, - formatted_size, - summary, - location, - }) - } - pub fn solv_get_pkginfo_other_by_id(&self, pkg_id: i32) -> Result { - let solv = pool_id2solvable(self.pool, pkg_id); - let url = c_str_ptr_to_rust_string(unsafe { - solvable_lookup_str(solv, solv_knownid_SOLVABLE_URL as i32) - }) - .unwrap_or("None".to_string()); - let license=c_str_ptr_to_rust_string(unsafe { - solvable_lookup_str(solv, solv_knownid_SOLVABLE_LICENSE as i32) - }).unwrap_or("None".to_string()); - let description=c_str_ptr_to_rust_string(unsafe { - solvable_lookup_str(solv, solv_knownid_SOLVABLE_DESCRIPTION as i32) - }).unwrap_or("None".to_string()); - Ok(PkgInfoOther { - url, - license, - description, - }) - } - pub fn solv_get_pkginfo_by_id(&self, pkg_id: i32, which_info: i32) -> Result<&str> { - let solv = pool_id2solvable(self.pool, pkg_id); - Ok(unsafe { CStr::from_ptr(solvable_lookup_str(solv, which_info)).to_str()? }) - } - pub fn solv_get_pkg_location_by_id(&self, pkg_id: i32) -> Result<&str> { - let solv = pool_id2solvable(self.pool, pkg_id); - Ok(unsafe { CStr::from_ptr(solvable_get_location(solv, 0 as *mut u32)).to_str()? 
}) - } - pub fn solv_get_pkg_install_size_by_id(&self, pkg_id: i32) -> Result { - let solv = pool_id2solvable(self.pool, pkg_id); - Ok(unsafe { solvable_lookup_num(solv, solv_knownid_SOLVABLE_INSTALLSIZE as i32, 0) }) - } -} -impl SolvQuery { - pub fn solv_get_query_result(&self) -> Result { - if self.queue_result.count == 0 { - bail!(ERROR_RDNF_NO_MATCH); - } - let mut solv_pkgs_list = SolvPackageList { pkgs: init_queue() }; - unsafe { - queue_insertn( - &mut solv_pkgs_list.pkgs as *mut Queue, - solv_pkgs_list.pkgs.count, - self.queue_result.count, - self.queue_result.elements, - ); - } - Ok(solv_pkgs_list) - } -} -impl SolvPackageList { - pub fn get_pkg_id(&self, index: u32) -> i32 { - let p = &self.pkgs as *const Queue; - get_queue_element_value(p, index) - } - pub fn queue_to_pkg_list(queue: &mut Queue) -> Result { - if queue.count == 0 { - bail!(ERROR_RDNF_NO_MATCH) - }; - let mut solv_pkgs_list = SolvPackageList { pkgs: init_queue() }; - unsafe { - queue_insertn( - &mut solv_pkgs_list.pkgs, - solv_pkgs_list.pkgs.count, - queue.count, - queue.elements, - ); - }; - Ok(solv_pkgs_list) - } -} -pub fn solv_add_excludes(pool: *mut Pool, excludes: &Vec) { - let mut excludes_map = unsafe { init_map((*pool).nsolvables) }; - solv_data_iterator(pool, excludes, &mut excludes_map); - unsafe { - if (*pool).considered.is_null() { - (*pool).considered = libc::malloc(mem::size_of::()) as *mut Map; - map_init((*pool).considered, (*pool).nsolvables); - } else { - map_grow((*pool).considered, (*pool).nsolvables); - } - map_setall((*pool).considered); - map_subtract((*pool).considered, &excludes_map as *const Map); - } -} -pub fn solv_data_iterator(pool: *mut Pool, excludes: &Vec, map: &mut Map) { - let mut di = create_dataiterator_empty(); - let di_ptr = &mut di as *mut Dataiterator; - let keyname = solv_knownid_SOLVABLE_NAME; - for ele in excludes { - let mut flags = SEARCH_STRING; - if is_glob(ele.as_str()) { - flags = SEARCH_GLOB; - }; - - unsafe { - let temp = 
CString::new(ele.as_str()).unwrap(); - dataiterator_init( - di_ptr, - pool, - 0 as *mut Repo, - 0, - keyname as i32, - temp.as_ptr(), - flags as i32, - ); - while dataiterator_step(di_ptr) != 0 { - map_set(map as *mut Map, di.solvid); - } - } - } -} -pub fn init_map(n: i32) -> Map { - let mut map = Map { - map: CString::new("").unwrap().into_raw() as *mut u8, - size: 0, - }; - unsafe { - map_init(&mut map as *mut Map, n); - }; - map -} -pub fn solv_split_evr(evr: &str) -> (&str, &str, &str) { - let (evr, rest) = match evr.split_once(":") { - Some(s) => s, - None => ("", evr), - }; - let (version, release) = match rest.split_once("-") { - Some(s) => s, - None => ("", rest), - }; - (evr, version, release) -} diff --git a/rdnf/src/solv/rdnf_query.rs b/rdnf/src/solv/rdnf_query.rs deleted file mode 100644 index e5127fa15e0147028b0d925c07eee78bd424c917..0000000000000000000000000000000000000000 --- a/rdnf/src/solv/rdnf_query.rs +++ /dev/null @@ -1,322 +0,0 @@ -use std::ffi::{CStr, CString}; - -use solv_sys::ffi::{ - queue_init, queue_insertn, selection_filter, selection_make, Queue, Solver, SELECTION_CANON, - SELECTION_DOTARCH, SELECTION_FILELIST, SELECTION_GLOB, SELECTION_NAME, SELECTION_NOCASE, - SELECTION_PROVIDES, SELECTION_REL, SOLVER_DISTUPGRADE, SOLVER_SELECTMASK, - SOLVER_SETREPO, SOLVER_SETVENDOR, SOLVER_SOLVABLE, SOLVER_SOLVABLE_ALL, SOLVER_SOLVABLE_NAME, - SOLVER_SOLVABLE_ONE_OF, SOLVER_SOLVABLE_REPO, -}; - -use crate::{ - c_lib::{ - get_pool_solvables_value, get_pool_whatprovidesdata_value, get_queue_element_value, - is_pseudo_package, pool_id2repo, pool_id2solvable, pool_match_nevr, pool_whatprovides, - queue_empty, queue_push, queue_push2, - }, - default::SYSTEM_REPO_NAME, - errors::ERROR_RDNF_INVALID_PARAMETER, -}; - -use super::{sack::Solvsack, SolvPackageList}; -use anyhow::{bail, Result}; -pub struct SolvQuery { - pub sack: Solvsack, - pub queue_job: Queue, - pub p_solv: Option<*mut Solver>, - pub queue_repo_filter: Queue, - pub package_names: Option>, - 
pub queue_result: Queue, - pub dw_new_packages: Option, - // pub scope: Option, -} -impl SolvQuery { - pub fn default(solv_sack: Solvsack) -> Self { - SolvQuery { - sack: solv_sack, - queue_job: init_queue(), - p_solv: None, - queue_repo_filter: init_queue(), - package_names: None, - queue_result: init_queue(), - dw_new_packages: None, - // scope: None, - } - } - // pub fn solv_apply_pkg_filter(&mut self,pkg_names:&Vec)->Result<()>{ - // let p=pkg_names.clone(); - // self.package_names=Some(p); - // Ok(()) - // } - pub fn solv_apply_list_query(&mut self) -> Result<()> { - let mut queue_temp = init_queue(); - let queue_temp_ptr = &mut queue_temp as *mut Queue; - let flags = SELECTION_NAME | SELECTION_PROVIDES | SELECTION_GLOB; - let flags = flags | (SELECTION_CANON | SELECTION_DOTARCH | SELECTION_REL); - self.solv_generate_common_job(flags)?; - // let scope = self.scope.clone(); - // if scope.as_ref().is_some() && scope.as_ref().unwrap().is_upgrades() { - // self.solv_apply_up_down_scope(true); - // todo!(); - // } else if scope.as_ref().is_some() && scope.as_ref().unwrap().is_down_grades() { - // } else - if self.queue_job.count > 0 { - let mut index: u32 = 0; - while index < self.queue_job.count as u32 { - queue_empty(queue_temp_ptr); - let what = - get_queue_element_value(&mut self.queue_job as *mut Queue, index + 1 as u32); - let how = SOLVER_SELECTMASK - & get_queue_element_value(&mut self.queue_job as *mut Queue, index) as u32; - let pool = self.sack.pool; - if how == SOLVER_SOLVABLE_ALL { - let mut p = 2; - unsafe { - while p < (*pool).nsolvables { - let solvable_item = get_pool_solvables_value(pool, p as u32); - if !(*solvable_item).repo.is_null() - && !is_pseudo_package(pool, solvable_item) - { - queue_push(queue_temp_ptr, p); - }; - p += 1; - } - } - } else if how == SOLVER_SOLVABLE_REPO { - let repo = pool_id2repo(pool, what); - if !repo.is_null() { - unsafe { - let mut p = (*repo).start; - let mut s = pool_id2solvable((*repo).pool, p); - while p < 
(*repo).end { - let solvable_item = get_pool_solvables_value(pool, p as u32); - if (*s).repo == repo && !is_pseudo_package(pool, solvable_item) { - queue_push(queue_temp_ptr, p); - }; - p += 1; - s = pool_id2solvable((*repo).pool, p); - } - } - } - } else { - let mut pp = if how == SOLVER_SOLVABLE { - 0 - } else { - if how == SOLVER_SOLVABLE_ONE_OF { - what - } else { - pool_whatprovides(pool, what) - } - }; - let mut p = if how == SOLVER_SOLVABLE { - what - } else { - get_pool_whatprovidesdata_value(pool, pp) - }; - pp += 1; - while p != 0 { - let s = pool_id2solvable(pool, p); - let solvable_item = get_pool_solvables_value(pool, p as u32); - if !(how == SOLVER_SOLVABLE_NAME && pool_match_nevr(pool, s, what) == 0) - && !(is_pseudo_package(pool, solvable_item)) - { - queue_push(queue_temp_ptr, p); - } - p = get_pool_whatprovidesdata_value(pool, pp); - pp += 1; - } - } - unsafe { - queue_insertn( - &mut self.queue_result as *mut Queue, - self.queue_result.count, - queue_temp.count, - queue_temp.elements, - ); - } - index += 2; - } - } else if self.package_names.is_none() { - let pool = self.sack.pool; - // let mut p = 2; - unsafe { - // while p < (*pool).nsolvables { - // let solvable_item = get_pool_solvables_value(pool, p as u32); - // if !(*solvable_item).repo.is_null() && !is_pseudo_package(pool, solvable_item) { - // queue_push(queue_temp_ptr, p); - // }; - // p += 1; - // } - for id in 2..(*pool).nsolvables { - let solvable_item = get_pool_solvables_value(pool, id as u32); - if !(*solvable_item).repo.is_null() && !is_pseudo_package(pool, solvable_item) { - queue_push(&mut self.queue_result, id); - }; - } - } - } - Ok(()) - } - // pub fn solv_apply_up_down_scope(&mut self, up: bool) -> Result<()> { - // let p = if self.package_names.is_none() { - // self.sack.solv_find_all_installed()? - // } else { - // let p = self.package_names.clone().unwrap(); - // self.sack.solv_find_installed_pkg_by_multiple_names(p)? 
- // }; - // Ok(()) - // } - pub fn solv_add_system_repo_filter(&mut self) -> Result<()> { - let pool = self.sack.pool; - unsafe { - queue_push2( - &mut self.queue_repo_filter as *mut Queue, - (SOLVER_SOLVABLE_REPO | SOLVER_SETREPO) as i32, - (*(*pool).installed).repoid, - ); - } - Ok(()) - } - pub fn solv_add_available_repo_filter(&mut self) -> Result<()> { - let pool = self.sack.pool; - unsafe { - for repoid in 1..(*pool).nrepos{ - let repo = pool_id2repo(pool, repoid); - if !repo.is_null() { - let repo_name = CStr::from_ptr((*repo).name).to_str()?; - if SYSTEM_REPO_NAME.to_lowercase() != repo_name.to_lowercase() { - queue_push2( - &mut self.queue_repo_filter as *mut Queue, - (SOLVER_SOLVABLE_REPO | SOLVER_SETREPO | SOLVER_SETVENDOR) as i32, - (*repo).repoid, - ); - } - } - } - } - Ok(()) - } - pub fn solv_apply_single_pkg_filter(&mut self, pkg_name: &str) -> Result<()> { - self.package_names = Some(vec![pkg_name.to_string()]); - Ok(()) - } - pub fn solv_generate_common_job(&mut self, select_flags:u32) -> Result { - let mut queue_job = init_queue(); - let queue_job_ptr = &mut queue_job as *mut Queue; - let pool = self.sack.pool; - match &self.package_names { - Some(pkgs) => { - for pkg in pkgs { - // let mut ret_flags = 0; - let mut flags = select_flags; - unsafe { - queue_empty(queue_job_ptr); - if pool.is_null() - || (*pool).solvables.is_null() - || (*pool).whatprovides.is_null() - { - bail!(ERROR_RDNF_INVALID_PARAMETER); - } - let pkg_ptr = CString::new(pkg.as_str()).unwrap(); - let mut ret_flags = - selection_make(pool, queue_job_ptr, pkg_ptr.as_ptr(), flags as i32); - if self.queue_repo_filter.count != 0 { - selection_filter( - pool, - queue_job_ptr, - &mut self.queue_repo_filter as *mut Queue, - ); - } - if queue_job.count == 0 { - flags = flags | SELECTION_NOCASE; - ret_flags = - selection_make(pool, queue_job_ptr, pkg_ptr.as_ptr(), flags as i32); - if self.queue_repo_filter.count != 0 { - selection_filter( - pool, - queue_job_ptr, - &mut 
self.queue_repo_filter as *mut Queue, - ); - } - if queue_job.count != 0 { - println!("[ignoring case for {}]", pkg.as_str()); - }() - } - if queue_job.count != 0 { - if (ret_flags & SELECTION_FILELIST as i32) != 0 { - println!("[using file list match for {}]", pkg.as_str()); - } - if (ret_flags & SELECTION_PROVIDES as i32) != 0 { - println!("[using capability match for {}]", pkg.as_str()); - } - queue_insertn( - &mut self.queue_job as *mut Queue, - self.queue_job.count, - queue_job.count, - queue_job.elements, - ); - } - } - } - } - None => { - if self.queue_repo_filter.count != 0 { - queue_empty(queue_job_ptr); - queue_push2(queue_job_ptr, SOLVER_SOLVABLE_ALL as i32, 0); - if self.queue_repo_filter.count != 0 { - unsafe { - selection_filter( - pool, - queue_job_ptr, - &mut self.queue_repo_filter as *mut Queue, - ); - queue_insertn( - &mut self.queue_job as *mut Queue, - self.queue_job.count, - queue_job.count, - queue_job.elements, - ); - } - } - } - } - } - Ok(0) - } -} -pub fn init_queue() -> Queue { - let mut queue = Queue { - elements: &mut 0 as *mut i32, - count: 0, - alloc: &mut 0 as *mut i32, - left: 0, - }; - unsafe { - queue_init(&mut queue as *mut Queue); - }; - queue -} -pub fn _solv_add_dist_upgrade_job(queue_job: &mut Queue) -> Result<()> { - queue_push2( - queue_job as *mut Queue, - (SOLVER_DISTUPGRADE | SOLVER_SOLVABLE_ALL) as i32, - 0, - ); - Ok(()) -} - -impl Solvsack { - pub fn solv_find_all_up_down_candidates( - &mut self, - installed_pkgs: &SolvPackageList, - _up: bool, - _queue_result: &Queue, - ) -> Result<()> { - let _queue_up_down = init_queue(); - let dw_size = installed_pkgs.get_size(); - for index in 0..dw_size { - let _id = installed_pkgs.get_pkg_id(index as u32); - } - Ok(()) - } -} diff --git a/rdnf/src/solv/rdnf_repo.rs b/rdnf/src/solv/rdnf_repo.rs deleted file mode 100644 index aa2569fabbcc53ed989f9e3eedd815e578167faf..0000000000000000000000000000000000000000 --- a/rdnf/src/solv/rdnf_repo.rs +++ /dev/null @@ -1,211 +0,0 @@ -use 
std::{ - ffi::{c_long, CString}, - fs::{create_dir_all, rename}, - mem::size_of_val, -}; - -use anyhow::{bail, Result}; -use libc::{ - c_void, fchmod, fclose, fdopen, fopen, fread, fseek, fwrite, mkstemp, rewind, FILE, SEEK_END, -}; -use solv_sys::ffi::{ - repo_add_repomdxml, repo_add_rpmmd, repo_add_solv, repo_add_updateinfoxml, repo_write, s_Repo, - solv_xfopen, REPO_EXTEND_SOLVABLES, _IO_FILE, -}; - -use crate::{ - default::{SOLVCACHE_DIR_NAME, SOLV_COOKIE_LEN}, - errors::{ - ERROR_RDNF_ADD_SOLV, ERROR_RDNF_INVALID_PARAMETER, ERROR_RDNF_REPO_WRITE, - ERROR_RDNF_SOLV_IO, - }, - repomd::RepoMd, -}; - -use super::SolvRepoInfoInernal; -pub fn solv_user_metadata_cache( - solv_repo_info: &SolvRepoInfoInernal, - cache_file_path: &str, -) -> Result { - unsafe { - let cache_file_ptr = CString::new(cache_file_path).unwrap(); - let mode = CString::new("r").unwrap(); - let fp = fopen(cache_file_ptr.as_ptr(), mode.as_ptr()); - if fp.is_null() { - return Ok(false); - } - let mut temp_cookie: [u8; SOLV_COOKIE_LEN] = [0; SOLV_COOKIE_LEN]; - let temp_cookie_ptr = temp_cookie.as_mut_ptr(); - let off_set = size_of_val(&temp_cookie); - let off_set_neg = off_set as i32 * -1; - if fseek(fp, off_set_neg as c_long, SEEK_END) != 0 - || fread(temp_cookie_ptr as *mut c_void, off_set, 1, fp) != 1 - { - bail!(ERROR_RDNF_SOLV_IO); - } - let cookie = solv_repo_info.cookie.unwrap(); - if temp_cookie != cookie { - bail!(ERROR_RDNF_SOLV_IO); - } - rewind(fp); - if repo_add_solv(solv_repo_info.repo, fp as *mut _IO_FILE, 0) != 0 { - bail!(ERROR_RDNF_ADD_SOLV); - }; - fclose(fp); - } - Ok(true) -} - -pub fn solv_read_yum_repo( - p_repo: &*mut s_Repo, - repo_md_file: String, - repo_md: RepoMd, -) -> Result<()> { - solv_load_repomd(*p_repo, &repo_md_file)?; - if let Some(primary) = repo_md.primary { - solv_load_repomd_primary(*p_repo, primary.location.as_str())?; - } - if let Some(filelists) = repo_md.filelists { - solv_load_repomd_filelists(*p_repo, filelists.location.as_str())?; - } - if let 
Some(updateinfo) = repo_md.updateinfo { - solv_load_repomd_updateinfo(*p_repo, updateinfo.location.as_str())?; - } - if let Some(other) = repo_md.other { - solv_load_repomd_other(*p_repo, other.location.as_str())?; - } - Ok(()) -} -pub fn solv_load_repomd(p_repo: *mut s_Repo, repo_md_file: &String) -> Result<()> { - unsafe { - let file_name = CString::new(repo_md_file.as_str()).unwrap(); - let mode = CString::new("r").unwrap(); - let fp = fopen(file_name.as_ptr(), mode.as_ptr()); - if fp.is_null() { - println!("a {}", repo_md_file); - bail!(ERROR_RDNF_SOLV_IO); - } - if repo_add_repomdxml(p_repo, fp as *mut _IO_FILE, 0) != 0 { - println!("b {}", repo_md_file); - bail!(ERROR_RDNF_SOLV_IO); - } - fclose(fp as *mut FILE); - } - Ok(()) -} -pub fn solv_load_repomd_primary(p_repo: *mut s_Repo, primary: &str) -> Result<()> { - unsafe { - let psz_primary = CString::new(primary).unwrap(); - let mode = CString::new("r").unwrap(); - let fp = solv_xfopen(psz_primary.as_ptr(), mode.as_ptr()); - if fp.is_null() { - bail!(ERROR_RDNF_SOLV_IO) - } - if repo_add_rpmmd(p_repo, fp, 0 as *const i8, 0) != 0 { - println!("c"); - bail!(ERROR_RDNF_SOLV_IO) - }; - fclose(fp as *mut FILE); - } - Ok(()) -} -pub fn solv_load_repomd_filelists(p_repo: *mut s_Repo, filelists: &str) -> Result<()> { - let psz_filelists = CString::new(filelists).unwrap(); - let mode = CString::new("r").unwrap(); - let language = CString::new("FL").unwrap(); - unsafe { - let fp = solv_xfopen(psz_filelists.as_ptr(), mode.as_ptr()); - if fp.is_null() { - println!("e {}", filelists); - bail!(ERROR_RDNF_SOLV_IO) - } - if repo_add_rpmmd(p_repo, fp, language.as_ptr(), REPO_EXTEND_SOLVABLES as i32) != 0 { - println!("f {}", filelists); - bail!(ERROR_RDNF_SOLV_IO) - } - fclose(fp as *mut FILE); - } - Ok(()) -} -pub fn solv_load_repomd_updateinfo(p_repo: *mut s_Repo, updateinfo: &str) -> Result<()> { - let psz_updateinfo = CString::new(updateinfo).unwrap(); - let mode = CString::new("r").unwrap(); - unsafe { - let fp = 
solv_xfopen(psz_updateinfo.as_ptr(), mode.as_ptr()); - if fp.is_null() { - println!("g {}", updateinfo); - bail!(ERROR_RDNF_SOLV_IO) - } - if repo_add_updateinfoxml(p_repo, fp, 0) != 0 { - println!("h {}", updateinfo); - bail!(ERROR_RDNF_SOLV_IO) - } - fclose(fp as *mut FILE); - } - Ok(()) -} -pub fn solv_load_repomd_other(p_repo: *mut s_Repo, other: &str) -> Result<()> { - let psz_other = CString::new(other).unwrap(); - let mode = CString::new("r").unwrap(); - let language = CString::new("en").unwrap(); - unsafe { - let fp = solv_xfopen(psz_other.as_ptr(), mode.as_ptr()); - if fp.is_null() { - println!("i {}", other); - bail!(ERROR_RDNF_SOLV_IO) - } - if repo_add_rpmmd(p_repo, fp, language.as_ptr(), REPO_EXTEND_SOLVABLES as i32) != 0 { - println!("j {}", other); - bail!(ERROR_RDNF_SOLV_IO) - } - fclose(fp as *mut FILE); - } - Ok(()) -} - -pub fn solv_create_metadata_cache( - solv_repo_info: &SolvRepoInfoInernal, - repo_id: &str, -) -> Result<()> { - let solv_cache_dir = solv_repo_info.repo_cache_dir.clone().unwrap() + SOLVCACHE_DIR_NAME; - create_dir_all(solv_cache_dir.clone())?; - let temp_solv_file = solv_cache_dir.clone() + "/" + ".newsolv-XXXXXX"; - let temp_solv_file_ptr = CString::new(temp_solv_file.as_str()).unwrap().into_raw(); - unsafe { - let fd = mkstemp(temp_solv_file_ptr); - if fd < 0 { - println!("k {}", repo_id); - bail!(ERROR_RDNF_SOLV_IO); - } - fchmod(fd, 0o444); - let mode = CString::new("w").unwrap(); - let fp = fdopen(fd, mode.as_ptr()); - if fp.is_null() { - println!("l {}", repo_id); - bail!(ERROR_RDNF_SOLV_IO); - } - let p_repo = solv_repo_info.repo; - if repo_write(p_repo, fp as *mut _IO_FILE) != 0 { - bail!(ERROR_RDNF_REPO_WRITE); - } - let cookie = solv_repo_info.cookie.unwrap(); - if fwrite(cookie.as_ptr() as *const c_void, SOLV_COOKIE_LEN, 1, fp) != 1 { - // println!("m {}", repo_id); - bail!(ERROR_RDNF_SOLV_IO) - } - if (*p_repo).pool.is_null() { - bail!(ERROR_RDNF_INVALID_PARAMETER); - } - // let 
solvables_start=(*(*p_repo).pool).solvables; - // slice::from_raw_parts(solvables_start, len); - // let temp_solv_file = CStr::from_ptr(temp_solv_file).to_str().unwrap(); - let temp_solv_file = CString::from_raw(temp_solv_file_ptr); - let solv_file_path = solv_cache_dir + "/" + repo_id + ".solv"; - rename(temp_solv_file.to_str().unwrap(), solv_file_path.clone())?; - // let mut perms=fs::metadata(solv_file_path.clone())?.permissions(); - // perms.set_readonly(true); - // fs::set_permissions(solv_file_path.clone(), perms)?; - } - - Ok(()) -} diff --git a/rdnf/src/solv/sack.rs b/rdnf/src/solv/sack.rs deleted file mode 100644 index aedd245a87444cd7e560d281aa10cba888e07673..0000000000000000000000000000000000000000 --- a/rdnf/src/solv/sack.rs +++ /dev/null @@ -1,129 +0,0 @@ -use std::{ - ffi::{c_uint, CStr, CString}, - io::Read, - mem::size_of, - os::raw::c_void, -}; - -use anyhow::{bail, Result}; -use libc::{snprintf, fclose}; -use solv_sys::ffi::{ - pool_create, pool_createwhatprovides, pool_set_flag, pool_set_installed, pool_set_rootdir, - pool_setarch, repo_create, s_Pool, solv_chksum_add, solv_chksum_create, solv_chksum_free, - solv_knownid_REPOKEY_TYPE_SHA256, Repo, POOL_FLAG_ADDFILEPROVIDESFILTERED, _IO_FILE, REPO_REUSE_REPODATA, RPM_ADD_WITH_HDRID, REPO_USE_ROOTDIR, repo_add_rpmdb_reffp, -}; - -use crate::{ - conf::ConfigMain, - default::{SOLV_COOKIE_IDENT, SOLV_COOKIE_LEN}, - errors::{ERROR_RDNF_INVALID_PARAMETER, ERROR_RDNF_SOLV_CHKSUM}, - Cli, -}; - -const SYSTEM_REPO_NAME: &str = "@System"; -#[derive(Debug, Clone)] -pub struct Solvsack { - pub pool: *mut s_Pool, - pub dw_num_of_command_pkgs: usize, - pub cachedir: String, - pub rootdir: String, -} -unsafe impl Send for Solvsack {} -impl Solvsack { - pub fn from(conf: &ConfigMain, cli: &Cli,) -> Result { - unsafe { - let pool = pool_create(); - if pool.is_null() { - bail!(ERROR_RDNF_INVALID_PARAMETER); - } - let root = CString::new(cli.installroot.as_str()).unwrap_or(CString::new("/").unwrap()); - 
pool_set_rootdir(pool, root.as_ptr()); - let evr = CString::new(conf.var_base_arch.as_str()).unwrap(); - pool_setarch(pool, evr.as_ptr()); - pool_set_flag(pool, POOL_FLAG_ADDFILEPROVIDESFILTERED as i32, 1); - let system_repo = CString::new(SYSTEM_REPO_NAME).unwrap(); - let repo: *mut Repo = repo_create(pool, system_repo.as_ptr()); - if !cli.alldeps { - let cache_dir = CString::new(conf.cachedir.as_str()).unwrap(); - let mode = CString::new("r").unwrap(); - let p_cache_file = libc::fopen(cache_dir.as_ptr(), mode.as_ptr()); - let dw_flags=REPO_REUSE_REPODATA | RPM_ADD_WITH_HDRID | REPO_USE_ROOTDIR; - if repo_add_rpmdb_reffp(repo, p_cache_file as *mut _IO_FILE, dw_flags as i32) != 0 { - bail!("Failed to init solvack,can't open rpmdb"); - }; - if !p_cache_file.is_null() { - fclose(p_cache_file); - } - } - if repo.is_null() { - bail!(ERROR_RDNF_INVALID_PARAMETER); - } - pool_set_installed(pool, repo); - pool_createwhatprovides(pool); - Ok(Solvsack { - pool, - dw_num_of_command_pkgs: 0, - cachedir: conf.cachedir.clone(), - rootdir: cli.installroot.clone(), - }) - } - } -} -pub fn solv_create_cache_name(name: &String, url: &String) -> Result { - unsafe { - let p_chk_sum = solv_chksum_create(solv_knownid_REPOKEY_TYPE_SHA256.try_into().unwrap()); - if p_chk_sum.is_null() { - bail!(ERROR_RDNF_SOLV_CHKSUM); - } - let psz_url = CString::new(url.as_str()).unwrap(); - solv_chksum_add( - p_chk_sum, - psz_url.as_ptr() as *const c_void, - libc::strlen(psz_url.as_ptr()) as i32, - ); - let mut p_cookie: [u8; SOLV_COOKIE_LEN] = [0; SOLV_COOKIE_LEN]; - let mut psz_cookie: [u8; 9] = [0; 9]; - solv_chksum_free(p_chk_sum, p_cookie.as_mut_ptr() as *mut u8); - snprintf( - psz_cookie.as_mut_ptr() as *mut i8, - size_of::<[i8; 9]>(), - "%.2x%.2x%.2x%.2x".as_ptr() as *const i8, - p_cookie[0] as c_uint, - p_cookie[1] as c_uint, - p_cookie[2] as c_uint, - p_cookie[3] as c_uint, - ); - let cookie = CStr::from_ptr(psz_cookie.as_ptr() as *const i8) - .to_str() - .unwrap() - .to_string(); - 
Ok(format!("{}-{}", name, cookie)) - } -} -pub fn solv_calcuate_cookie_for_file(path: &str) -> Result<[u8; SOLV_COOKIE_LEN]> { - let mut file = std::fs::File::open(path)?; - let mut buf = [0u8; 8192]; - unsafe { - let p_chk_sum = solv_chksum_create(solv_knownid_REPOKEY_TYPE_SHA256.try_into().unwrap()); - if p_chk_sum.is_null() { - bail!(ERROR_RDNF_SOLV_CHKSUM); - } - let ident = CString::new(SOLV_COOKIE_IDENT).unwrap(); - solv_chksum_add( - p_chk_sum, - ident.as_ptr() as *const c_void, - libc::strlen(ident.as_ptr()) as i32, - ); - loop { - let len = file.read(&mut buf)?; - if len <= 0 { - break; - } - solv_chksum_add(p_chk_sum, buf.as_ptr() as *const c_void, len as i32); - buf = [0u8; 8192]; - } - let mut p_cookie: [u8; SOLV_COOKIE_LEN] = [0; SOLV_COOKIE_LEN]; - solv_chksum_free(p_chk_sum, p_cookie.as_mut_ptr() as *mut u8); - Ok(p_cookie) - } -} diff --git a/rdnf/src/solve/mod.rs b/rdnf/src/solve/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..aedfe111f410f0237ad6bac9fa4d9d10e121a4c1 --- /dev/null +++ b/rdnf/src/solve/mod.rs @@ -0,0 +1,506 @@ +mod solve; +mod solve_conflict; +mod solve_obsolete; +mod solve_require; +mod solve_term; +use std::{ + cmp::Ordering, + collections::{BTreeMap, HashMap, HashSet, VecDeque}, + hash::Hash, + sync::{ + atomic::{self, AtomicIsize}, + Arc, + }, +}; + +use crate::{ + cache::{ + installed::InstalledRepo, + model::{EntryDetail, FileDetail}, + ver::Version, + }, + repo::{CmdRepo, Repo}, +}; +use anyhow::{bail, Result}; +use tokio::sync::Mutex; +use uuid::Uuid; +use varisat::{CnfFormula, ExtendFormula, Lit}; +#[derive(Debug, Clone, Copy)] +pub enum TransationState { + Installed, + Upgrade, + Downgrade, + ToInstall, + Obsolete(isize), + Conflict(isize), + // ConflictInstalled, +} + +#[derive(Debug, PartialEq, Eq, Hash, Clone)] +pub(self) struct SolveItem { + entry_name: Arc, + ver: Version, + flag: Option, + // kind: EntryKind, +} +impl SolveItem { + fn new(entry: EntryDetail) -> Self { + let ver = 
Version::from_entry_detail(&entry); + Self { + entry_name: Arc::new(entry.entry_name), + ver, + flag: entry.flags, + } + } + fn check_version(&self, ver: &Version, flag: &Option) -> bool { + Version::check_version(&self.ver, ver, &self.flag, flag) + } +} + +struct Assume { + conflict_installed: Vec, + necessary: Vec, + installed: HashSet, +} +type PkgIndex = isize; +type EntryIndex = isize; +type TermIndex = isize; +#[derive(Debug, PartialEq, Eq, Clone, Hash, Copy)] +struct PkgIndexInfo { + pkg_index: isize, + arch_priority: u8, + repo_priority: i64, + // ver: Option, +} + +impl Ord for PkgIndexInfo { + fn cmp(&self, other: &Self) -> std::cmp::Ordering { + match self.partial_cmp(other) { + Some(ord) => ord, + None => Ordering::Equal, + } + } +} +impl PartialOrd for PkgIndexInfo { + fn partial_cmp(&self, other: &Self) -> Option { + match other.arch_priority.partial_cmp(&self.arch_priority) { + Some(std::cmp::Ordering::Equal) => {} + ord => return ord, + } + match other.repo_priority.partial_cmp(&self.repo_priority) { + Some(std::cmp::Ordering::Equal) => {} + ord => { + return ord; + } + } + match self.pkg_index.partial_cmp(&other.pkg_index) { + Some(std::cmp::Ordering::Equal) => Some(std::cmp::Ordering::Equal), + ord => { + return ord; + } + } + // self.ver.partial_cmp(&other.ver) + } +} +#[derive(Clone)] +pub struct SolveContext { + pub repos: Arc>, + pub installed_repo: InstalledRepo, + pub cmd_repo: CmdRepo, + formula: Arc>, + index_seq: Arc, + appeared: Arc>>, + queue: Arc>>, + index_map_pkginfo: Arc>>, + uuid_map_index: Arc>>, + entry_require: Arc)>>>, + + index_map_entry: Arc>>, + index_map_term: Arc>>, + arch_priority: HashMap<&'static str, u8>, + repo_priority: HashMap, + assume: Arc>, + same_pkg_name: Arc>>>, // may be cause error +} +#[derive(Debug, Clone, Hash, PartialEq, Eq)] +pub enum RepoType { + Repo(Arc), + Installed, + Cmd, +} +#[derive(Debug, Clone)] +struct SolvePkgInfo { + state: TransationState, + repo: RepoType, + pkg_name: Arc, + pkg_uuid: 
Uuid, + conflict: HashSet, + obsolete: HashSet, +} +fn gen_arch_priority() -> HashMap<&'static str, u8> { + let mut arch_priprity = HashMap::<&str, u8>::new(); + arch_priprity.insert("i386", 0); + arch_priprity.insert("i686", 0); + arch_priprity.insert("x86_64", 2); + arch_priprity.insert("noarch", 3); + arch_priprity +} +impl SolveContext { + pub fn new( + repos: Vec, + installed_repo: InstalledRepo, + cmd_repo: CmdRepo, + // arch: Option, + ) -> Self { + let assume = Assume { + conflict_installed: Vec::new(), + necessary: Vec::new(), + installed: HashSet::new(), + }; + let mut repo_priority = HashMap::new(); + repo_priority.insert(RepoType::Cmd, std::i64::MAX); + repo_priority.insert(RepoType::Installed, std::i64::MAX - 1); + for ele in repos.iter() { + repo_priority.insert( + RepoType::Repo(ele.config.name.to_owned()), + ele.config.base.priority, + ); + } + SolveContext { + repos: Arc::new(repos), + installed_repo, + cmd_repo, + formula: Arc::new(Mutex::new(CnfFormula::new())), + appeared: Arc::new(Mutex::new(HashSet::new())), + queue: Arc::new(Mutex::new(VecDeque::new())), + index_map_pkginfo: Arc::new(Mutex::new(BTreeMap::new())), + index_map_entry: Arc::new(Mutex::new(BTreeMap::new())), + index_map_term: Arc::new(Mutex::new(BTreeMap::new())), + uuid_map_index: Arc::new(Mutex::new(HashMap::new())), + arch_priority: gen_arch_priority(), + repo_priority, + assume: Arc::new(Mutex::new(assume)), + entry_require: Arc::new(Mutex::new(HashMap::new())), + index_seq: Arc::new(AtomicIsize::new(1)), + same_pkg_name: Arc::new(Mutex::new(HashMap::new())), + } + } + + pub async fn add_pkg_from_repo(&mut self, pkg_name: Arc) -> Result<()> { + //TODO maybe more than one repo have the same pkg_name, + //let user choose which repo, for example aur yay + //Default choose the repository with the highest priority. + let (repo, pkg_uuid) = self + .which_repo_provide_pkg(pkg_name.clone()) + .await? 
+ .pop() + .unwrap(); + let repo = RepoType::Repo(repo.config.name.clone()); + dbg!(repo.clone()); + if self.check_appeared(pkg_uuid).await { + return Ok(()); + }; + let pkg_index = self + .add_pkg_info( + pkg_name.clone(), + pkg_uuid, + repo.clone(), + TransationState::ToInstall, + ) + .await; + // let mut assume_lock = self.assume.lock().await; + // assume_lock.necessary.push(pkg_index); + // drop(assume_lock); + self.formula + .lock() + .await + .add_clause(&vec![Lit::from_dimacs(pkg_index)]); + + self.add_pkg(pkg_index, pkg_uuid, repo).await?; + Ok(()) + } + pub async fn add_pkg_from_cmd(&mut self, pkg_infos: Vec<(Uuid, String)>) -> Result<()> { + for (pkg_uuid, pkg_name) in pkg_infos { + if !self.check_appeared(pkg_uuid).await { + let pkg_index = self + .add_pkg_info( + Arc::new(pkg_name), + pkg_uuid, + RepoType::Cmd, + TransationState::ToInstall, + ) + .await; + self.add_pkg(pkg_index, pkg_uuid, RepoType::Cmd).await?; + } + } + Ok(()) + } + async fn add_pkg(&mut self, pkg_index: isize, pkg_uuid: Uuid, repo: RepoType) -> Result<()> { + self.add_queue(pkg_index, pkg_uuid, repo).await; + while let Some(r) = self.pop_queue().await { + self.solve_require(r.clone()).await?; + self.solve_conflict(r.clone()).await?; + self.solve_obsolete(r).await?; + } + Ok(()) + } + async fn which_repo_provide_pkg(&self, pkg_name: Arc) -> Result> { + let mut handles = Vec::new(); + let pkg_name = Arc::new(pkg_name.to_string()); + for repo in self.repos.iter() { + handles.push(( + repo, + repo.data.clone().get_pkg_uuid_by_pkg_name(pkg_name.clone()), + )); + } + + let mut res = Vec::new(); + for (repo, handle) in handles { + if let Some(pkg_uuid) = handle { + res.push((repo.clone(), pkg_uuid)); + } + } + if res.is_empty() { + bail!("No repo provides the pkg {}", pkg_name); + } + res.sort_by(|a, b| a.0.config.base.priority.cmp(&b.0.config.base.priority)); + Ok(res) + } + + async fn which_repo_pkg_provide_entry( + &self, + entry_name: &str, + ) -> Result)>>> { + let mut handles = 
Vec::new(); + let entry = Arc::new(entry_name.to_string()); + for repo in self.repos.iter() { + handles.push(( + RepoType::Repo(repo.config.name.clone()), + tokio::spawn( + repo.data + .clone() + .get_what_pkg_provide_entry_by_name(entry.clone()), + ), + )); + } + let mut provides = Vec::new(); + for (repo, handle) in handles { + if let Some(provide) = handle.await? { + provides.push((repo, provide)); + } + } + if provides.is_empty() { + Ok(None) + } else { + Ok(Some(provides)) + } + } + async fn which_repo_pkg_provide_file( + &self, + file_path: &str, + ) -> Result)>>> { + let mut handles = Vec::new(); + let file_path = Arc::new(file_path.to_string()); + for repo in self.repos.iter() { + handles.push(( + RepoType::Repo(repo.config.name.clone()), + tokio::spawn( + repo.data + .clone() + .get_what_pkg_provide_file_by_path(file_path.clone()), + ), + )); + } + let mut provides = Vec::new(); + for (repo, handle) in handles { + if let Some(provide) = handle.await? { + provides.push((repo, provide)); + } + } + if provides.is_empty() { + Ok(None) + } else { + Ok(Some(provides)) + } + } + async fn installed_try_update_or_downgrade( + &mut self, + pkg_name: Arc, + item: &mut SolveItem, + installed_ver: &Version, + ) -> Result { + let mut res = false; + if let Ok(s) = self.which_repo_provide_pkg(pkg_name.clone()).await { + let mut handles = Vec::new(); + for (repo, pkg_uuid) in s { + handles.push(( + RepoType::Repo(repo.config.name.clone()), + pkg_uuid, + tokio::spawn(repo.data.get_pkg_detail_by_uuid(pkg_uuid)), + )); + } + + let mut indexs = Vec::new(); + for (repo, pkg_uuid, handle) in handles { + if let Some(pkg_detail) = handle.await? 
{ + if !item.check_version(&pkg_detail.version, &Some("EQ".to_string())) { + let state = match pkg_detail.version.partial_cmp(installed_ver) { + Some(Ordering::Greater) => TransationState::Upgrade, + Some(Ordering::Less) => TransationState::Downgrade, + _ => { + continue; + } + }; + let index = self + .add_pkg_info(pkg_name.clone(), pkg_uuid, repo, state) + .await; + indexs.push(index); + }; + } + } + if !indexs.is_empty() { + res = true; + self.update_appeared(&indexs).await; + } + } + Ok(res) + } + fn get_latest_index(&self) -> isize { + // self.global_index_seq.fetch_add(1, atomic::Ordering::SeqCst) + self.index_seq.fetch_add(1, atomic::Ordering::SeqCst) + } + fn get_arch_priority(&self, arch: &str) -> u8 { + match self.arch_priority.get(arch) { + Some(s) => *s, + None => 0, + } + } + fn get_repo_priority(&self, repo: &RepoType) -> i64 { + match self.repo_priority.get(&repo) { + Some(s) => *s, + None => std::i64::MIN, + } + } + async fn check_appeared(&mut self, pkg_uuid: Uuid) -> bool { + let mut appeared_lock = self.appeared.lock().await; + let r = match appeared_lock.get(&pkg_uuid) { + Some(_) => true, + None => { + appeared_lock.insert(pkg_uuid); + false + } + }; + drop(appeared_lock); + r + } + async fn add_queue(&mut self, index: isize, pkg_uuid: Uuid, repo: RepoType) { + let mut queue_lock = self.queue.lock().await; + queue_lock.push_back((index, pkg_uuid, repo)); + drop(queue_lock); + } + async fn pop_queue(&mut self) -> Option<(isize, Uuid, RepoType)> { + let mut queue_lock = self.queue.lock().await; + let r = queue_lock.pop_front(); + drop(queue_lock); + r + } + async fn add_pkg_info( + &mut self, + pkg_name: Arc, + pkg_uuid: Uuid, + repo: RepoType, + state: TransationState, + ) -> PkgIndex { + let mut uuid_map_index_lock = self.uuid_map_index.lock().await; + let mut index_map_pkginfo_lock = self.index_map_pkginfo.lock().await; + let index = match uuid_map_index_lock.get(&pkg_uuid) { + Some(index) => { + match state { + TransationState::Conflict(i) => 
{ + if let Some(pkg_info) = index_map_pkginfo_lock.get_mut(index) { + pkg_info.conflict.insert(i); + } + } + TransationState::Obsolete(i) => { + if let Some(pkg_info) = index_map_pkginfo_lock.get_mut(index) { + pkg_info.obsolete.insert(i); + } + } + _ => {} + } + index.to_owned() + } + None => { + let index = self.get_latest_index(); + uuid_map_index_lock.insert(pkg_uuid, index); + let mut conflict = HashSet::new(); + let mut obsolete = HashSet::new(); + match state { + TransationState::Conflict(i) => { + conflict.insert(i); + } + TransationState::Obsolete(i) => { + obsolete.insert(i); + } + _ => {} + } + index_map_pkginfo_lock.insert( + index, + SolvePkgInfo { + state, + pkg_name, + pkg_uuid, + repo, + conflict, + obsolete, + }, + ); + index + } + }; + drop(index_map_pkginfo_lock); + drop(uuid_map_index_lock); + + return index; + } + async fn update_appeared(&mut self, indexs: &Vec) { + let map_lock = self.index_map_pkginfo.lock().await; + let mut appeared_lock = self.appeared.lock().await; + let mut queue_lock = self.queue.lock().await; + for index in indexs { + match map_lock.get(&index) { + Some(pkg_info) => match pkg_info.state { + TransationState::ToInstall + | TransationState::Downgrade + | TransationState::Upgrade => { + if !appeared_lock.contains(&pkg_info.pkg_uuid) { + appeared_lock.insert(pkg_info.pkg_uuid); + queue_lock.push_back(( + *index, + pkg_info.pkg_uuid, + pkg_info.repo.clone(), + )); + } + } + _ => {} + }, + None => {} + } + } + } + async fn update_installed(&mut self, pkg_index: isize) { + let mut assume_lock = self.assume.lock().await; + assume_lock.installed.insert(pkg_index); + drop(assume_lock); + } +} +// async fn is_provide_pkg(repo_data: RepoData, pkg_name: String) -> Option { +// repo_data.get_pkg_uuid_by_pkg_name(pkg_name.as_str()).await +// } + +// pub async fn get_formulat_by_package( +// repos: Vec>, +// installed_repo: InstalledRepo, +// ) -> Result<()> { +// Ok(()) +// } diff --git a/rdnf/src/solve/solve.rs 
b/rdnf/src/solve/solve.rs new file mode 100644 index 0000000000000000000000000000000000000000..e49d23961f7cde70504092f4dc7499f4c564f08e --- /dev/null +++ b/rdnf/src/solve/solve.rs @@ -0,0 +1,242 @@ +use std::{ + collections::{BTreeMap, HashSet}, + sync::{Arc, Mutex}, +}; + +use async_recursion::async_recursion; +use tokio::sync::MutexGuard; +use varisat::{ExtendFormula, Lit, Solver}; + +use crate::solve::TransationState; + +use super::{SolveContext, SolvePkgInfo}; +use anyhow::Result; +impl SolveContext { + pub async fn solve(&self) -> Result<()> { + let mut formula_lock = self.formula.lock().await; + let assume_lock = self.assume.lock().await; + let mut assume = HashSet::new(); + for ele in assume_lock.conflict_installed.iter() { + assume.insert(*ele); + } + for ele in assume_lock.necessary.iter() { + assume.insert(*ele); + } + for ele in assume_lock.installed.iter() { + assume.insert(*ele); + } + let mut entry_req_lock = self.entry_require.lock().await; + for (solve_item, (entry_index, pkg_infos)) in entry_req_lock.iter_mut() { + pkg_infos.sort(); + let mut lits = pkg_infos + .iter() + .map(|x| Lit::from_dimacs(x.pkg_index)) + .collect::>(); + if pkg_infos.len() == 0 { + dbg!(solve_item); + } + lits.push(!Lit::from_dimacs(*entry_index)); + formula_lock.add_clause(&lits); + } + + let same_pkg_lock = self.same_pkg_name.lock().await; + // for (pkg_name, index_set) in same_pkg_lock.iter() { + // let index_vec = index_set.iter().collect::>(); + // if pkg_name == "perl-libs" { + // continue; + // } + // if index_vec.len() >= 2 { + // for head in 0..index_vec.len() - 1 { + // for tail in (head + 1)..index_vec.len() { + // formula_lock.add_clause(&[ + // !Lit::from_dimacs(index_vec[head].abs()), + // !Lit::from_dimacs(index_vec[tail].abs()), + // ]); + // } + // } + // } + // } + drop(same_pkg_lock); + + let assume = assume + .iter() + .map(|x| Lit::from_dimacs(*x as isize)) + .collect::>(); + let mut solver = Solver::new(); + + solver.add_formula(&formula_lock); + 
solver.assume(&assume); + let pkginfos = self.index_map_pkginfo.lock().await; + // pkginfos.clone(); + let mut pkg_infos = BTreeMap::new(); + for (pkg_index, pkg_info) in pkginfos.iter() { + pkg_infos.insert(*pkg_index, (*pkg_info).to_owned()); + } + let appeared = Vec::new(); + self.clone() + .solve_assume(Arc::new(Mutex::new(solver)), assume, pkg_infos, appeared)?; + Ok(()) + } + // #[async_recursion] + fn solve_assume( + self, + solver: Arc>, + assume: Vec, + pkginfos: BTreeMap, + mut appeared: Vec, + ) -> Result<()> { + let mut solver_lock = solver.lock().unwrap(); + solver_lock.assume(&assume); + if solver_lock.solve()? { + // let pkginfos = self.index_map_pkginfo.lock(); + let mut to_install = Vec::new(); + let mut to_upgrade = Vec::new(); + match solver_lock.model() { + Some(v) => { + for ele in v { + if let Some(p) = pkginfos.get(&(ele.to_dimacs().abs())) { + if ele.is_positive() { + match p.state { + TransationState::ToInstall => { + if let Some(_s) = self + .installed_repo + .data + .clone() + .get_pkg_uuid_by_pkg_name(p.pkg_name.clone()) + { + to_upgrade.push(p); + } else { + to_install.push(p); + }; + } + TransationState::Upgrade => { + to_upgrade.push(p); + } + _ => {} + } + } + }; + } + } + None => {} + } + to_install.sort_by(|a, b| a.pkg_name.cmp(&b.pkg_name)); + to_upgrade.sort_by(|a, b| a.pkg_name.cmp(&b.pkg_name)); + let to_install_name = to_install + .iter() + .map(|x| x.pkg_name.as_str()) + .collect::>(); + let to_upgrade_name = to_upgrade + .iter() + .map(|x| x.pkg_name.as_str()) + .collect::>(); + dbg!(to_install_name); + dbg!(to_upgrade_name); + } else { + dbg!("error"); + // for ele in formula_lock.iter() { + // if ele.len() <= 1 { + // // dbg!(ele); + // let index = &ele[0].to_dimacs().abs(); + // if let Some(s) = self.index_map_pkginfo.lock().await.get(&index) { + // dbg!(s); + // }; + // if let Some(s) = self.index_map_entry.lock().await.get(index) { + // dbg!(s); + // } + // if let Some(s) = self.index_map_term.lock().await.get(index) { 
+ // dbg!(s); + // } + // } + // } + match solver_lock.failed_core() { + Some(s) => { + // dbg!(s); + // for ele in s { + let mut res = false; + for ele in s { + if !appeared.contains(ele) { + appeared.push(*ele); + } else { + res = true; + } + } + if res { + let assume = assume + .iter() + .filter(|x| !s.contains(*x)) + .map(|x| *x) + .collect::>(); + self.clone() + .solve_assume(solver.clone(), assume, pkginfos, appeared)?; + } + + // let index = ele.to_dimacs(); + // if let Some(s) = self.index_map_pkginfo.lock().await.get(&index) { + // dbg!(s); + // }; + // if let Some(s) = self.index_map_entry.lock().await.get(&index) { + // dbg!(s); + // } + // if let Some(s) = self.index_map_term.lock().await.get(&index) { + // dbg!(s); + // } + // } + + // for index in s { + // let mut assume = HashSet::new(); + // for ele in assume_lock.conflict_installed.iter() { + // assume.insert(*ele); + // } + // for ele in assume_lock.necessary.iter() { + // assume.insert(*ele); + // } + // for ele in assume_lock.installed.iter() { + // if index.to_dimacs().abs() != *ele { + // assume.insert(*ele); + // } + // } + // } + } + None => {} + } + // let p = solver.model().unwrap(); + // dbg!(p); + // solver.config(config_update); + }; + Ok(()) + } +} +#[cfg(test)] +mod tests { + // use crate::Rdnf; + #[test] + fn test_set() { + let v = vec![0, 1, 2]; + // let index_vec = index_set.iter().collect::>(); + // for head in 0..index_vec.len() - 1 { + // for tail in head..index_vec.len() { + // formula_lock.add_clause(&[ + // !Lit::from_dimacs(*index_vec[head]), + // !Lit::from_dimacs(*index_vec[tail]), + // ]); + // } + // } + for head in 0..v.len() - 1 { + for tail in (head + 1)..v.len() { + dbg!(vec![v[head], v[tail]]); + } + } + } + // async fn new_context() { + // let _repos = Rdnf::make_cache( + // self.repo_confs, + // self.conf.config_main.clone(), + // self.cli, + // self.term, + // ) + // .await?; + // let installed_repo = + // InstalledRepo::open(&self.conf.config_main, 
DEFAULT_RPMDB_LOCATION).await?; + // } +} diff --git a/rdnf/src/solve/solve_conflict.rs b/rdnf/src/solve/solve_conflict.rs new file mode 100644 index 0000000000000000000000000000000000000000..23a85969a042a02fa0cabb958a96cd0aebae4666 --- /dev/null +++ b/rdnf/src/solve/solve_conflict.rs @@ -0,0 +1,253 @@ +use std::{ + collections::HashSet, + ops::{Deref, DerefMut}, + sync::Arc, +}; + +use crate::cache::{model::EntryDetail, ver::Version}; + +use super::{RepoType, SolveContext, SolveItem, TransationState}; +use anyhow::Result; +use uuid::Uuid; +use varisat::{ExtendFormula, Lit}; + +struct ConflictItem { + clause: Vec, + pkg_index: isize, + item: SolveItem, +} +impl ConflictItem { + fn new(entry: EntryDetail, pkg_index: isize) -> Self { + ConflictItem { + item: SolveItem::new(entry), + clause: Vec::new(), + pkg_index, + } + } + fn add_clause(&mut self, pkg_index: isize) { + self.clause.push(pkg_index); + } +} +impl Deref for ConflictItem { + type Target = SolveItem; + + fn deref(&self) -> &Self::Target { + &self.item + } +} +impl DerefMut for ConflictItem { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.item + } +} +impl SolveContext { + async fn get_pkg_conflicts_by_uuid( + &self, + repo: RepoType, + pkg_uuid: Uuid, + ) -> Option> { + match repo { + RepoType::Repo(name) => { + self.repos + .iter() + .find(|r| r.config.name == name) + .unwrap() + .get_pkg_conflicts_by_uuid(pkg_uuid) + .await + } + RepoType::Cmd => self.cmd_repo.get_pkg_conflicts_by_uuid(pkg_uuid).await, + RepoType::Installed => { + self.installed_repo + .get_pkg_conflicts_by_uuid(pkg_uuid) + .await + } + } + } + pub async fn solve_conflict( + &mut self, + (pkg_index, pkg_uuid, repo): (isize, Uuid, RepoType), + ) -> Result<()> { + let mut handles = Vec::new(); + if let Some(conflicts) = self.get_pkg_conflicts_by_uuid(repo, pkg_uuid).await { + for entry in conflicts { + handles.push(tokio::spawn( + self.clone() + .solve_conflict_item(ConflictItem::new(entry, pkg_index)), + )); + } + }; + + 
let mut indexs = Vec::new(); + for handle in handles { + let mut r = handle.await??; + indexs.append(&mut r); + } + let mut formula_lock = self.formula.lock().await; + for index in indexs + .iter() + .map(|x| *x) + .collect::>() + .iter() + .filter(|x| **x != pkg_index) + { + formula_lock.add_clause(&[ + !Lit::from_dimacs(pkg_index as isize), + !Lit::from_dimacs(*index as isize), + ]); + } + drop(formula_lock); + Ok(()) + } + + async fn solve_conflict_item(mut self, mut conflict: ConflictItem) -> Result> { + self.cmd_check_conflict(&mut conflict).await; + self.installed_check_conflict(&mut conflict).await?; + self.repo_check_conflict(&mut conflict).await?; + Ok(conflict.clause) + } + async fn cmd_check_conflict(&mut self, item: &mut ConflictItem) { + if let Some(cmd) = self + .cmd_repo + .data + .clone() + .get_what_pkg_provide_entry_by_name(item.entry_name.clone()) + .await + { + for (pkg_uuid, entry_detail) in cmd { + let cmd_ver = Version::from_entry_detail(&entry_detail); + let cmd_flag = entry_detail.flags; + if item.check_version(&cmd_ver, &cmd_flag) { + let index = self + .add_pkg_info( + Arc::new(entry_detail.pkg_name), + pkg_uuid, + RepoType::Cmd, + TransationState::Conflict(item.pkg_index), + ) + .await; + item.add_clause(index); + }; + } + } else if item.entry_name.starts_with("/") { + if let Some(cmd) = self + .cmd_repo + .data + .clone() + .get_what_pkg_provide_file_by_path(item.entry_name.clone()) + .await + { + for (pkg_uuid, file_detail) in cmd { + let index = self + .add_pkg_info( + Arc::new(file_detail.pkg_name), + pkg_uuid, + RepoType::Cmd, + TransationState::Conflict(item.pkg_index), + ) + .await; + item.add_clause(index); + } + } + } + } + async fn installed_check_conflict(&mut self, item: &mut ConflictItem) -> Result<()> { + let mut conflict_installeds = Vec::new(); + if let Some(installed) = self + .installed_repo + .data + .clone() + .get_what_pkg_provide_entry_by_name(item.entry_name.clone()) + .await + { + for (pkg_uuid, entry) in 
installed { + let installed_ver = Version::from_entry_detail(&entry); + let installed_flag = entry.flags; + let pkg_name = Arc::new(entry.pkg_name); + if item.check_version(&installed_ver, &installed_flag) { + let index = self + .add_pkg_info( + pkg_name.clone(), + pkg_uuid, + RepoType::Installed, + TransationState::Conflict(item.pkg_index), + ) + .await; + item.add_clause(index); + if !self + .installed_try_update_or_downgrade(pkg_name, item, &installed_ver) + .await? + { + conflict_installeds.push(index); + }; + }; + } + } else if item.entry_name.starts_with("/") { + if let Some(installed) = self + .installed_repo + .data + .clone() + .get_what_pkg_provide_file_by_path(item.entry_name.clone()) + .await + { + for (pkg_uuid, file_detail) in installed { + let index = self + .add_pkg_info( + Arc::new(file_detail.pkg_name), + pkg_uuid, + RepoType::Installed, + TransationState::Conflict(item.pkg_index), + ) + .await; + item.add_clause(index); + conflict_installeds.push(index); + } + } + } + let mut assume_lock = self.assume.lock().await; + assume_lock + .conflict_installed + .append(&mut conflict_installeds); + drop(assume_lock); + Ok(()) + } + + async fn repo_check_conflict(&mut self, item: &mut ConflictItem) -> Result<()> { + if let Some(v) = self.which_repo_pkg_provide_entry(&item.entry_name).await? { + for (repo, provides) in v { + for (pkg_uuid, entry_detail) in provides { + let repo_ver = Version::from_entry_detail(&entry_detail); + let repo_flag = entry_detail.flags; + if item.check_version(&repo_ver, &repo_flag) { + let index = self + .add_pkg_info( + Arc::new(entry_detail.pkg_name), + pkg_uuid, + repo.clone(), + TransationState::Conflict(item.pkg_index), + ) + .await; + item.add_clause(index); + }; + } + } + } else if item.entry_name.starts_with("/") { + if let Some(v) = self.which_repo_pkg_provide_file(&item.entry_name).await? 
{ + for (repo, provides) in v { + for (pkg_uuid, file_detail) in provides { + let index = self + .add_pkg_info( + Arc::new(file_detail.pkg_name), + pkg_uuid, + repo.clone(), + TransationState::Conflict(item.pkg_index), + ) + .await; + item.add_clause(index); + } + } + } + } + + Ok(()) + } +} diff --git a/rdnf/src/solve/solve_obsolete.rs b/rdnf/src/solve/solve_obsolete.rs new file mode 100644 index 0000000000000000000000000000000000000000..9119912169ea596368a65ffcb04d30fa25b1b052 --- /dev/null +++ b/rdnf/src/solve/solve_obsolete.rs @@ -0,0 +1,238 @@ +use std::{ + collections::HashSet, + ops::{Deref, DerefMut}, + sync::Arc, +}; + +use crate::cache::{model::EntryDetail, ver::Version}; + +use super::{RepoType, SolveContext, SolveItem, TransationState}; +use anyhow::Result; +use uuid::Uuid; +use varisat::{ExtendFormula, Lit}; +struct ObsoleteItem { + clause: Vec, + pkg_index: isize, + item: SolveItem, +} +impl ObsoleteItem { + fn new(entry: EntryDetail, pkg_index: isize) -> Self { + Self { + clause: Vec::new(), + pkg_index, + item: SolveItem::new(entry), + } + } + fn add_clause(&mut self, pkg_index: isize) { + self.clause.push(pkg_index); + } +} +impl Deref for ObsoleteItem { + type Target = SolveItem; + + fn deref(&self) -> &Self::Target { + &self.item + } +} +impl DerefMut for ObsoleteItem { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.item + } +} +impl SolveContext { + async fn get_pkg_obsoletes_by_uuid( + &self, + repo: RepoType, + pkg_uuid: Uuid, + ) -> Option> { + match repo { + RepoType::Repo(name) => { + self.repos + .iter() + .find(|r| r.config.name == name) + .unwrap() + .get_pkg_obsoletes_by_uuid(pkg_uuid) + .await + } + RepoType::Cmd => self.cmd_repo.get_pkg_obsoletes_by_uuid(pkg_uuid).await, + RepoType::Installed => { + self.installed_repo + .get_pkg_obsoletes_by_uuid(pkg_uuid) + .await + } + } + } + pub async fn solve_obsolete( + &mut self, + (pkg_index, pkg_uuid, repo): (isize, Uuid, RepoType), + ) -> Result<()> { + let mut handles = 
Vec::new(); + if let Some(obsoletes) = self.get_pkg_obsoletes_by_uuid(repo, pkg_uuid).await { + for entry in obsoletes { + handles.push(tokio::spawn( + self.clone() + .solve_obsolete_item(ObsoleteItem::new(entry, pkg_index)), + )); + } + } + let mut indexs = Vec::new(); + for handle in handles { + let mut r = handle.await??; + indexs.append(&mut r); + } + let mut formula_lock = self.formula.lock().await; + for index in indexs + .iter() + .map(|x| *x) + .collect::>() + .iter() + .filter(|x| **x != pkg_index) + { + formula_lock.add_clause(&[ + !Lit::from_dimacs(pkg_index as isize), + !Lit::from_dimacs(*index as isize), + ]); + } + drop(formula_lock); + Ok(()) + } + async fn solve_obsolete_item(mut self, mut obsolete: ObsoleteItem) -> Result> { + self.cmd_check_obsolete(&mut obsolete).await; + self.installed_check_obsolete(&mut obsolete).await?; + self.repo_check_obsolete(&mut obsolete).await?; + Ok(obsolete.clause) + } + async fn cmd_check_obsolete(&mut self, item: &mut ObsoleteItem) { + if let Some(cmd) = self + .cmd_repo + .data + .clone() + .get_what_pkg_provide_entry_by_name(item.entry_name.clone()) + .await + { + for (pkg_uuid, entry_detail) in cmd { + let cmd_ver = Version::from_entry_detail(&entry_detail); + let cmd_flag = entry_detail.flags; + if item.check_version(&cmd_ver, &cmd_flag) { + let index = self + .add_pkg_info( + Arc::new(entry_detail.pkg_name), + pkg_uuid, + RepoType::Cmd, + TransationState::Obsolete(item.pkg_index), + ) + .await; + item.add_clause(index); + } + } + } else if item.entry_name.starts_with("/") { + if let Some(cmd) = self + .cmd_repo + .data + .clone() + .get_what_pkg_provide_file_by_path(item.entry_name.clone()) + .await + { + for (pkg_uuid, file_detail) in cmd { + let index = self + .add_pkg_info( + Arc::new(file_detail.pkg_name), + pkg_uuid, + RepoType::Cmd, + TransationState::Obsolete(item.pkg_index), + ) + .await; + item.add_clause(index); + } + }; + }; + } + async fn installed_check_obsolete(&mut self, item: &mut ObsoleteItem) 
-> Result<()> { + if let Some(installed) = self + .installed_repo + .data + .clone() + .get_what_pkg_provide_entry_by_name(item.entry_name.clone()) + .await + { + for (pkg_uuid, entry) in installed { + let installed_ver = Version::from_entry_detail(&entry); + let installed_flag = entry.flags; + let pkg_name = Arc::new(entry.pkg_name); + if item.check_version(&installed_ver, &installed_flag) { + let index = self + .add_pkg_info( + pkg_name.clone(), + pkg_uuid, + RepoType::Installed, + TransationState::Conflict(item.pkg_index), + ) + .await; + item.add_clause(index); + self.installed_try_update_or_downgrade(pkg_name, item, &installed_ver) + .await?; + }; + } + } else if item.entry_name.starts_with("/") { + if let Some(installed) = self + .installed_repo + .data + .clone() + .get_what_pkg_provide_file_by_path(item.entry_name.clone()) + .await + { + for (pkg_uuid, file_detail) in installed { + let index = self + .add_pkg_info( + Arc::new(file_detail.pkg_name), + pkg_uuid, + RepoType::Installed, + TransationState::Conflict(item.pkg_index), + ) + .await; + item.add_clause(index); + } + }; + } + Ok(()) + } + async fn repo_check_obsolete(&mut self, item: &mut ObsoleteItem) -> Result<()> { + if let Some(v) = self.which_repo_pkg_provide_entry(&item.entry_name).await? { + for (repo, provides) in v { + for (pkg_uuid, entry_detail) in provides { + let repo_ver = Version::from_entry_detail(&entry_detail); + let repo_flag = entry_detail.flags; + if item.check_version(&repo_ver, &repo_flag) { + let index = self + .add_pkg_info( + Arc::new(entry_detail.pkg_name), + pkg_uuid, + repo.clone(), + TransationState::Obsolete(item.pkg_index), + ) + .await; + item.add_clause(index); + }; + } + } + } else if item.entry_name.starts_with("/") { + if let Some(v) = self.which_repo_pkg_provide_file(&item.entry_name).await? 
{ + for (repo, provides) in v { + for (pkg_uuid, file_detail) in provides { + let index = self + .add_pkg_info( + Arc::new(file_detail.pkg_name), + pkg_uuid, + repo.clone(), + TransationState::Obsolete(item.pkg_index), + ) + .await; + item.add_clause(index); + } + } + } + } + + Ok(()) + } +} diff --git a/rdnf/src/solve/solve_require.rs b/rdnf/src/solve/solve_require.rs new file mode 100644 index 0000000000000000000000000000000000000000..7d445e9a4fb09c7f083c1fba2ff02d55c1480951 --- /dev/null +++ b/rdnf/src/solve/solve_require.rs @@ -0,0 +1,530 @@ +use std::{ + cmp::Ordering, + collections::{HashMap, HashSet}, + sync::Arc, +}; + +use crate::cache::{model::EntryDetail, ver::Version}; +use anyhow::Result; + +use uuid::Uuid; +use varisat::{ExtendFormula, Lit}; + +use super::{ + EntryIndex, PkgIndex, PkgIndexInfo, RepoType, SolveContext, SolveItem, TransationState, +}; + +fn get_pkg_index( + satisfied_pkg: &mut HashMap<(String, Option), PkgIndexInfo>, +) -> Vec { + // for ele in satisfied_pkg { + + // } + let mut pkgs = satisfied_pkg + .iter() + .map(|x| *x.1) + .collect::>() + .iter() + .map(|x| *x) + .collect::>(); + pkgs.sort(); + pkgs + // let mut pkgs = satisfied_pkg + // .iter() + // .map(|(_, (pkg_index, priority))| (*pkg_index, *priority)) + // .collect::>(); + // pkgs.sort_by(|a, b| b.1.cmp(&a.1)); + // pkgs.iter() + // .map(|(pkg_index, priority)| *pkg_index) + // .collect::>() +} + +impl SolveContext { + async fn get_pkg_requires_by_uuid( + &self, + repo: RepoType, + pkg_uuid: Uuid, + ) -> Option> { + match repo { + RepoType::Repo(name) => { + self.repos + .iter() + .find(|r| r.config.name == name) + .unwrap() + .get_pkg_requires_by_uuid(pkg_uuid) + .await + } + RepoType::Cmd => self.cmd_repo.get_pkg_requires_by_uuid(pkg_uuid).await, + RepoType::Installed => self.installed_repo.get_pkg_requires_by_uuid(pkg_uuid).await, + } + } + async fn get_pkg_version_by_uuid(&self, repo: &RepoType, pkg_uuid: Uuid) -> Option { + match repo { + RepoType::Repo(name) => { + 
self.repos + .iter() + .find(|r| r.config.name.as_str() == name.as_str()) + .unwrap() + .data + .clone() + .get_pkg_version_by_uuid(pkg_uuid) + .await + } + RepoType::Cmd => { + self.cmd_repo + .data + .clone() + .get_pkg_version_by_uuid(pkg_uuid) + .await + } + RepoType::Installed => { + self.installed_repo + .data + .clone() + .get_pkg_version_by_uuid(pkg_uuid) + .await + } + } + } + pub(super) async fn solve_require( + &mut self, + (pkg_index, pkg_uuid, repo): (PkgIndex, Uuid, RepoType), + ) -> Result<()> { + let mut handle_items = Vec::new(); + let mut handle_terms = Vec::new(); + if let Some(reqs) = self.get_pkg_requires_by_uuid(repo, pkg_uuid).await { + for require in reqs { + if !require.entry_name.starts_with("(") { + handle_items.push(tokio::spawn( + self.clone().solve_require_item(SolveItem::new(require)), + )); + } else { + handle_terms.push(tokio::spawn( + self.clone().solve_require_term(require.entry_name), + )); + } + } + } + let mut entries = Vec::new(); + for handle in handle_items { + entries.push(handle.await??.0); + } + for handle in handle_terms { + entries.push(handle.await??.0); + } + let mut formula_lock = self.formula.lock().await; + for entry_index in entries.iter() { + formula_lock + .add_clause(&[Lit::from_dimacs(*entry_index), !Lit::from_dimacs(pkg_index)]); + } + drop(formula_lock); + Ok(()) + } + + pub(super) async fn solve_require_item( + mut self, + mut require_item: SolveItem, + ) -> Result<(EntryIndex, Vec)> { + let entry_require_lock = self.entry_require.lock().await; + let r = match entry_require_lock.get(&require_item) { + Some((entry_index, v)) => (*entry_index, v.to_owned()), + None => { + drop(entry_require_lock); + let mut satisfied_pkg: HashMap<(String, Option), PkgIndexInfo> = + HashMap::new(); + self.cmd_satisfy_requie(&mut require_item, &mut satisfied_pkg) + .await; + self.installed_satisfy_require(&mut require_item, &mut satisfied_pkg) + .await?; + // if satisfied_pkg.is_empty() { + self.repo_satisfy_require(&mut 
require_item, &mut satisfied_pkg) + .await?; + // } + + let mut pkg_infos = get_pkg_index(&mut satisfied_pkg); + pkg_infos.sort(); + + let mut same_pkg_name_lock = self.same_pkg_name.lock().await; + for ((pkg_name, _ver), pkg_info) in satisfied_pkg { + // if pkg_name == "perl-libs" { + // dbg!(&require_item); + // dbg!(pkg_info); + // } + match same_pkg_name_lock.get_mut(&pkg_name) { + Some(set) => { + set.insert(pkg_info.pkg_index); + } + None => { + let mut set = HashSet::new(); + set.insert(pkg_info.pkg_index); + same_pkg_name_lock.insert(pkg_name, set); + } + } + } + + drop(same_pkg_name_lock); + + let entry_index = self.get_latest_index(); + self.update_appeared(&pkg_infos.iter().map(|x| x.pkg_index).collect::>()) + .await; + let mut entry_require_lock_again = self.entry_require.lock().await; + let mut index_entry_lock = self.index_map_entry.lock().await; + index_entry_lock.insert(entry_index, require_item.clone()); + entry_require_lock_again.insert(require_item, (entry_index, pkg_infos.clone())); + drop(entry_require_lock_again); + drop(index_entry_lock); + (entry_index, pkg_infos) + } + }; + Ok(r) + } + + async fn cmd_satisfy_requie( + &mut self, + item: &mut SolveItem, + satisfied_pkg: &mut HashMap<(String, Option), PkgIndexInfo>, + ) { + if let Some(cmd_install) = self + .cmd_repo + .data + .clone() + .get_what_pkg_provide_entry_by_name(item.entry_name.clone()) + .await + { + for (pkg_uuid, entry) in cmd_install { + let cmd_install_ver = Version::from_entry_detail(&entry); + let cmd_install_flag = entry.flags; + if item.check_version(&cmd_install_ver, &cmd_install_flag) { + self.handle_satisfy( + &entry.arch, + entry.pkg_name.clone(), + pkg_uuid, + Some(cmd_install_ver.clone()), + None, + RepoType::Cmd, + satisfied_pkg, + ) + .await; + } + } + } else if item.entry_name.starts_with("/") { + if let Some(cmd_install) = self + .cmd_repo + .data + .clone() + .get_what_pkg_provide_file_by_path(item.entry_name.clone()) + .await + { + for (pkg_uuid, file_detail) 
in cmd_install { + self.handle_satisfy( + &file_detail.arch, + file_detail.pkg_name.clone(), + pkg_uuid, + None, + Some(TransationState::ToInstall), + RepoType::Cmd, + satisfied_pkg, // satiesfied, + ) + .await; + } + }; + } + } + async fn handle_satisfy( + &mut self, + arch: &str, + pkg_name: String, + pkg_uuid: Uuid, + ver: Option, + state: Option, + repo: RepoType, + satisfied_pkg: &mut HashMap<(String, Option), PkgIndexInfo>, + ) { + let ver = match ver { + Some(v) => match &v.version { + Some(_) => Some(v), + None => self.get_pkg_version_by_uuid(&repo, pkg_uuid).await, + }, + None => self.get_pkg_version_by_uuid(&repo, pkg_uuid).await, + }; + let repo_priority = self.get_repo_priority(&repo); + let state = match state { + Some(s) => s, + None => match &repo { + RepoType::Installed => TransationState::Installed, + RepoType::Repo(_) => TransationState::ToInstall, + RepoType::Cmd => TransationState::ToInstall, + }, + }; + + let arch = self.get_arch_priority(arch); + if arch != 0 { + match satisfied_pkg.get_mut(&(pkg_name.clone(), ver.clone())) { + Some(pkg_info) => { + if arch > pkg_info.arch_priority && repo_priority >= pkg_info.repo_priority { + pkg_info.pkg_index = self + .add_pkg_info(Arc::new(pkg_name.to_owned()), pkg_uuid, repo, state) + .await; + match state { + TransationState::Installed => { + self.update_installed(pkg_info.pkg_index).await; + } + _ => {} + } + pkg_info.arch_priority = arch; + pkg_info.repo_priority = repo_priority; + } + } + None => { + let pkg_index = self + .add_pkg_info(Arc::new(pkg_name.to_owned()), pkg_uuid, repo, state) + .await; + match state { + TransationState::Installed => { + self.update_installed(pkg_index).await; + } + _ => {} + } + satisfied_pkg.insert( + (pkg_name, ver), + PkgIndexInfo { + pkg_index, + arch_priority: arch, + repo_priority, + }, + ); + } + } + } + } + async fn installed_satisfy_require( + &mut self, + item: &mut SolveItem, + satisfied_pkg: &mut HashMap<(String, Option), PkgIndexInfo>, + ) -> Result<()> { + 
if let Some(installeds) = self + .installed_repo + .data + .clone() + .get_what_pkg_provide_entry_by_name(item.entry_name.clone()) + .await + { + for (pkg_uuid, entry) in installeds { + let installed_ver = Version::from_entry_detail(&entry); + let installed_flag = entry.flags; + if item.check_version(&installed_ver, &installed_flag) { + self.handle_satisfy( + &entry.arch, + entry.pkg_name.clone(), + pkg_uuid, + Some(installed_ver.clone()), + None, + RepoType::Installed, + satisfied_pkg, + ) + .await; + } else { + self.installed_update_or_downgrade( + Arc::new(entry.pkg_name), + item, + Some(&installed_ver), + satisfied_pkg, + ) + .await?; + } + } + } else if item.entry_name.starts_with("/") { + if let Some(installed) = self + .installed_repo + .data + .clone() + .get_what_pkg_provide_file_by_path(item.entry_name.clone()) + .await + { + for (pkg_uuid, file_detail) in installed { + self.handle_satisfy( + &file_detail.arch, + file_detail.pkg_name.clone(), + pkg_uuid, + None, + None, + RepoType::Installed, + satisfied_pkg, // satiesfied, + ) + .await; + // self.installed_update_or_downgrade( + // Arc::new(file_detail.pkg_name), + // item, + // None, + // satisfied_pkg, + // ) + // .await?; + } + }; + }; + Ok(()) + } + async fn installed_update_or_downgrade( + &mut self, + pkg_name: Arc, + require_item: &mut SolveItem, + installed_ver: Option<&Version>, + satisfied_pkg: &mut HashMap<(String, Option), PkgIndexInfo>, + ) -> Result<()> { + if let Ok(s) = self.which_repo_provide_pkg(pkg_name.clone()).await { + let mut handles = Vec::new(); + for (repo, pkg_uuid) in s { + handles.push(( + RepoType::Repo(repo.config.name.clone()), + pkg_uuid, + tokio::spawn(repo.data.get_pkg_provides_by_uuid(pkg_uuid)), + )); + } + for (repo, pkg_uuid, handle) in handles { + if let Some(provides) = handle.await? 
{ + if let Some(entry) = provides + .iter() + .find(|e| e.entry_name == require_item.entry_name.as_str()) + { + let entry_ver = Version::from_entry_detail(entry); + if require_item.check_version(&entry_ver, &entry.flags) { + let state = match installed_ver { + Some(installed) => match entry_ver.partial_cmp(installed) { + Some(Ordering::Greater) => Some(TransationState::Upgrade), + Some(Ordering::Less) => Some(TransationState::Downgrade), + _ => { + continue; + } + }, + None => None, + }; + self.handle_satisfy( + &entry.arch, + entry.pkg_name.clone(), + pkg_uuid, + Some(entry_ver), + state, + repo, + satisfied_pkg, + ) + .await; + }; + } + } + } + // for (repo, pkg_uuid, handle) in handles { + // if let Some(pkg_detail) = handle.await? { + // if require_item.check_version(&pkg_detail.version, &Some("EQ".to_string())) { + // let state = match installed_ver { + // Some(installed) => match pkg_detail.version.partial_cmp(installed) { + // Some(Ordering::Greater) => Some(TransationState::Upgrade), + // Some(Ordering::Less) => Some(TransationState::Downgrade), + // _ => None, + // }, + // None => None, + // }; + // self.handle_satisfy( + // &pkg_detail.arch, + // pkg_detail.name, + // pkg_uuid, + // Some(pkg_detail.version), + // state, + // repo, + // satisfied_pkg, + // ) + // .await; + // }; + // } + // } + } + Ok(()) + } + // async fn installed_but_ver_not_satisfied( + // &mut self, + // pkg_name: Arc, + // item: &mut RequireItem, + // installed_ver: &Version, + // satiesfied: &mut HashMap, + // ) -> Result<()> { + // if let Ok(mut s) = self.which_repo_provide_pkg(pkg_name.clone()).await { + // let mut handles = Vec::new(); + // //which_repo_provide_pkg sort_by repo's priority + // //if multiple software packages with the same name and arch meet the requirements, + // //select the package with the highest repo's highest priority. 
+ // while let Some((repo, pkg_uuid)) = s.pop() { + // handles.push(( + // RepoType::Repo(repo.config.name.clone()), + // pkg_uuid, + // tokio::spawn(repo.data.get_pkg_detail_by_uuid(pkg_uuid)), + // )); + // } + // for (repo_name, pkg_uuid, handle) in handles { + // if let Some(pkg_detail) = handle.await? { + // if item.check_version(&pkg_detail.version, &Some("EQ".to_string())) { + // let name_arch = (pkg_name.to_string(), pkg_detail.arch.clone()); + // if !satiesfied.contains(&name_arch) { + // let state = match pkg_detail.version.partial_cmp(installed_ver) { + // Some(Ordering::Greater) => TransationState::Upgrade, + // Some(Ordering::Less) => TransationState::Downgrade, + // _ => { + // continue; + // } + // }; + // let index = self + // .add_pkg_info(pkg_name.clone(), pkg_uuid, repo_name, state) + // .await; + // satiesfied.insert(name_arch); + // item.add_index_with_arch(index, pkg_detail.arch); + // }; + // }; + // }; + // } + // } + // Ok(()) + // } + async fn repo_satisfy_require( + &mut self, + item: &mut SolveItem, + satisfied_pkg: &mut HashMap<(String, Option), PkgIndexInfo>, + ) -> Result<()> { + if let Some(v) = self.which_repo_pkg_provide_entry(&item.entry_name).await? { + for (repo, provides) in v { + for (pkg_uuid, entry) in provides { + let provide_ver = Version::from_entry_detail(&entry); + let provide_flag = entry.flags; + if item.check_version(&provide_ver, &provide_flag) { + self.handle_satisfy( + &entry.arch, + entry.pkg_name, + pkg_uuid, + Some(provide_ver), + None, + repo.clone(), + satisfied_pkg, + ) + .await + }; + } + } + } else if item.entry_name.starts_with("/") { + if let Some(v) = self.which_repo_pkg_provide_file(&item.entry_name).await? 
{ + for (repo, provides) in v { + for (pkg_uuid, entry) in provides { + self.handle_satisfy( + &entry.arch, + entry.pkg_name, + pkg_uuid, + None, + None, + repo.clone(), + satisfied_pkg, + ) + .await; + } + } + } + } + + Ok(()) + } +} diff --git a/rdnf/src/solve/solve_term.rs b/rdnf/src/solve/solve_term.rs new file mode 100644 index 0000000000000000000000000000000000000000..e31df3304f4f29162dea04f6d3b49585ac0d2d36 --- /dev/null +++ b/rdnf/src/solve/solve_term.rs @@ -0,0 +1,614 @@ +use std::{mem::replace, sync::Arc}; + +use super::{PkgIndexInfo, SolveContext, SolveItem, TermIndex}; +use crate::cache::ver::Version; +use anyhow::{bail, Result}; +use async_recursion::async_recursion; +use varisat::{ExtendFormula, Lit}; +#[derive(Debug, PartialEq)] +enum Operation { + And, + Or, + If, + Else, + With, + Without, + Unless, +} +// impl Operation { +// fn from_str(s: &str) -> Option { +// let s = s.trim_start().trim_end(); +// match s { +// "and" => Some(Operation::And), +// "or" => Some(Operation::Or), +// "if" => Some(Operation::If), +// "else" => Some(Operation::Else), +// "with" => Some(Operation::With), +// "without" => Some(Operation::Without), +// "unless" => Some(Operation::Unless), +// _ => None, +// } +// } +// } +#[derive(Debug, PartialEq)] +enum State { + Entry(String), + Term(String), + Op(Operation), +} + +impl SolveContext { + #[async_recursion] + pub(super) async fn solve_require_term( + mut self, + term: String, + ) -> Result<(TermIndex, Option>)> { + let term_index = self.get_latest_index(); + let (ops, term_entry_indexs, pkgs) = self.parse_term(&term).await?; + + if ops.contains(&Operation::Or) { + let mut lits = term_entry_indexs + .iter() + .map(|x| Lit::from_dimacs(*x)) + .collect::>(); + lits.insert(0, !Lit::from_dimacs(term_index)); + let mut formula_lock = self.formula.lock().await; + formula_lock.add_clause(&lits); + drop(formula_lock); + let mut pkgs_union = Vec::new(); + for ele in pkgs { + union(&mut pkgs_union, &ele); + } + let pkgs = if 
pkgs_union.is_empty() { + None + } else { + Some(pkgs_union) + }; + return Ok((term_index, pkgs)); + } + let lits_vec = if ops.contains(&Operation::And) { + let mut lits = Vec::new(); + for ele in term_entry_indexs { + lits.push(vec![!Lit::from_dimacs(term_index), Lit::from_dimacs(ele)]); + } + lits + } else if ops.contains(&Operation::If) && ops.contains(&Operation::Else) { + // m if p else n + if term_entry_indexs.len() != 3 { + bail!( + "Failed to parse term {} , 'm if p else n ' should be three operands", + term + ); + } + vec![ + vec![ + !Lit::from_dimacs(term_index), + !Lit::from_dimacs(term_entry_indexs[1]), + Lit::from_dimacs(term_entry_indexs[0]), + ], + vec![ + !Lit::from_dimacs(term_index), + Lit::from_dimacs(term_entry_indexs[1]), + Lit::from_dimacs(term_entry_indexs[2]), + ], + ] + } else if ops.contains(&Operation::If) { + // m if p + if term_entry_indexs.len() != 2 { + bail!( + "Failed to parse term {} , 'm if p' should be two operands", + term + ); + }; + vec![vec![ + !Lit::from_dimacs(term_index), + !Lit::from_dimacs(term_entry_indexs[1]), + Lit::from_dimacs(term_entry_indexs[0]), + ]] + } else if ops.contains(&Operation::With) { + // pkgA-foo with pkgA-bar + // if term == "(python3.11dist(pyasn1) < 0.5~~ with python3.11dist(pyasn1) >= 0.4.6)" { + // dbg!(&pkgs); + // } + // if term == "(gcc >= 12 with gcc < 13)" { + // dbg!(&pkgs); + // } + // let mut pkg_with = Vec::new(); + // for ele in pkgs { + // with(&mut pkg_with, &ele); + // } + let mut pkg_with = pkgs[0].clone(); + for i in 1..pkgs.len() { + with(&mut pkg_with, &pkgs[i]); + } + pkg_with.sort(); + // dbg!(&pkg_with); + let mut lits = pkg_with + .iter() + .map(|x| Lit::from_dimacs(x.pkg_index)) + .collect::>(); + lits.insert(0, !Lit::from_dimacs(term_index)); + vec![lits] + } else if ops.contains(&Operation::Without) { + let mut pkg_without = pkgs[0].clone(); + for i in 1..pkgs.len() { + without(&mut pkg_without, &pkgs[i]); + } + pkg_without.sort(); + let mut lits = pkg_without + .iter() + 
.map(|x| Lit::from_dimacs(x.pkg_index)) + .collect::>(); + lits.insert(0, !Lit::from_dimacs(term_index)); + vec![lits] + } else if ops.contains(&Operation::Unless) && ops.contains(&Operation::Else) { + // m unless p else n + //t->(p->n) !r,!p,n + //t->(!p->m) !r,p,m + if term_entry_indexs.len() != 3 { + bail!( + "Failed to parse term {} , 'm unless p else n ' should be three operands", + term + ); + } + vec![ + vec![ + !Lit::from_dimacs(term_index), + !Lit::from_dimacs(term_entry_indexs[1]), + Lit::from_dimacs(term_entry_indexs[2]), + ], + vec![ + !Lit::from_dimacs(term_index), + Lit::from_dimacs(term_entry_indexs[1]), + Lit::from_dimacs(term_entry_indexs[0]), + ], + ] + } else if ops.contains(&Operation::Unless) { + // m unless p + // r->!p->m !r,p,m + if term_entry_indexs.len() != 2 { + bail!( + "Failed to parse term {} , 'm unless p' should be two operands", + term + ); + }; + vec![vec![ + !Lit::from_dimacs(term_index), + Lit::from_dimacs(term_entry_indexs[1]), + Lit::from_dimacs(term_entry_indexs[0]), + ]] + } else { + let mut lits = term_entry_indexs + .iter() + .map(|x| Lit::from_dimacs(*x)) + .collect::>(); + lits.insert(0, !Lit::from_dimacs(term_index)); + vec![lits] + }; + let mut formula_lock = self.formula.lock().await; + for lits in lits_vec { + formula_lock.add_clause(&lits); + } + let mut index_term_lock = self.index_map_term.lock().await; + index_term_lock.insert(term_index, term); + drop(formula_lock); + Ok((term_index, None)) + } + async fn parse_term( + &mut self, + term: &str, + ) -> Result<(Vec, Vec, Vec>)> { + let states = parse_term_to_state(term); + if states.is_none() { + bail!("Can't parse term {}", term); + } + let states = states.unwrap(); + let mut term_entry_indexs = Vec::new(); + let mut pkg_index_infos = Vec::new(); + let mut ops = Vec::new(); + for ele in states { + match ele { + State::Entry(entry) => { + let (entry_index, pkg_indexs) = self + .clone() + .solve_require_item(SolveItem::from_str(&entry)) + .await?; + 
term_entry_indexs.push(entry_index); + pkg_index_infos.push(pkg_indexs); + // union(&mut pkg_index_infos, &pkg_indexs); + } + State::Term(sub_term) => { + let (term_index, pkg_indexs) = + self.clone().solve_require_term(sub_term).await?; + term_entry_indexs.push(term_index); + if let Some(pkg_indexs) = pkg_indexs { + pkg_index_infos.push(pkg_indexs); + // union(&mut pkg_index_infos, &pkg_indexs); + } + } + State::Op(op) => { + ops.push(op); + } + } + } + if term_entry_indexs.len() - ops.len() != 1 { + bail!( + "Failed to parse term {} , there are {} operands but {} term or entry", + term, + ops.len(), + term_entry_indexs.len() + ); + } + Ok((ops, term_entry_indexs, pkg_index_infos)) + } +} +fn parse_term_to_state(s: &str) -> Option> { + if let Some((_, rest)) = s.split_once('(') { + if let Some((rest, _)) = rest.rsplit_once(')') { + let mut states = Vec::new(); + let mut iter = rest.chars(); + let mut rest = String::new(); + let mut pre_char = ' '; + loop { + if let Some(ch) = iter.next() { + if pre_char.is_whitespace() && ch == '(' { + //get term + rest.clear(); + let mut term = String::new(); + let mut count = 1; + term.push('('); + loop { + if let Some(c) = iter.next() { + if c == '(' { + count += 1; + } else if c == ')' { + count -= 1; + } + term.push(c); + if count == 0 { + break; + } + } else { + break; + } + } + states.push(State::Term(term)); + } else { + rest.push(ch); + pre_char = ch; + let (entry, op) = get_op(rest.clone()); + if let Some(o) = op { + if let Some(p) = entry { + states.push(State::Entry(p)); + } + states.push(State::Op(o)); + rest.clear(); + } + } + } else { + break; + } + } + let rest = rest.trim_end().trim_start(); + if rest != "" { + states.push(State::Entry(rest.to_owned())); + } + return Some(states); + } + }; + None +} +fn get_op(s: String) -> (Option, Option) { + let s = s.trim_end(); + let (op, s) = if s.ends_with(" and") { + (Some(Operation::And), s.trim_end_matches("and")) + } else if s.ends_with(" or") { + (Some(Operation::Or), 
s.trim_end_matches("or")) + } else if s.ends_with(" if") { + (Some(Operation::If), s.trim_end_matches("if")) + } else if s.ends_with(" else") { + (Some(Operation::Else), s.trim_end_matches("else")) + } else if s.ends_with(" with") { + (Some(Operation::With), s.trim_end_matches("with")) + } else if s.ends_with(" without") { + (Some(Operation::Without), s.trim_end_matches("without")) + } else if s.ends_with(" unless") { + (Some(Operation::Unless), s.trim_end_matches("unless")) + } else { + (None, s) + }; + let entry = if op.is_some() { + let s = s.trim_end().trim_start(); + if s == "" { + None + } else { + Some(s.to_string()) + } + } else { + None + }; + (entry, op) +} + +impl SolveItem { + pub fn from_str(p: &str) -> Self { + let ((pkg_name, ver), flag) = if p.find("<=").is_some() { + (p.split_once("<=").unwrap(), "LE") + } else if p.find("<").is_some() { + (p.split_once("<").unwrap(), "LT") + } else if p.find(">=").is_some() { + (p.split_once(">=").unwrap(), "GE") + } else if p.find(">").is_some() { + (p.split_once(">").unwrap(), "GT") + } else if p.find("=").is_some() { + (p.split_once("=").unwrap(), "EQ") + } else { + ((p, ""), "") + }; + let flag = if flag == "" { + None + } else { + Some(flag.to_owned()) + }; + SolveItem { + entry_name: Arc::new(pkg_name.trim().to_owned()), + ver: Version::from_str(ver), + flag, + } + } +} + +fn with(left: &mut Vec, right: &Vec) { + let mut r = Vec::new(); + if left.len() > right.len() { + for ele in right { + if left.contains(ele) { + r.push(*ele); + }; + } + } else { + for ele in left.iter() { + if right.contains(ele) { + r.push(*ele); + }; + } + } + let _ = replace(left, r); +} +fn without(left: &mut Vec, right: &Vec) { + let mut r = Vec::new(); + for ele in left.iter() { + if !right.contains(ele) { + r.push(*ele); + } + } + // r + let _ = replace(left, r); +} +fn union(left: &mut Vec, right: &Vec) { + for ele in right { + if !left.contains(ele) { + left.push(*ele); + } + } +} +#[cfg(test)] +mod test { + use 
std::{mem::replace, sync::Arc}; + + use crate::{ + cache::ver::Version, + solve::{ + solve_term::{parse_term_to_state, without, Operation, State}, + PkgIndexInfo, SolveItem, + }, + }; + + #[test] + fn test_get_term() { + assert_eq!( + parse_term_to_state("((pam >= 1.3.1-15) if openssh)"), + Some(vec![ + State::Term("(pam >= 1.3.1-15)".to_owned()), + State::Op(Operation::If), + State::Entry("openssh".to_owned()), + ]) + ); + assert_eq!( + parse_term_to_state("(pam >= 1.3.1-15)"), + Some(vec![State::Entry("pam >= 1.3.1-15".to_owned()),]) + ); + assert_eq!( + parse_term_to_state("(( A(64) < 1 or B ) if C <= 2.3-fc37 else (D with E))"), + Some(vec![ + State::Term("( A(64) < 1 or B )".to_owned()), + State::Op(Operation::If), + State::Entry("C <= 2.3-fc37".to_owned()), + State::Op(Operation::Else), + State::Term("(D with E)".to_owned()) + ]) + ); + assert_eq!( + parse_term_to_state("(( A(64) < 1 or B ) and C <= 2.3-fc37 and D(64) < 2)"), + Some(vec![ + State::Term("( A(64) < 1 or B )".to_owned()), + State::Op(Operation::And), + State::Entry("C <= 2.3-fc37".to_owned()), + State::Op(Operation::And), + State::Entry("D(64) < 2".to_owned()) + ]) + ); + assert_eq!( + parse_term_to_state("((fcitx5-qt6(x86-64) = 5.0.15-1.fc37) if qt6-qtbase)"), + Some(vec![ + State::Term("(fcitx5-qt6(x86-64) = 5.0.15-1.fc37)".to_owned()), + State::Op(Operation::If), + State::Entry("qt6-qtbase".to_owned()) + ]) + ); + assert_eq!( + parse_term_to_state( + "((python3.11dist(google-api-core) < 2.1~~ or python3.11dist(google-api-core) >= 2.2) + with (python3.11dist(google-api-core) < 2.2~~ or python3.11dist(google-api-core) >= 2.3) + with (python3.11dist(google-api-core) < 2.3 or python3.11dist(google-api-core) > 2.3) + with (python3.11dist(google-api-core) < 2~~ or python3.11dist(google-api-core) >= 2.1) + with python3.11dist(google-api-core) < 3~~dev0 + with python3.11dist(google-api-core) >= 1.31.5)" + ), + Some(vec![State::Term( + "(python3.11dist(google-api-core) < 2.1~~ or 
python3.11dist(google-api-core) >= 2.2)" + .to_owned() + ), + State::Op(Operation::With), + State::Term("(python3.11dist(google-api-core) < 2.2~~ or python3.11dist(google-api-core) >= 2.3)".to_owned()), + State::Op(Operation::With), + State::Term("(python3.11dist(google-api-core) < 2.3 or python3.11dist(google-api-core) > 2.3)".to_owned()), + State::Op(Operation::With), + State::Term("(python3.11dist(google-api-core) < 2~~ or python3.11dist(google-api-core) >= 2.1)".to_owned()), + State::Op(Operation::With), + State::Entry("python3.11dist(google-api-core) < 3~~dev0".to_owned()), + State::Op(Operation::With), + State::Entry("python3.11dist(google-api-core) >= 1.31.5".to_owned()), + ]) + ) + } + + #[test] + fn test_from_str() { + assert_eq!( + SolveItem::from_str("python3.11dist(niapy) >= 1.24.1-1.fc37"), + SolveItem { + entry_name: Arc::new("python3.11dist(niapy)".to_owned()), + ver: Version { + epoch: None, + version: Some("1.24.1".to_owned()), + release: Some("1.fc37".to_owned()) + }, + flag: Some("GE".to_owned()) + } + ); + assert_eq!( + SolveItem::from_str("python3.11dist(niapy) <= 1.24.1"), + SolveItem { + entry_name: Arc::new("python3.11dist(niapy)".to_owned()), + ver: Version { + epoch: None, + version: Some("1.24.1".to_owned()), + release: None + }, + flag: Some("LE".to_owned()) + } + ); + } + #[test] + fn test_term() { + if let Some(s) = parse_term_to_state("((fcitx5-qt6(x86-64) = 5.0.15-1.fc37) if qt6-qtbase)") + { + if s.contains(&State::Op(Operation::If)) { + assert!(true) + } else { + assert!(false) + } + }; + } + fn with_a(left: &mut Vec, right: &Vec) { + let mut r = Vec::new(); + if left.len() > right.len() { + for ele in right { + if left.contains(ele) { + r.push(*ele); + }; + } + } else { + for ele in left.iter() { + if right.contains(ele) { + r.push(*ele); + }; + } + } + let _ = replace(left, r); + } + #[test] + fn test_with() { + let pkgs = vec![ + vec![PkgIndexInfo { + pkg_index: 812, + arch_priority: 2, + repo_priority: 0, + }], + 
vec![PkgIndexInfo { + pkg_index: 812, + arch_priority: 2, + repo_priority: 0, + }], + ]; + let mut pkg_with = pkgs[0].clone(); + for i in 1..pkgs.len() { + with_a(&mut pkg_with, &pkgs[i]); + } + + // for ele in pkgs { + // with_a(&mut pkg_with, &ele); + // } + pkg_with.sort(); + dbg!(&pkg_with); + } + #[test] + fn test_without() { + let mut left = vec![ + PkgIndexInfo { + pkg_index: 1, + arch_priority: 4, + repo_priority: 0, + }, + PkgIndexInfo { + pkg_index: 2, + arch_priority: 4, + repo_priority: 0, + }, + PkgIndexInfo { + pkg_index: 3, + arch_priority: 5, + repo_priority: 0, + }, + ]; + let right = vec![ + PkgIndexInfo { + pkg_index: 2, + arch_priority: 4, + repo_priority: 0, + }, + PkgIndexInfo { + pkg_index: 6, + arch_priority: 5, + repo_priority: 0, + }, + ]; + without(&mut left, &right); + assert_eq!( + left, + vec![ + PkgIndexInfo { + pkg_index: 1, + arch_priority: 4, + repo_priority: 0, + }, + PkgIndexInfo { + pkg_index: 3, + arch_priority: 5, + repo_priority: 0, + } + ] + ) + } + #[test] + fn test_op() { + let ops = vec![Operation::If, Operation::Else]; + // assert_eq!(ops, vec![Operation::If, Operation::Else]); + if ops == vec![Operation::If, Operation::Else] { + assert!(true) + } else { + assert!(false) + } + } +} diff --git a/rdnf/src/sub_command/cache.rs b/rdnf/src/sub_command/cache.rs deleted file mode 100644 index 5fbf1a60f353db8f3f0e7757b519ec4c5bde5379..0000000000000000000000000000000000000000 --- a/rdnf/src/sub_command/cache.rs +++ /dev/null @@ -1,127 +0,0 @@ -use std::{ - fs::{metadata, read_dir, remove_dir, remove_file}, - path::PathBuf, - time::SystemTime, -}; - -use anyhow::{bail, Result}; -use console::style; - -use crate::{ - default::{CMDLINE_REPO_NAME, REPODATA_DIR_NAME, REPO_METADATA_MARKER, SOLVCACHE_DIR_NAME}, - errors::ERROR_RDNF_CACHE_REFRESH, - utils::check_root, - Rdnf, -}; -impl Rdnf { - pub fn make_cache(&mut self) -> Result<()> { - let mut new_repos = Vec::new(); - for repo in self.repos.clone() { - if repo.psz_name == 
CMDLINE_REPO_NAME || !repo.base.enabled { - new_repos.push(repo); - continue; - } - if repo.base.lmetadata_expire >= 0 && !self.rc.cli.cacheonly { - match &repo.details.cache_name { - Some(s) => { - let refresh_flag = - self.rc.conf.cachedir.clone() + s.as_str() + "/" + REPO_METADATA_MARKER; - if should_sync_metadata(refresh_flag.as_str(), repo.base.lmetadata_expire)? - { - if check_root().is_err() { - if !self.rc.cli.cacheonly { - bail!(ERROR_RDNF_CACHE_REFRESH); - } - } - let repodata_dir = PathBuf::from( - self.rc.conf.cachedir.clone() - + s.as_str() - + "/" - + REPODATA_DIR_NAME, - ); - recursively_remove_dir(&repodata_dir)?; - let solv_cache_dir = PathBuf::from( - self.rc.conf.cachedir.to_string() - + s.as_str() - + "/" - + SOLVCACHE_DIR_NAME, - ); - recursively_remove_dir(&solv_cache_dir)?; - } - let psz_id = repo.psz_id.clone(); - let status = if repo.base.skip_if_unavailable { - match self.rc.init_repo(repo) { - Ok(s) => { - new_repos.push(s); - format!("{}", style("Done").green()) - } - Err(_) => { - format!("{}", style("Skip").red()) - } - } - } else { - new_repos.push(self.rc.init_repo(repo)?); - format!("{}", style("Done").green()) - }; - if self.rc.cli.refresh { - let (_, width) = self.rc.term.size(); - let offset = (width - 10) as usize; - self.rc.term.write_line(&psz_id)?; - self.rc.term.move_cursor_up(1)?; - self.rc.term.move_cursor_right(offset)?; - self.rc.term.write_line(&status)?; - } - } - None => { - bail!("repo {} enabled,but need baseurl or metalink", repo.psz_id); - } - } - } - } - self.repos = new_repos; - // Ok(self) - Ok(()) - } -} -pub fn should_sync_metadata(f: &str, expire: i128) -> Result { - let should = match metadata(f) { - Ok(m) => { - let mtime = m.modified()?; - let now = SystemTime::now(); - let duration = now.duration_since(mtime)?; - let s = duration.as_secs(); - if s as i128 >= expire { - true - } else { - false - } - } - Err(_) => true, - }; - // if should { - // let file = 
OpenOptions::new().append(true).create(true).write(true).open(f)?; - // file.write_at(buf, offset) - // } - // let file = ; - Ok(should) -} -pub fn recursively_remove_dir(dir: &PathBuf) -> Result<()> { - if dir.is_dir() { - match read_dir(dir) { - Ok(s) => { - for x in s { - let entry = x?; - let path = entry.path(); - if path.is_dir() { - recursively_remove_dir(&path)?; - } else if path.is_file() { - remove_file(path)?; - } - } - } - Err(_) => {} - } - remove_dir(dir)?; - } - Ok(()) -} diff --git a/rdnf/src/sub_command/info.rs b/rdnf/src/sub_command/info.rs deleted file mode 100644 index 5a654f70b3ac73071e6ec02a716b01fcdcb4fcc1..0000000000000000000000000000000000000000 --- a/rdnf/src/sub_command/info.rs +++ /dev/null @@ -1,143 +0,0 @@ -use crate::{ - cli::InfoOption, - i18n::pkg_info::{ - PKG_INFO_ARCH___, PKG_INFO_DESC___, PKG_INFO_EPOCH__, PKG_INFO_LICENSE, PKG_INFO_NAME___, - PKG_INFO_RELEASE, PKG_INFO_REPO___, PKG_INFO_SIZE___, PKG_INFO_SUMMARY, PKG_INFO_URL____, - PKG_INFO_VERSION, - }, - solv::rdnf_query::SolvQuery, - Rdnf, -}; -use anyhow::Result; -// #[derive(Clone)] -// pub enum RdnfScope { -// All, -// Installed, -// Available, -// Extras, -// Obsoletes, -// Recent, -// Upgrades, -// DownGrades, -// } - -#[derive(Debug, Clone)] -pub struct PkgInfo { - pub base: PkgInfoBase, - pub details: PkgInfoDetails, - pub other: Option, -} -#[derive(Debug, Clone)] -pub struct PkgInfoBase { - pub epoch: u32, - pub name: String, - pub version: String, - pub release: String, - pub arch: String, - pub evr: String, - pub repo_name: String, -} -#[derive(Debug, Clone)] -pub struct PkgInfoDetails { - pub install_size: u64, - pub formatted_size: String, - pub summary: String, - pub location: Option, -} -#[derive(Debug, Clone)] -pub struct PkgInfoOther { - pub url: String, - pub license: String, - pub description: String, -} -// impl InfoOption { -// pub fn parse_scope(&self){ -// let mut scopes=Vec::new(); - -// } - -// } -impl Rdnf { - pub fn info_command(&mut self, 
info_opt: InfoOption) -> Result<()> { - self.make_cache()?; - let mut query = SolvQuery::default(self.rc.sack.clone()); - if info_opt.all || info_opt.installed { - query.solv_add_system_repo_filter()?; - } - if info_opt.available { - query.solv_add_available_repo_filter()?; - } - if !info_opt.pkgs.is_empty() { - query.package_names = Some(info_opt.pkgs); - } - query.solv_apply_list_query()?; - match query.solv_get_query_result() { - Ok(pkg_list) => { - let pkg_infos = - PkgInfo::populate_pkg_info(&self.rc.sack, &pkg_list, PkgInfoLevel::Other)?; - for pkg_info in pkg_infos { - let term = &self.rc.term; - term.write_line( - format!("{}\t : {}", PKG_INFO_NAME___, pkg_info.base.name).as_str(), - )?; - term.write_line( - format!("{}\t : {}", PKG_INFO_ARCH___, pkg_info.base.arch).as_str(), - )?; - term.write_line( - format!("{}\t : {}", PKG_INFO_EPOCH__, pkg_info.base.epoch).as_str(), - )?; - term.write_line( - format!("{}\t : {}", PKG_INFO_VERSION, pkg_info.base.version).as_str(), - )?; - term.write_line( - format!("{}\t : {}", PKG_INFO_RELEASE, pkg_info.base.release).as_str(), - )?; - term.write_line( - format!( - "{}\t : {}", - PKG_INFO_SIZE___, pkg_info.details.formatted_size - ) - .as_str(), - )?; - term.write_line( - format!("{}\t : {}", PKG_INFO_REPO___, pkg_info.base.repo_name).as_str(), - )?; - term.write_line( - format!("{}\t : {}", PKG_INFO_SUMMARY, pkg_info.details.summary).as_str(), - )?; - if pkg_info.other.is_some() { - term.write_line( - format!( - "{}\t : {}", - PKG_INFO_URL____, - pkg_info.other.as_ref().unwrap().url - ) - .as_str(), - )?; - term.write_line( - format!( - "{}\t : {}", - PKG_INFO_LICENSE, - pkg_info.other.as_ref().unwrap().license - ) - .as_str(), - )?; - let desc:Vec<&str> = pkg_info.other.as_ref().unwrap().description.split("\n").collect(); - term.write_line(format!("{}\t : {}",PKG_INFO_DESC___,desc[0]).as_str())?; - for item in &desc[1..]{ - term.write_line(format!("\t\t : {}",item).as_str())?; - } - - } - term.write_line("")?; - } - } 
- Err(_) => {} - }; - Ok(()) - } -} -pub enum PkgInfoLevel { - Details, - Other, -} diff --git a/rdnf/src/sub_command/install.rs b/rdnf/src/sub_command/install.rs deleted file mode 100644 index 9352ce1f3517c287be541028a4101576757ccf30..0000000000000000000000000000000000000000 --- a/rdnf/src/sub_command/install.rs +++ /dev/null @@ -1,430 +0,0 @@ -use std::{ - ffi::CString, - fs::{create_dir_all, metadata}, - path::Path, -}; - -use crate::{ - c_lib::{queue_push, queue_empty, get_queue_element_value}, - cli::AlterOption, - default::{CMDLINE_REPO_NAME, RPM_CACHE_DIR_NAME, GPGKEY_CACHE_DIR_NAME}, - errors::{ - ERROR_RDNF_INVALID_PARAMETER, ERROR_RDNF_NOTHING_TO_DO, ERROR_RDNF_NO_MATCH, - ERROR_RDNF_REPO_NOT_FOUND, ERROR_RDNF_URL_INVALID, - }, - goal::SolvedPkgInfo, - solv::rdnf_query::init_queue, - Rdnf, -}; -use anyhow::{bail, Result}; -use console::style; -use dialoguer::{theme::ColorfulTheme, Confirm}; -use solv_sys::ffi::{ - pool_createwhatprovides, repo_add_rpm, repo_internalize, s_Queue, Queue, REPO_NO_INTERNALIZE, - REPO_REUSE_REPODATA, RPM_ADD_WITH_HDRID, RPM_ADD_WITH_SHA256SUM, -}; - -use super::{repo::RepoData, repoutils::download_file}; -impl Rdnf { - pub fn alter_command( - &mut self, - pkgs: Vec, - alter_type: AlterType, - alter_args: &AlterOption, - ) -> Result<()> { - let solved_pkg_info = self.resolve(&pkgs, alter_type.clone(), alter_args)?; - let silent = alter_args.quiet && alter_args.assume_yes; - if !silent && !solved_pkg_info.not_resolved.is_empty() { - for pkg_name in &solved_pkg_info.not_resolved { - self.rc - .term - .write_line(&format!("No package {} available", style(pkg_name).red()))?; - } - } - if solved_pkg_info.need_action == 0 { - if solved_pkg_info.not_resolved.is_empty() { - bail!(ERROR_RDNF_NO_MATCH); - } else { - bail!(ERROR_RDNF_NOTHING_TO_DO); - } - } - if !silent { - solved_pkg_info.print(&self.rc.term)?; - if alter_args.download_only { - self.rc - .term - .write_line("rdnf will only download packages needed for the transaction")?; 
- } - } - if solved_pkg_info.need_action > 0 { - if Confirm::with_theme(&ColorfulTheme::default()) - .with_prompt("Is this ok") - .interact() - .unwrap() - { - if !silent && solved_pkg_info.need_download > 0 { - self.rc.term.write_line("Downloading:")?; - } - self.rpm_exec_transaction(&solved_pkg_info, &alter_type, alter_args)?; - } - } - Ok(()) - } - pub fn resolve( - &mut self, - pkgs: &Vec, - alter_type: AlterType, - alter_args: &AlterOption, - ) -> Result { - let mut queue_goal = init_queue(); - let mut not_resolved = Vec::::new(); - if alter_type.is_install() || alter_type.is_reinstall() { - self.add_cmdline_pkgs(&pkgs, &mut queue_goal as *mut Queue)?; - } - self.make_cache()?; - self.prepare_all_pkgs(alter_type.clone(), pkgs, &mut not_resolved, &mut queue_goal)?; - let solved_pkg_info_base = self.goal(&mut queue_goal, alter_type.clone(), alter_args)?; - solved_pkg_info_base.check_protected_pkgs()?; - Ok(SolvedPkgInfo { - need_action: solved_pkg_info_base.get_need_action(), - need_download: solved_pkg_info_base.get_need_download(), - not_available: None, - existing: None, - not_resolved, - not_installed: None, - base: solved_pkg_info_base, - }) - } - - pub fn add_cmdline_pkgs(&self, pkgs: &Vec, queue: *mut s_Queue) -> Result<()> { - for pkg in pkgs { - let rpm_path = if Path::new(pkg.as_str()).exists() && pkg.ends_with(".rpm") { - pkg.clone() - } else { - if !is_remote_url(pkg.as_str()) { - if !pkg.starts_with("file://") { - continue; - } else { - let k = pkg.split_once("file://").unwrap().1; - if k == "" || k.matches("#").collect::>().len() > 0 { - bail!(ERROR_RDNF_URL_INVALID); - }; - "/".to_string() + k.split_once("/").unwrap().1 - } - } else { - let pkg_name = pkg.rsplit_once("/").unwrap().1; - match self.repos.iter().find(|x| x.psz_id == CMDLINE_REPO_NAME) { - Some(repo) => self.download_pkg_to_cache( - pkg.as_str(), - pkg_name, - repo, - RPM_CACHE_DIR_NAME, - )?, - None => { - bail!(ERROR_RDNF_REPO_NOT_FOUND) - } - } - } - }; - unsafe { - let file_path = 
CString::new(rpm_path.as_str()).unwrap(); - let id = repo_add_rpm( - self.solv_cmdline_repo, - file_path.as_ptr(), - (REPO_REUSE_REPODATA - | REPO_NO_INTERNALIZE - | RPM_ADD_WITH_HDRID - | RPM_ADD_WITH_SHA256SUM) - .try_into() - .unwrap(), - ); - if id == 0 { - bail!(ERROR_RDNF_INVALID_PARAMETER) - } - queue_push(queue, id); - } - } - unsafe { - pool_createwhatprovides(self.rc.sack.pool); - repo_internalize(self.solv_cmdline_repo); - } - Ok(()) - } - pub fn prepare_all_pkgs( - &self, - alter_type: AlterType, - pkgs: &Vec, - not_resolved: &mut Vec, - queue_goal: *mut Queue, - ) -> Result<()> { - let mut queue_local = init_queue(); - match alter_type { - AlterType::DownGradeAll => { - //TODO - } - AlterType::AutoEraseAll => { - //TODO - } - _ => {} - } - let cli = &self.rc.cli; - if (alter_type.is_upgrade_all() || alter_type.is_upgrade()) - && (cli.security || cli.sec_severity.is_some() || cli.reboot_required) - { - let pkgs=self.get_update_pkgs()?; - for pkg_name in pkgs { - self.prepare_single_pkg(pkg_name.as_str(), AlterType::Upgrade, not_resolved, queue_goal)?; - } - } else { - for pkg in pkgs { - if is_glob(pkg.as_str()) { - queue_empty(&mut queue_local); - self.rc.sack.get_glob_pkgs(pkg, &mut queue_local)?; - if queue_local.count ==0{ - not_resolved.push(pkg.to_string()); - }else{ - for index in 0..queue_local.count{ - let id=get_queue_element_value(&queue_local, index as u32); - let pkg_name=self.rc.sack.solv_get_pkg_name_by_id(id)?; - self.prepare_single_pkg(pkg_name.as_str(), alter_type.clone(), not_resolved, queue_goal)?; - } - } - } else { - if Path::new(pkg.as_str()).exists() && pkg.ends_with(".rpm") { - continue; - } - if is_remote_url(pkg.as_str()) || pkg.starts_with("file://") { - continue; - } - self.prepare_single_pkg( - pkg.as_str(), - alter_type.clone(), - not_resolved, - queue_goal, - )?; - } - } - }; - Ok(()) - } - - pub fn prepare_single_pkg( - &self, - pkg_name: &str, - alter_type: AlterType, - not_resolved: &mut Vec, - queue_goal: *mut Queue, 
- ) -> Result<()> { - match self.rc.sack.solv_count_pkg_by_name(pkg_name) { - Ok(0) => { - not_resolved.push(pkg_name.to_string()); - return Ok(()); - } - Ok(count) => count, - Err(_) => { - bail!("{} package not found or not installed", pkg_name) - } - }; - match alter_type { - AlterType::ReInstall => { - self.rc.sack.add_pkgs_for_reinstall(queue_goal, pkg_name)?; - } - AlterType::Erase | AlterType::AutoErase => { - self.rc.sack.add_pkgs_for_erase(queue_goal, pkg_name)?; - } - AlterType::Install => { - if !self.rc.sack.add_pkgs_for_install(queue_goal, pkg_name)? { - println!("Package {} is already installed", pkg_name); - } - } - AlterType::Upgrade => { - self.rc.sack.add_pkgs_for_upgrade(queue_goal, pkg_name)?; - } - AlterType::DownGradeAll | AlterType::DownGrade => {} - _ => {} - } - Ok(()) - } -} -pub fn is_remote_url(url: &str) -> bool { - let mut is_url = false; - for m in ["http://", "https://", "ftp://", "ftps://"] { - if url.starts_with(m) { - is_url = true; - break; - }; - } - is_url -} -pub fn is_glob(s: &str) -> bool { - for ele in s.chars() { - if ele == '*' || ele == '?' 
|| ele == '[' { - return true; - } - } - false -} - -impl Rdnf { - pub fn download_pkg_to_cache( - &self, - url: &str, - pkg_name: &str, - repo: &RepoData, - dir: &str, - ) -> Result { - let rpm_cache_dir = self.rc.conf.cachedir.clone() + repo.psz_id.as_str() + "/" + dir + "/"; - let u = match url.split_once("://") { - Some((_, s)) => match s.split_once("/") { - Some((_, s)) => s, - None => { - bail!(ERROR_RDNF_URL_INVALID) - } - }, - None => { - bail!(ERROR_RDNF_URL_INVALID) - } - }; - let rpm_cache_file = rpm_cache_dir + u; - let download_cache_dir = rpm_cache_file - .rsplit_once("/") - .unwrap() - .0 - .trim_end_matches("/"); - let rpm_cache_file = download_cache_dir.to_string() + "/" + pkg_name; - create_dir_all(download_cache_dir)?; - let (_, width) = self.rc.term.size(); - let repo_width = (width as f32 * 0.6) as usize; - let flag_width = width as usize - repo_width - 4; - - if Path::new(rpm_cache_file.as_str()).exists() { - let m = metadata(rpm_cache_file.as_str())?; - if m.len() > 0 { - let item = format!( - "{:rest$}{:>4}", - pkg_name, - style("exists").green(), - "", - width = repo_width, - rest = flag_width - ); - self.rc.term.write_line(item.as_str())?; - return Ok(rpm_cache_file); - } - } - download_file(&self.rc, repo, url, rpm_cache_file.as_str(), pkg_name)?; - let item = format!( - "{:rest$}{:>4}", - pkg_name, - style("downloaded").green(), - "", - width = repo_width, - rest = flag_width - ); - self.rc.term.write_line(item.as_str())?; - Ok(rpm_cache_file) - } - pub fn download_key_to_cache( - &self, - url: &str, - repo: &RepoData, - ) -> Result { - let (_,rest) = url.split_once("://").unwrap(); - let rest = match rest.rsplit_once("/") { - Some((_,r)) => {r}, - None => {rest}, - }; - let file_path=self.rc.conf.cachedir.to_string()+repo.psz_id.as_str()+"/"+GPGKEY_CACHE_DIR_NAME+"/"+rest; - let (_, width) = self.rc.term.size(); - let repo_width = (width as f32 * 0.6) as usize-" gpgkey".len(); - let flag_width = width as usize - repo_width - 4; - 
download_file(&self.rc, repo, url, file_path.as_str(), repo.psz_id.as_str())?; - let item = format!( - "{:rest$}{:>4}", - repo.psz_id, - style("downloaded").green(), - "", - width = repo_width, - rest = flag_width - ); - self.rc.term.write_line(item.as_str())?; - Ok(file_path) - } -} - -#[derive(Debug, Clone)] -pub enum AlterType { - AutoErase, - AutoEraseAll, - DownGrade, - DownGradeAll, - Erase, - Install, - ReInstall, - Upgrade, - UpgradeAll, - DistroSync, - Obsoleted, -} -impl AlterType { - pub fn is_auto_erase(&self) -> bool { - match self { - Self::AutoErase => true, - _ => false, - } - } - pub fn is_auto_erase_all(&self) -> bool { - match self { - Self::AutoEraseAll => true, - _ => false, - } - } - pub fn is_down_grade(&self) -> bool { - match self { - Self::DownGradeAll => true, - _ => false, - } - } - pub fn is_erase(&self) -> bool { - match self { - Self::Erase => true, - _ => false, - } - } - pub fn is_install(&self) -> bool { - match self { - Self::Install => true, - _ => false, - } - } - pub fn is_reinstall(&self) -> bool { - match self { - Self::ReInstall => true, - _ => false, - } - } - pub fn is_upgrade(&self) -> bool { - match self { - Self::Upgrade => true, - _ => false, - } - } - pub fn is_upgrade_all(&self) -> bool { - match self { - Self::UpgradeAll => true, - _ => false, - } - } - pub fn is_distro_sync(&self) -> bool { - match self { - Self::DistroSync => true, - _ => false, - } - } - pub fn is_obsoleted(&self) -> bool { - match self { - Self::Obsoleted => true, - _ => false, - } - } -} diff --git a/rdnf/src/sub_command/mod.rs b/rdnf/src/sub_command/mod.rs deleted file mode 100644 index 24e367b0a9065ad0cfd4055d4ad3bd2bd683a645..0000000000000000000000000000000000000000 --- a/rdnf/src/sub_command/mod.rs +++ /dev/null @@ -1,7 +0,0 @@ -pub mod repo; -pub mod cache; -pub mod repoutils; -pub mod search; -pub mod install; -pub mod update; -pub mod info; \ No newline at end of file diff --git a/rdnf/src/sub_command/repo.rs 
b/rdnf/src/sub_command/repo.rs deleted file mode 100644 index 4fce7ccbcd5b6a4c5eded6fc14b6c228bdf44466..0000000000000000000000000000000000000000 --- a/rdnf/src/sub_command/repo.rs +++ /dev/null @@ -1,514 +0,0 @@ -use std::{ffi::CString, os::raw::c_void}; - -use anyhow::{bail, Result}; -use config::Config; -use console::style; -use glob::Pattern; -use solv_sys::ffi::{pool_createwhatprovides, repo_create, Repo}; - -use crate::solv::sack::solv_create_cache_name; -use crate::solv::sack::Solvsack; -use crate::{ - cli::Commands, - conf::ConfigMain, - default::{VAR_BASEARCH, VAR_RELEASEVER}, - errors::ERROR_RDNF_INVALID_PARAMETER, - i18n::repo_list::{ - REPOLIST_REPO_ID, REPOLIST_REPO_NAME, REPOLIST_REPO_STATUS, REPOLIST_REPO_STATUS_DISABLED, - REPOLIST_REPO_STATUS_ENABLED, - }, - solv::SolvRepoInfoInernal, - utils::check_dir, - Cli, Rdnf, -}; - -const CMDLINE_REPO_NAME: &str = "@cmdline"; -pub enum RepoListFilter { - All, - Enabled, - Disabled, -} - -impl Rdnf { - pub fn repo_list(self) -> Result<()> { - match self.rc.cli.command { - Commands::Repolist(opt) => { - let sum = opt.all as usize + opt.enabled as usize + opt.disabled as usize; - if sum > 1 { - bail!("you can only choose one of three options (all,enabled,disabled)") - }; - let filter = match opt.all { - true => RepoListFilter::All, - false => match opt.enabled { - true => RepoListFilter::Enabled, - false => match opt.disabled { - true => RepoListFilter::Disabled, - false => RepoListFilter::Enabled, - }, - }, - }; - let (_heigt, width) = self.rc.term.size(); - let l = (width as f32 * 0.3) as usize; - let c = (width as f32 * 0.5) as usize; - let r = width as usize - l - c; - let title = format!( - "{: true, - RepoListFilter::Enabled => repo.base.enabled, - RepoListFilter::Disabled => !repo.base.enabled, - }; - if repo.psz_name == CMDLINE_REPO_NAME { - is_show = false; - } - if is_show { - let status = match repo.base.enabled { - true => { - format!("{}", style(REPOLIST_REPO_STATUS_ENABLED).green()) - } - false => 
{ - format!("{}", style(REPOLIST_REPO_STATUS_DISABLED).red()) - } - }; - let item = format!( - "{: {} - } - Ok(()) - } -} -pub fn load_repo_data(config: &ConfigMain, filter: RepoListFilter) -> Result> { - let repo_cmdline = RepoData::create_repo(CMDLINE_REPO_NAME); - let mut repodatas: Vec = Vec::new(); - repodatas.push(repo_cmdline); - let dir = config.repodir.as_str(); - check_dir(dir)?; - match glob::glob((dir.to_string() + "*.repo").as_str()) { - Ok(paths) => { - match Config::builder() - .add_source( - paths - .map(|p| config::File::from(p.unwrap()).format(config::FileFormat::Ini)) - .collect::>(), - ) - .build() - { - Ok(c) => { - for (psz_id, value) in c - .try_deserialize::>() - .unwrap() - { - let mut repo = RepoData::create_repo(psz_id.as_str()); - for (key, v) in value.into_table().unwrap() { - repo.update_repo(key.as_str(), v)? - } - match filter { - RepoListFilter::All => repodatas.push(repo), - RepoListFilter::Enabled => { - if repo.base.enabled { - repodatas.push(repo) - } - } - RepoListFilter::Disabled => { - if !repo.base.enabled { - repodatas.push(repo) - } - } - }; - } - } - Err(_) => { - bail!("Failed to parse config files .repo dir {}", dir) - } - }; - } - Err(_) => { - bail!("Failed to search .repo files in dir {}", dir) - } - }; - Ok(repodatas) -} -pub fn repo_list_finalize( - cli: &mut Cli, - config: &ConfigMain, - repos: &mut Vec, -) -> Result<()> { - match &cli.repoid { - Some(v) => { - alter_repo_state_enable(repos, false, "*")?; - for pattern in v { - alter_repo_state_enable(repos, true, pattern)?; - } - } - None => {} - } - match &cli.enablerepo { - Some(v) => { - for pattern in v { - alter_repo_state_enable(repos, true, pattern)?; - } - } - None => {} - } - match &cli.disablerepo { - Some(v) => { - for pattern in v { - alter_repo_state_enable(repos, false, pattern)?; - } - } - None => {} - } - for repo in repos { - repo.psz_name = repo - .psz_name - .replace(VAR_RELEASEVER, &config.var_release_ver) - .replace(VAR_BASEARCH, 
&config.var_base_arch); - match &repo.details.base_url { - Some(s) => { - repo.details.base_url = Some( - s.replace(VAR_RELEASEVER, &config.var_release_ver) - .replace(VAR_BASEARCH, &config.var_base_arch), - ); - } - None => {} - } - match &repo.details.meta_link { - Some(s) => { - let meta_link = s - .replace(VAR_RELEASEVER, &config.var_release_ver) - .replace(VAR_BASEARCH, &config.var_base_arch); - repo.details.cache_name = Some(solv_create_cache_name(&repo.psz_id, &meta_link)?); - repo.details.meta_link = Some(meta_link); - } - None => match &repo.details.base_url { - Some(s) => { - repo.details.cache_name = Some(solv_create_cache_name(&repo.psz_id, s)?); - } - None => {} - }, - } - match &repo.details.url_gpg_keys { - Some(s) => { - let mut url_gpg_keys=Vec::new(); - for ele in s { - let m=ele.replace(VAR_RELEASEVER, &config.var_release_ver) - .replace(VAR_BASEARCH, &config.var_base_arch); - url_gpg_keys.push(m); - } - repo.details.url_gpg_keys=Some(url_gpg_keys); - }, - None => {}, - } - } - Ok(()) -} -pub fn init_cmdline_repo(sack: &mut Solvsack) -> Result<*mut Repo> { - unsafe { - let repo_name = CString::new(CMDLINE_REPO_NAME).unwrap(); - let repo = repo_create(sack.pool, repo_name.as_ptr()); - if repo.is_null() { - bail!(ERROR_RDNF_INVALID_PARAMETER); - } - let solv_repo_info = SolvRepoInfoInernal { - repo, - cookie: None, - n_cookie_set: None, - repo_cache_dir: None, - }; - let p = Box::into_raw(Box::new(solv_repo_info)) as *mut c_void; - (*repo).appdata = p; - pool_createwhatprovides(sack.pool); - Ok(repo) - } -} -fn alter_repo_state_enable(repos: &mut Vec, enable: bool, pattern: &str) -> Result<()> { - match Pattern::new(pattern) { - Ok(p) => { - for repo in &mut *repos { - if p.matches(&repo.psz_id) { - repo.base.enabled = enable; - } - } - } - Err(e) => { - bail!("Failed to enablerepo {}, because {}", &pattern, e) - } - } - Ok(()) -} - -#[derive(Debug, Clone, Copy)] -pub struct RepoDataBase { - pub enabled: bool, - pub skip_if_unavailable: bool, - pub 
gpgcheck: bool, - pub priority: i32, - pub timeout: u64, - pub retries: i32, - pub minrate: u32, - pub throttle: u64, - pub sslverify: bool, - pub lmetadata_expire: i128, - pub skip_md_filelists: bool, - pub skip_md_updateinfo: bool, - pub skip_md_other: bool, -} -impl RepoDataBase { - pub fn default() -> Self { - RepoDataBase { - enabled: true, - skip_if_unavailable: false, - gpgcheck: true, - sslverify: true, - lmetadata_expire: 172800, - priority: 50, - timeout: 0, - minrate: 0, - throttle: 0, - retries: 10, - skip_md_filelists: false, - skip_md_updateinfo: false, - skip_md_other: false, - } - } -} -#[derive(Debug, Clone)] -pub struct RepoDataDetails { - pub base_url: Option, - pub meta_link: Option, - pub url_gpg_keys: Option>, - pub username: Option, - pub password: Option, - pub ssl_ca_cert: Option, - pub ssl_client_cert: Option, - pub ssl_client_key: Option, - pub cache_name: Option, -} -impl RepoDataDetails { - pub fn new() -> Self { - RepoDataDetails { - cache_name: None, - base_url: None, - meta_link: None, - url_gpg_keys: None, - username: None, - password: None, - ssl_ca_cert: None, - ssl_client_cert: None, - ssl_client_key: None, - } - } -} -#[derive(Debug, Clone)] -pub struct RepoData { - pub base: RepoDataBase, - pub psz_id: String, - pub psz_name: String, - pub details: RepoDataDetails, -} -impl RepoData { - pub fn create_repo(psz_id: &str) -> Self { - RepoData { - base: RepoDataBase::default(), - psz_id: String::from(psz_id), - psz_name: String::from(psz_id), - details: RepoDataDetails::new(), - } - } - pub fn update_repo(self: &mut Self, key: &str, v: config::Value) -> Result<()> { - match key { - "enabled" => match v.into_bool() { - Ok(b) => self.base.enabled = b, - Err(_) => { - bail!("repo {} enabled should be 0 or 1", self.psz_id) - } - }, - "name" => match v.into_string() { - Ok(s) => self.psz_name = s, - Err(_) => { - bail!("repo {} name should be String", self.psz_id) - } - }, - "baseurl" => match v.into_string() { - Ok(s) => 
self.details.base_url = Some(s), - Err(_) => { - bail!("repo {} baseurl should be String", self.psz_id) - } - }, - "metalink" => match v.into_string() { - Ok(s) => self.details.meta_link = Some(s), - Err(_) => { - bail!("repo {} metalink should be String", self.psz_id) - } - }, - "skip_if_unavailable" => match v.into_bool() { - Ok(b) => self.base.skip_if_unavailable = b, - Err(_) => { - bail!("repo {} skip_if_unavailable should be 0 or 1", self.psz_id) - } - }, - "gpgcheck" => match v.into_bool() { - Ok(b) => self.base.gpgcheck = b, - Err(_) => { - bail!("repo {} gpgcheck should be 0 or 1", self.psz_id) - } - }, - "gpgkey" => match v.into_string() { - Ok(s) => { - let mut gpg_keys: Vec = Vec::new(); - for ele in s.split(" ").collect::>() { - gpg_keys.push(String::from(ele)); - } - self.details.url_gpg_keys = Some(gpg_keys); - } - Err(_) => { - bail!( - "repo {} gpgkey should be string array split by ' ' ", - self.psz_id - ) - } - }, - "username" => match v.into_string() { - Ok(s) => self.details.username = Some(s), - Err(_) => { - bail!("repo {} username should be string", self.psz_id) - } - }, - "password" => match v.into_string() { - Ok(s) => self.details.password = Some(s), - Err(_) => { - bail!("repo {} password should be string", self.psz_id) - } - }, - "priority" => match v.into_int() { - Ok(i) => self.base.priority = i as i32, - Err(_) => { - bail!("repo {} priority should be int32", self.psz_id) - } - }, - "timeout" => match v.into_int() { - Ok(i) => self.base.timeout = i as u64, - Err(_) => { - bail!("repo {} timeout should be int32", self.psz_id) - } - }, - "retries" => match v.into_int() { - Ok(i) => self.base.retries = i as i32, - Err(_) => { - bail!("repo {} retries should be int32", self.psz_id) - } - }, - "minrate" => match v.into_int() { - Ok(i) => self.base.minrate = i as u32, - Err(_) => { - bail!("repo {} minrate should be int32", self.psz_id) - } - }, - "throttle" => match v.into_int() { - Ok(i) => self.base.throttle = i as u64, - Err(_) => { - 
bail!("repo {} throttle should be int32", self.psz_id) - } - }, - "sslverify" => match v.into_bool() { - Ok(b) => self.base.sslverify = b, - Err(_) => { - bail!("repo {} sslverify should be 0 or 1", self.psz_id) - } - }, - "sslcacert" => match v.into_string() { - Ok(s) => self.details.ssl_ca_cert = Some(s), - Err(_) => { - bail!("repo {} sslcacert should be string", self.psz_id) - } - }, - "sslclientcert" => match v.into_string() { - Ok(s) => self.details.ssl_client_cert = Some(s), - Err(_) => { - bail!("repo {} sslckuebtcert should be string", self.psz_id) - } - }, - "sslclientkey" => match v.into_string() { - Ok(s) => self.details.ssl_client_key = Some(s), - Err(_) => { - bail!("repo {} sslclientkey should be string", self.psz_id) - } - }, - "metadata_expire" => match v.into_string() { - Ok(s) => { - if s == "never" { - self.base.lmetadata_expire = -1; - } else { - self.base.lmetadata_expire = match s.parse::() { - Ok(t) => t, - Err(_) => { - let (num, mul) = s.split_at(s.len() - 1); - let n = match num.parse::() { - Ok(n) => n, - Err(_) => { - bail!("repo {} metadata_expire should be like 1 or 1d or 1h or 1m or 1s",self.psz_id) - } - }; - match mul { - "s" => n, - "m" => 60 * n, - "h" => 60 * 60 * n, - "d" => 60 * 60 * 24 * n, - _ => { - bail!("repo {} metadata_expire the unit of time should be d,h,m,s(default)",self.psz_id) - } - } - } - } - } - } - Err(_) => { - bail!( - "repo {} metadata_expire can't be parse to string ", - self.psz_id - ) - } - }, - "skip_md_filelists" => match v.into_bool() { - Ok(b) => self.base.skip_md_filelists = b, - Err(_) => { - bail!("repo {} skip_md_filelists should be 0 or 1", self.psz_id) - } - }, - "skip_md_updateinfo" => match v.into_bool() { - Ok(b) => self.base.skip_md_updateinfo = b, - Err(_) => { - bail!("repo {} skip_md_updateinfo should be 0 or 1", self.psz_id) - } - }, - "skip_md_other" => match v.into_bool() { - Ok(b) => self.base.skip_md_other = b, - Err(_) => { - bail!("repo {} skip_md_other should be 0 or 1", 
self.psz_id) - } - }, - _ => {} - } - Ok(()) - } -} diff --git a/rdnf/src/sub_command/repoutils.rs b/rdnf/src/sub_command/repoutils.rs deleted file mode 100644 index 9113e5f9d75c9b64d6462af62b544af1c57526b8..0000000000000000000000000000000000000000 --- a/rdnf/src/sub_command/repoutils.rs +++ /dev/null @@ -1,478 +0,0 @@ -use crate::{ - default::{ - REPODATA_DIR_NAME, REPO_BASEURL_FILE_NAME, REPO_METADATA_FILE_NAME, - REPO_METADATA_FILE_PATH, REPO_METADATA_MARKER, REPO_METALINK_FILE_NAME, RPM_CACHE_DIR_NAME, - SOLVCACHE_DIR_NAME, SOLV_COOKIE_LEN, - }, - errors::ERROR_RDNF_INVALID_PARAMETER, - metalink::MetalinkContext, - repomd::RepoMd, - solv::{ - rdnf_repo::{solv_create_metadata_cache, solv_read_yum_repo, solv_user_metadata_cache}, - sack::solv_calcuate_cookie_for_file, - SolvRepoInfoInernal, - }, - RdnfContext, -}; -use anyhow::{bail, Result}; -use console::Term; -use curl::easy::{Easy2, Handler}; -use indicatif::{ProgressBar, ProgressStyle}; -use libc::c_void; -use md5::Digest; -use md5::Md5; -use sha1::Sha1; -use sha2::{Sha256, Sha512}; -use solv_sys::ffi::{pool_createwhatprovides, repo_create}; -use std::{ - ffi::CString, - fs::{create_dir_all, remove_file, rename, File, OpenOptions}, - io::{self, Read, Write}, - path::{Path, PathBuf}, - time::Duration, -}; - -use super::{cache::recursively_remove_dir, repo::RepoData}; - -impl RdnfContext { - pub fn init_repo(&self, repo: RepoData) -> Result { - let cache_name = repo.details.cache_name.as_ref().unwrap().as_str(); - let repo_cache_dir = self.conf.cachedir.clone() + cache_name + "/"; - let mut repo = repo; - let repo_md = self.get_repo_md(&mut repo, repo_cache_dir.clone())?; - let pool = self.sack.pool; - let id_ptr = CString::new(repo.psz_id.as_str()).unwrap(); - unsafe { - let p_repo = repo_create(pool, id_ptr.as_ptr()); - if p_repo.is_null() { - bail!(ERROR_RDNF_INVALID_PARAMETER); - } - let repo_md_file = repo_cache_dir.clone() + REPO_METADATA_FILE_PATH; - let cookie = 
solv_calcuate_cookie_for_file(repo_md_file.as_str())?; - let solv_repo_info = SolvRepoInfoInernal { - repo: p_repo, - cookie: Some(cookie), - n_cookie_set: Some(1), - repo_cache_dir: Some(repo_cache_dir.clone()), - }; - let p = Box::into_raw(Box::new(&solv_repo_info)) as *mut c_void; - (*p_repo).appdata = p; - let psz_cache_file_path = - repo_cache_dir.clone() + SOLVCACHE_DIR_NAME + "/" + repo.psz_id.as_str() + ".solv"; - if !solv_user_metadata_cache(&solv_repo_info, psz_cache_file_path.as_str())? { - solv_read_yum_repo(&solv_repo_info.repo, repo_md_file.clone(), repo_md)?; - solv_create_metadata_cache(&solv_repo_info, repo.psz_id.as_str())?; - } - pool_createwhatprovides(self.sack.pool); - } - Ok(repo) - } - pub fn get_repo_md(&self, repo: &mut RepoData, repo_cache_dir: String) -> Result { - let mut replace_repo_md = false; - let metalink = repo.details.meta_link.is_some(); - let keepcache = self.conf.keepcache; - let mut replace_base_url = false; - let mut new_repo_md_file = false; - let mut cookie: [u8; SOLV_COOKIE_LEN] = [0; SOLV_COOKIE_LEN]; - if repo.details.base_url.is_none() && repo.details.meta_link.is_none() { - bail!("Cannot find a valid base URL for repo: {}", repo.psz_name); - } - let repo_data_dir = repo_cache_dir.clone() + REPODATA_DIR_NAME + "/"; - let solv_cache_dir = repo_cache_dir.clone() + SOLVCACHE_DIR_NAME + "/"; - let last_refresh_marker = repo_cache_dir.clone() + REPO_METADATA_MARKER; - let rpms_cache_dir = repo_cache_dir.clone() + RPM_CACHE_DIR_NAME + "/"; - let repo_md_file = repo_data_dir.clone() + REPO_METADATA_FILE_NAME; - let meta_link_file = repo_data_dir.clone() + REPO_METALINK_FILE_NAME; - let base_url_file = repo_data_dir.clone() + REPO_BASEURL_FILE_NAME; - let tmp_repo_data_dir = repo_cache_dir.clone() + "tmp/"; - let tmp_repo_md_file = tmp_repo_data_dir.clone() + REPO_METADATA_FILE_NAME; - let tmp_meta_link_file = tmp_repo_data_dir.clone() + REPO_METALINK_FILE_NAME; - let tmp_base_url_file = tmp_repo_data_dir.clone() + 
REPO_BASEURL_FILE_NAME; - let mut need_download = if repo.details.meta_link.is_some() { - !Path::new(meta_link_file.as_str()).exists() - && !Path::new(base_url_file.as_str()).exists() - } else { - !Path::new(repo_md_file.as_str()).exists() - }; - if self.cli.refresh { - if repo.details.meta_link.is_some() { - if Path::new(&meta_link_file).exists() { - cookie = solv_calcuate_cookie_for_file(meta_link_file.as_str())?; - } - } else { - if Path::new(&repo_md_file).exists() { - cookie = solv_calcuate_cookie_for_file(repo_md_file.as_str())?; - } - } - need_download = true; - } - - if need_download && !self.cli.cacheonly { - create_dir_all(&tmp_repo_data_dir)?; - if metalink { - let url = repo.details.meta_link.clone().unwrap(); - download_file( - &self, - repo, - url.as_str(), - tmp_meta_link_file.as_str(), - &repo.psz_name, - )?; - let mut mc = match MetalinkContext::from_with_filename( - &tmp_meta_link_file, - REPO_METADATA_FILE_NAME, - ) { - Ok(s) => s, - Err(_) => { - bail!("check {}.repo metalink url", repo.psz_id) - } - }; - - replace_repo_md = true; - if cookie[0] != 0 { - let tmp_cookie = solv_calcuate_cookie_for_file(tmp_meta_link_file.as_str())?; - if tmp_cookie == cookie { - replace_repo_md = false; - } - } - if replace_repo_md { - let mut choose_url = None; - for ele in mc.urls { - ele.url.ends_with(REPO_METADATA_FILE_PATH); - if download_file( - &self, - repo, - ele.url.as_str(), - tmp_repo_md_file.as_str(), - repo.psz_name.as_str(), - ) - .is_ok() - { - choose_url = Some(ele.url.clone()); - break; - }; - } - match choose_url { - Some(choose_url) => { - let tmp_base_url_file = - tmp_repo_data_dir.clone() + REPO_BASEURL_FILE_NAME; - let baseurl = choose_url.trim_end_matches(REPO_METADATA_FILE_PATH); - repo.details.base_url = Some(baseurl.to_string()); - if Path::new(&tmp_base_url_file).exists() { - remove_file(&tmp_base_url_file)?; - } - OpenOptions::new() - .write(true) - .create(true) - .open(tmp_base_url_file)? 
- .write_all(baseurl.as_bytes())?; - mc.hashs.sort_by(|a, b| a.value.cmp(&b.value)); - let mut flag = mc.hashs.len(); - for ele in mc.hashs { - if ele - .kind - .checksum(tmp_repo_md_file.as_str(), ele.value.as_str())? - { - break; - }; - flag -= 1; - } - if flag == 0 { - bail!("{}.repo repomd.xml invalid", repo.psz_id); - } - } - None => { - bail!("{}.repo metalink don't have vaild url ", repo.psz_id) - } - } - replace_base_url = true; - new_repo_md_file = true; - - if Path::new(&repo_md_file).exists() { - let cookie = solv_calcuate_cookie_for_file(repo_md_file.as_str())?; - let tmp_cookie = solv_calcuate_cookie_for_file(tmp_repo_md_file.as_str())?; - if cookie == tmp_cookie { - replace_repo_md = false; - } - } - } - } else { - let url = repo - .details - .base_url - .clone() - .unwrap() - .trim_end_matches("/") - .to_string() - + "/" - + REPO_METADATA_FILE_PATH; - download_file( - &self, - repo, - url.as_str(), - tmp_repo_md_file.as_str(), - repo.psz_name.as_str(), - )?; - replace_repo_md = true; - if cookie[0] != 0 { - let tmp_cookie = solv_calcuate_cookie_for_file(tmp_repo_md_file.as_str())?; - if tmp_cookie == cookie { - replace_repo_md = false; - } - } - new_repo_md_file = true; - } - } - - if metalink && !replace_base_url && Path::new(base_url_file.as_str()).exists() { - let mut buf = String::new(); - File::open(base_url_file.clone()) - .unwrap() - .read_to_string(&mut buf)?; - repo.details.base_url = Some(buf.trim_end_matches("/").to_string() + "/"); - } - if replace_repo_md { - recursively_remove_dir(&PathBuf::from(repo_data_dir.clone()))?; - recursively_remove_dir(&PathBuf::from(solv_cache_dir))?; - match remove_file(last_refresh_marker.clone()) { - Ok(_) => {} - Err(_) => {} - }; - if !keepcache { - recursively_remove_dir(&PathBuf::from(rpms_cache_dir))?; - } - create_dir_all(repo_data_dir)?; - rename(tmp_repo_md_file, repo_md_file.clone())?; - } - if new_repo_md_file { - OpenOptions::new() - .write(true) - .create(true) - .open(last_refresh_marker)?; - 
} - if metalink && replace_base_url { - rename(tmp_meta_link_file, meta_link_file)?; - rename(tmp_base_url_file, base_url_file)?; - } - - let repo_md = RepoMd::parse_from(repo_md_file.as_str())?; - let repo_md = repo_md.ensure_repo_md_parts(&self, repo, repo_cache_dir.clone())?; - - Ok(repo_md) - } -} -// impl RdnfContext { -pub fn download_file( - rc: &RdnfContext, - repo: &RepoData, - url: &str, - file: &str, - msg: &str, -) -> Result<()> { - let pb = rc.multi_process.add(ProgressBar::new(0)); - let (_, width) = Term::stdout().size(); - let style = format!("{{msg:{}}}{{spinner:.green}}[{{bar:{}.cyan/blue}}] {{bytes}}/{{total_bytes}} ({{bytes_per_sec}},{{eta}})",width /3,width/3); - let style = ProgressStyle::with_template(style.as_str()) - .unwrap() - .progress_chars("#>-"); - pb.set_style(style); - pb.set_message(msg.to_string()); - let file_path = format!("{}.tmp", file); - let mut easy = Easy2::new(Collector { - buffer: Vec::new(), - pb, - file: OpenOptions::new() - .write(true) - .append(true) - .create(true) - .open(file_path.as_str()) - .unwrap(), - }); - - if let Some(user) = &repo.details.username { - easy.username(user.as_str())?; - } - if let Some(password) = &repo.details.password { - easy.password(password.as_str())?; - } - if let Some(proxy) = &rc.conf.proxy { - easy.proxy(proxy.as_str())?; - if let Some(username) = &rc.conf.proxy_username { - easy.proxy_username(username.as_str())?; - if let Some(password) = &rc.conf.proxy_password { - easy.proxy_password(password.as_str())?; - } - } - } - easy.timeout(Duration::from_secs(repo.base.timeout))?; - easy.low_speed_time(Duration::from_secs(repo.base.timeout))?; - easy.low_speed_limit(repo.base.minrate)?; - easy.max_recv_speed(repo.base.throttle)?; - easy.ssl_verify_peer(repo.base.sslverify)?; - easy.ssl_verify_host(repo.base.sslverify)?; - if let Some(cert) = &repo.details.ssl_ca_cert { - easy.ssl_cainfo_blob(cert.as_bytes())?; - } - if let Some(cert) = &repo.details.ssl_client_cert { - 
easy.ssl_cert(cert)?; - } - if let Some(key) = &repo.details.ssl_client_key { - easy.ssl_key(key)?; - } - easy.url(url)?; - easy.follow_location(true)?; - easy.progress(true)?; - match easy.perform() { - Ok(_) => {} - Err(_) => { - for x in 1..10 { - let co = easy.get_mut(); - co.pb.set_message(format!("{}: retrying {}/10", msg, x)); - match easy.perform() { - Ok(_) => { - break; - } - Err(_) => { - continue; - } - } - } - } - } - let collector = easy.get_mut(); - collector.finish()?; - collector.pb.finish_and_clear(); - let status = easy.response_code()?; - if status >= 400 { - bail!("{} when downloading {} Please check repo url or refresh metadata with 'rdnf makecache'",status,url); - } else { - rename(file_path, file)?; - } - - Ok(()) -} -// } - -struct Collector { - buffer: Vec, - pb: ProgressBar, - file: File, -} -impl Collector { - fn finish(&mut self) -> Result<()> { - self.file.write(self.buffer.as_slice())?; - self.buffer.clear(); - Ok(()) - } -} -const LIMIT: usize = 4 * 1024; -impl Handler for Collector { - fn write(&mut self, data: &[u8]) -> Result { - self.buffer.extend_from_slice(data); - if self.buffer.len() >= LIMIT { - match self.file.write_all(self.buffer.as_slice()) { - Ok(_) => {} - Err(_) => { - return Err(curl::easy::WriteError::Pause); - } - }; - self.buffer.clear(); - } - Ok(data.len()) - } - fn progress(&mut self, dltotal: f64, dlnow: f64, _ultotal: f64, _ulnow: f64) -> bool { - if dltotal <= 0.0 { - self.pb.set_length(0); - } - self.pb.set_length(dltotal as u64); - self.pb.set_position(dlnow as u64); - true - } -} -#[derive(Debug, Clone)] -pub enum HashKind { - MD5, - SHA1, - SHA256, - SHA512, - Invalid, -} -impl From<&str> for HashKind { - #[inline] - fn from(kind: &str) -> Self { - match kind.as_bytes() { - b"md5" => Self::MD5, - b"sha1" => Self::SHA1, - b"sha256" => Self::SHA256, - b"sha512" => Self::SHA512, - _ => Self::Invalid, - } - } -} -impl HashKind { - #[inline] - pub fn checksum(self, file: &str, hash: &str) -> Result { - let 
mut file = File::open(file)?; - let sum = match self { - HashKind::MD5 => { - let mut hasher = Md5::new(); - io::copy(&mut file, &mut hasher)?; - hex::encode(hasher.finalize()) - } - HashKind::SHA1 => { - let mut hasher = Sha1::new(); - io::copy(&mut file, &mut hasher)?; - hex::encode(hasher.finalize()) - } - HashKind::SHA256 => { - let mut hasher = Sha256::new(); - io::copy(&mut file, &mut hasher)?; - hex::encode(hasher.finalize()) - } - - HashKind::SHA512 => { - let mut hasher = Sha512::new(); - io::copy(&mut file, &mut hasher)?; - hex::encode(hasher.finalize()) - } - HashKind::Invalid => { - bail!("Invalid hash algorithm") - } - }; - Ok(sum == hash) - } -} -// #[inline] -// pub fn check_hash(kind:&String,file:&str,hash:&String)->Result{ -// let mut file=File::open(file)?; -// let s=match kind.as_bytes() { -// b"md5" => { -// let mut hasher = Md5::new(); -// io::copy(&mut file,&mut hasher)?; -// hex::encode(hasher.finalize()) -// }, -// b"sha1"=>{ -// let mut hasher=Sha1::new(); -// io::copy(&mut file,&mut hasher)?; -// hex::encode(hasher.finalize()) -// }, -// b"sha256"=>{ -// let mut hasher=Sha256::new(); -// io::copy(&mut file,&mut hasher)?; -// hex::encode(hasher.finalize()) -// } -// b"sha512"=>{ -// let mut hasher=Sha512::new(); -// io::copy(&mut file, &mut hasher)?; -// hex::encode(hasher.finalize()) -// } -// _=>{ -// bail!("invalid hash {}",kind) -// } -// }; -// Ok(s==hash.as_str()) -// } -// #[test] -// fn test(){ -// assert!(check_hash(&"md5".to_string(),"/var/cache/rdnf/rpmfusion-nonfree-updates-77c32c35/tmp/repomd.xml",&"d56148af65634f42c1a2c9bd6a0597be".to_string()).unwrap()); -// } diff --git a/rdnf/src/sub_command/search.rs b/rdnf/src/sub_command/search.rs deleted file mode 100644 index 4902686e5fca0a2bd7f541c6d6c4a6d3e780606f..0000000000000000000000000000000000000000 --- a/rdnf/src/sub_command/search.rs +++ /dev/null @@ -1,144 +0,0 @@ -use std::ffi::{CStr, CString}; - -use anyhow::{bail, Result}; -use console::style; -use solv_sys::ffi::{ - 
dataiterator_free, dataiterator_set_keyname, dataiterator_step, queue_free, queue_insertn, - selection_solvables, solv_knownid_SOLVABLE_ARCH, solv_knownid_SOLVABLE_DESCRIPTION, - solv_knownid_SOLVABLE_NAME, solv_knownid_SOLVABLE_SUMMARY, solvable_lookup_str, Dataiterator, - Queue, SEARCH_NOCASE, SEARCH_SUBSTRING, SOLVER_SOLVABLE, -}; - -use crate::{ - c_lib::{ - create_dataiterator_empty, dataiterator_init_simple, dataiterator_set_search_simple, - get_queue_element_value, pool_id2solvable, queue_empty, queue_push2, - }, - errors::{ERROR_RDNF_NO_DATA, ERROR_RDNF_NO_MATCH}, - solv::rdnf_query::{init_queue, SolvQuery}, - Rdnf, -}; - -impl Rdnf { - pub fn search_pkg(&mut self, pkgs: Vec) -> Result<()> { - self.make_cache()?; - let pkg_infos = SolvQuery::default(self.rc.sack.clone()) - .apply_search(pkgs)? - .get_query_result()?; - let offset = pkg_infos - .iter() - .max_by_key(|x| x.pkg_name.len()) - .unwrap() - .pkg_name - .len() - + 5; - let (_, width) = self.rc.term.size(); - for pkg in pkg_infos { - self.rc - .term - .write_line(&format!("{}", style(pkg.pkg_name).green()))?; - if offset < width as usize { - self.rc.term.move_cursor_up(1)?; - } - self.rc.term.move_cursor_right(offset)?; - self.rc - .term - .write_line(&format!("{}", style(pkg.pkg_summary).yellow()))?; - } - Ok(()) - } -} -impl SolvQuery { - pub fn apply_search(mut self, pkgs: Vec) -> Result { - let mut queue_sel = init_queue(); - let mut queue_result = init_queue(); - let mut di = create_dataiterator_empty(); - let pool = self.sack.pool; - unsafe { - let di_ptr = &mut di as *mut Dataiterator; - let queue_sel_ptr = &mut queue_sel as *mut Queue; - let queue_result_ptr = &mut queue_result as *mut Queue; - - for pkg in pkgs { - let pkg_ptr = CString::new(pkg.as_str()).unwrap(); - queue_empty(queue_sel_ptr); - queue_empty(queue_result_ptr); - dataiterator_init_simple( - di_ptr, - pool, - pkg_ptr.as_ptr(), - (SEARCH_SUBSTRING | SEARCH_NOCASE) as i32, - ); - - dataiterator_set_keyname(di_ptr, 
solv_knownid_SOLVABLE_NAME as i32); - dataiterator_set_search_simple(di_ptr); - while dataiterator_step(di_ptr) != 0 { - queue_push2(queue_sel_ptr, SOLVER_SOLVABLE as i32, di.solvid); - } - - dataiterator_set_keyname(di_ptr, solv_knownid_SOLVABLE_SUMMARY as i32); - dataiterator_set_search_simple(di_ptr); - while dataiterator_step(di_ptr) != 0 { - queue_push2(queue_sel_ptr, SOLVER_SOLVABLE as i32, di.solvid); - } - - dataiterator_set_keyname(di_ptr, solv_knownid_SOLVABLE_DESCRIPTION as i32); - dataiterator_set_search_simple(di_ptr); - while dataiterator_step(di_ptr) != 0 { - queue_push2(queue_sel_ptr, SOLVER_SOLVABLE as i32, di.solvid); - } - dataiterator_free(di_ptr); - - selection_solvables(pool, queue_sel_ptr, queue_result_ptr); - let q = &mut self.queue_result as *mut Queue; - queue_insertn( - q, - (*q).count, - (*queue_result_ptr).count, - (*queue_result_ptr).elements, - ); - } - queue_free(queue_sel_ptr); - queue_free(queue_result_ptr); - } - Ok(self) - } - pub fn get_query_result(self) -> Result> { - if self.queue_result.count == 0 { - bail!(ERROR_RDNF_NO_MATCH); - } - let mut pkg_infos = Vec::new(); - for index in 0..self.queue_result.count { - let pkg_id = get_queue_element_value(&self.queue_result as *const Queue, index as u32); - - let solv = pool_id2solvable(self.sack.pool, pkg_id); - let pkg_name = self.sack.solv_get_pkg_name_by_id(pkg_id)?; - let pkg_summary = unsafe { - let temp_summary_ptr = - solvable_lookup_str(solv, solv_knownid_SOLVABLE_SUMMARY as i32); - if temp_summary_ptr.is_null() { - bail!(ERROR_RDNF_NO_DATA); - } - CStr::from_ptr(temp_summary_ptr).to_str()?.to_string() - }; - let pkg_arch = unsafe { - CStr::from_ptr(solvable_lookup_str(solv, solv_knownid_SOLVABLE_ARCH as i32)) - .to_str()? - }; - let pkg_name = pkg_name + "." 
+ pkg_arch; - let pkg_info = SearchPkgInfo { - pkg_id, - pkg_name, - pkg_summary, - }; - pkg_infos.push(pkg_info); - } - Ok(pkg_infos) - } -} -#[derive(Debug, Clone)] -pub struct SearchPkgInfo { - pub pkg_id: i32, - pub pkg_name: String, - pub pkg_summary: String, -} diff --git a/rdnf/src/sub_command/update.rs b/rdnf/src/sub_command/update.rs deleted file mode 100644 index d47b074a592bf02cb6180cd77b05a8b4322652a9..0000000000000000000000000000000000000000 --- a/rdnf/src/sub_command/update.rs +++ /dev/null @@ -1,296 +0,0 @@ -use std::{ - ffi::{CStr, CString}, -}; - -use anyhow::{bail, Result}; -use chrono::NaiveDateTime; -use libc::{atof}; -use solv_sys::ffi::{ - dataiterator_free, dataiterator_init, dataiterator_prepend_keyname, dataiterator_setpos, - dataiterator_setpos_parent, dataiterator_skip_solvable, dataiterator_step, pool_evrcmp, - pool_id2str, pool_lookup_id, pool_lookup_num, pool_lookup_str, pool_lookup_void, - solv_knownid_SOLVABLE_BUILDTIME, solv_knownid_SOLVABLE_DESCRIPTION, solv_knownid_SOLVABLE_NAME, - solv_knownid_SOLVABLE_PATCHCATEGORY, solv_knownid_UPDATE_COLLECTION, - solv_knownid_UPDATE_COLLECTION_ARCH, solv_knownid_UPDATE_COLLECTION_EVR, - solv_knownid_UPDATE_COLLECTION_FILENAME, solv_knownid_UPDATE_COLLECTION_NAME, - solv_knownid_UPDATE_REBOOT, solv_knownid_UPDATE_SEVERITY, Repo, EVRCMP_COMPARE, SEARCH_STRING, - SOLVID_POS, -}; - -use crate::{ - c_lib::{create_dataiterator_empty, pool_id2solvable, queue_push}, - errors::ERROR_RDNF_INVALID_PARAMETER, - solv::{rdnf_query::init_queue, sack::Solvsack, SolvPackageList}, - utils::c_str_ptr_to_rust_string, - Rdnf, -}; -pub enum UpdateInfoKind { - Unknown, - Security, - Bugfix, - Enhancement, -} -pub struct UpdateInfo { - kind: UpdateInfoKind, - pkg_id: Option, - pkg_date: Option, - pkg_desc: Option, - reboot_required: bool, - _refers: Vec, - pkgs: Vec, -} -impl UpdateInfo { - pub fn default() -> Self { - UpdateInfo { - kind: UpdateInfoKind::Unknown, - pkg_id: None, - pkg_date: None, - pkg_desc: None, 
- reboot_required: false, - _refers: Vec::::new(), - pkgs: Vec::::new(), - } - } -} -pub struct UpdateInfoRef { - pub pkg_id: Option, - pub pkg_link: Option, - pub pkg_title: Option, - pub pkg_type: Option, -} -pub struct UpdateInfoPkg { - pkg_name: Option, - pkg_file_name: Option, - pkg_evr: Option, - pkg_arch: Option, -} -impl UpdateInfoPkg { - pub fn default() -> Self { - UpdateInfoPkg { - pkg_name: None, - pkg_file_name: None, - pkg_evr: None, - pkg_arch: None, - } - } -} -impl Rdnf { - pub fn update_info(&self, pkg_name: Option>) -> Result> { - // self.make_cache()?; - let installed_pkg_list = match pkg_name { - Some(s) => self.rc.sack.solv_find_installed_pkg_by_multiple_names(s)?, - None => self.rc.sack.solv_find_all_installed()?, - }; - let security = self.rc.cli.security; - let sec_severity = self.rc.cli.sec_severity.clone(); - let reboot_required = self.rc.cli.reboot_required; - let count = installed_pkg_list.get_size(); - let mut infos=Vec::new(); - for index in 0..count { - let id = installed_pkg_list.get_pkg_id(index); - match self.rc.sack.solv_get_update_advisories(id) { - Err(_) => { - continue; - } - Ok(update_adv_pkg_list) => { - let ucount = update_adv_pkg_list.get_size(); - for uadv in 0..ucount { - let adv_id = update_adv_pkg_list.get_pkg_id(uadv); - let info=self.rc.sack.populate_updateinfo_of_one_advisory(adv_id, security, &sec_severity, reboot_required)?; - if info.is_some() { - infos.push(info.unwrap()) - } - - } - } - }; - } - if infos.len() >0{ - self.rc.term.write_line(&format!("{} updates.",infos.len()))?; - } - Ok(infos) - } - pub fn get_update_pkgs(&self,) -> Result> { - let update_infos=self.update_info(None)?; - let count=update_infos.len(); - let mut pkgs=Vec::new(); - if count>0{ - for update_info in update_infos { - for update_info_pkg in update_info.pkgs { - if update_info_pkg.pkg_name.is_some() { - pkgs.push(update_info_pkg.pkg_name.unwrap()) - } - } - } - } - Ok(pkgs) - } -} -impl Solvsack { - pub fn 
solv_get_update_advisories(&self, id: i32) -> Result { - let mut queue_adv = init_queue(); - let solvable = pool_id2solvable(self.pool, id); - if solvable.is_null() { - bail!(ERROR_RDNF_INVALID_PARAMETER); - }; - let pkg_name_ptr = unsafe { pool_id2str(self.pool, (*solvable).name) }; - let mut di = create_dataiterator_empty(); - unsafe { - dataiterator_init( - &mut di, - self.pool, - 0 as *mut Repo, - 0, - solv_knownid_UPDATE_COLLECTION_NAME as i32, - pkg_name_ptr, - SEARCH_STRING as i32, - ); - dataiterator_prepend_keyname(&mut di, solv_knownid_UPDATE_COLLECTION as i32); - while dataiterator_step(&mut di) != 0 { - dataiterator_setpos_parent(&mut di); - let arch = pool_lookup_id( - self.pool, - SOLVID_POS, - solv_knownid_UPDATE_COLLECTION_ARCH as i32, - ); - if arch != (*solvable).arch { - continue; - } - let evr = pool_lookup_id( - self.pool, - SOLVID_POS, - solv_knownid_UPDATE_COLLECTION_EVR as i32, - ); - if evr == 0 { - continue; - } - let cmp_result = - pool_evrcmp(self.pool, evr, (*solvable).evr, EVRCMP_COMPARE as i32); - if cmp_result > 0 { - queue_push(&mut queue_adv, di.solvid); - dataiterator_skip_solvable(&mut di); - } - } - }; - unsafe { dataiterator_free(&mut di) }; - SolvPackageList::queue_to_pkg_list(&mut queue_adv) - } - pub fn populate_updateinfo_of_one_advisory( - &self, - adv_id: i32, - security: bool, - sec_severity: &Option, - reboot_required: bool, - ) -> Result> { - let psz_type_ptr = unsafe { - pool_lookup_str( - self.pool, - adv_id, - solv_knownid_SOLVABLE_PATCHCATEGORY as i32, - ) - }; - let temp_ptr = - unsafe { pool_lookup_str(self.pool, adv_id, solv_knownid_UPDATE_SEVERITY as i32) }; - let reboot = - unsafe { pool_lookup_void(self.pool, adv_id, solv_knownid_UPDATE_REBOOT as i32) } == 1; - let mut keep_entry = true; - if security { - if !psz_type_ptr.is_null() { - if unsafe { CStr::from_ptr(psz_type_ptr).to_str()? 
} == "security" { - keep_entry = false; - } - } - } else if sec_severity.is_some() { - let severity = CString::new(sec_severity.as_ref().unwrap().as_str())?; - if temp_ptr.is_null() || unsafe { atof(severity.as_ptr()) > atof(temp_ptr) } { - keep_entry = false; - } - } - if reboot_required { - if reboot { - keep_entry = false; - } - } - if keep_entry { - let mut update_info = UpdateInfo::default(); - update_info.kind = if psz_type_ptr.is_null() { - UpdateInfoKind::Unknown - } else { - let psz_type = unsafe { CStr::from_ptr(psz_type_ptr).to_str()? }; - match psz_type { - "bugfix" => UpdateInfoKind::Bugfix, - "enhancement" => UpdateInfoKind::Enhancement, - "security" => UpdateInfoKind::Security, - _ => UpdateInfoKind::Unknown, - } - }; - update_info.reboot_required = reboot; - update_info.pkg_id=c_str_ptr_to_rust_string(unsafe { - pool_lookup_str(self.pool, adv_id, solv_knownid_SOLVABLE_NAME as i32) - }); - update_info.pkg_desc=c_str_ptr_to_rust_string(unsafe { - pool_lookup_str(self.pool, adv_id, solv_knownid_SOLVABLE_DESCRIPTION as i32) - }); - let updated = unsafe { - pool_lookup_num(self.pool, adv_id, solv_knownid_SOLVABLE_BUILDTIME as i32, 0) - }; - if updated > 0 { - update_info.pkg_date = - Some(NaiveDateTime::from_timestamp_opt(updated as i64, 0).unwrap()); - } - update_info.pkgs=self.get_update_info_pkgs(adv_id)?; - return Ok(Some(update_info)); - } - Ok(None) - } - pub fn get_update_info_pkgs(&self, id: i32) -> Result> { - let mut di = create_dataiterator_empty(); - unsafe { - dataiterator_init( - &mut di, - self.pool, - 0 as *mut Repo, - id, - solv_knownid_UPDATE_COLLECTION as i32, - 0 as *const i8, - 0, - ) - }; - let mut update_pkgs = Vec::new(); - while unsafe { dataiterator_step(&mut di) } != 0 { - unsafe { dataiterator_setpos(&mut di) }; - let mut update_pkg = UpdateInfoPkg::default(); - update_pkg.pkg_name = c_str_ptr_to_rust_string(unsafe { - pool_lookup_str( - self.pool, - SOLVID_POS as i32, - solv_knownid_UPDATE_COLLECTION_NAME as i32, - ) - }); - 
update_pkg.pkg_evr = c_str_ptr_to_rust_string(unsafe { - pool_lookup_str( - self.pool, - SOLVID_POS as i32, - solv_knownid_UPDATE_COLLECTION_EVR as i32, - ) - }); - update_pkg.pkg_arch = c_str_ptr_to_rust_string(unsafe { - pool_lookup_str( - self.pool, - SOLVID_POS as i32, - solv_knownid_UPDATE_COLLECTION_ARCH as i32, - ) - }); - update_pkg.pkg_file_name = c_str_ptr_to_rust_string(unsafe { - pool_lookup_str( - self.pool, - SOLVID_POS as i32, - solv_knownid_UPDATE_COLLECTION_FILENAME as i32, - ) - }); - update_pkgs.push(update_pkg); - } - unsafe{dataiterator_free(&mut di)}; - Ok(update_pkgs) - } -} diff --git a/rdnf/src/utils.rs b/rdnf/src/utils.rs index e52b7953d311025d0002b48f049f7f7c43ddfd38..bdefc62af4fba8b7c5e73904d29b9dfc21e89c01 100644 --- a/rdnf/src/utils.rs +++ b/rdnf/src/utils.rs @@ -1,76 +1,365 @@ -use std::ffi::CStr; - use anyhow::{bail, Result}; +use console::Term; +use futures_util::StreamExt; +use indicatif::{ProgressBar, ProgressStyle, MultiProgress}; use libc::geteuid; +use reqwest::header::CONTENT_LENGTH; +use reqwest::{Client}; +use std::fs::{read_dir, remove_dir, remove_file, File}; +use std::io::Read; +use std::time::{SystemTime}; +use std::{ path::Path, process::Command}; +use tokio::io::AsyncWriteExt; +use crate::error::DownloadError; +use crate::{error::SystemError}; -use crate::{ - default::RDNF_INSTANCE_LOCK_FILE, - lock::{flock_acquire, flock_release}, -}; - -use super::lock::{flock_new, RdnfFlockMode}; +pub fn get_os_arch() -> Result { + let output = Command::new("uname") + .arg("-m") + .output() + .map_err(|e| SystemError::ArchError(format!("{:?}", e)))?; + if output.status.success() { + return Ok(String::from_utf8_lossy(&output.stdout).trim().to_owned()); + } else { + return Err(SystemError::ArchError(format!("{:?}", output))); + }; +} +pub fn check_dir(path: &str) -> Result { + match std::fs::read_dir(path) { + Ok(c) => { + return Ok(c.count() > 0); + } + Err(_) => { + bail!("Dir {} don't exist", path) + } + } +} pub fn check_root() -> 
Result<()> { if unsafe { geteuid() } != 0 { bail!("root permission is required for this operation"); } Ok(()) } - -pub fn is_already_running() -> Result<()> { - if unsafe { geteuid() } == 0 { - let mut lock = flock_new(RDNF_INSTANCE_LOCK_FILE, "rdnf_instance")?; - if !flock_acquire(&mut lock, RdnfFlockMode::WriteRead)? { - match lock.openmode { - RdnfFlockMode::WriteRead => { - println!("waiting for {} lock on {}", lock.descr, lock.path); - if !flock_acquire(&mut lock, RdnfFlockMode::Wait)? { - flock_release(&mut lock); - bail!("can't create {} lock on {}", lock.descr, lock.path); +pub fn recursively_remove_dir>(dir: P) -> Result<()> { + if Path::new(dir.as_ref()).is_dir() { + match read_dir(dir.as_ref()) { + Ok(s) => { + for x in s { + let entry = x?; + let path = entry.path(); + if path.is_dir() { + recursively_remove_dir(&path)?; + } else if path.is_file() { + remove_file(path)?; } } - _ => { - flock_release(&mut lock); - bail!("Failed to acquire rdnf_instance lock") - } } + Err(_) => {} } + remove_dir(dir)?; } Ok(()) } -pub fn check_dir(path: &str) -> Result { - match std::fs::read_dir(path) { - Ok(c) => { - return Ok(c.count() > 0); +pub fn get_file_md5>(path: P) -> Result<[u8; 16]> { + let mut f = File::open(path)?; + let mut buf = Vec::new(); + f.read_to_end(&mut buf)?; + Ok(md5::compute(buf).0) +} + +pub fn read_file_to_string>(path:P)->Result{ + let mut f=File::open(path)?; + let mut buf=String::new(); + f.read_to_string(&mut buf)?; + Ok(buf) +} +pub async fn download_single_file>(client: &Client, url: &str, file_path: P) -> Result<(),DownloadError> { + let mut stream = client.get(url).send().await.map_err(|e|DownloadError::Network(format!("{:?}",e)))?.bytes_stream(); + let mut f = tokio::fs::OpenOptions::new() + .write(true) + .create(true) + .open(file_path) + .await.map_err(|e|DownloadError::IoError(format!("{:?}",e)))?; + while let Some(items) = stream.next().await { + let bytes = items.map_err(|e|DownloadError::Network(format!("{:?}",e)))?; + 
f.write(bytes.as_ref()).await.map_err(|e|DownloadError::IoError(format!("{:?}",e)))?; + } + f.flush().await.map_err(|e|DownloadError::IoError(format!("{:?}",e)))?; + Ok(()) +} +#[allow(dead_code)] +pub async fn download_single_file_with_pb>(client: &Client,url: &str,file_path: P,msg:&str,pb:&ProgressBar)->Result<(),DownloadError>{ + if let Ok(res) = head_file_size(client, url).await { + if let Some(len) = res { + pb.set_length(len); } - Err(_) => { - bail!("Dir {} don't exist", path) + } + pb.set_message(msg.to_owned()); + + let mut f = tokio::fs::OpenOptions::new() + .write(true) + .create(true) + .open(file_path) + .await.map_err(|e|DownloadError::IoError(format!("{:?}",e)))?; + + // dbg!("{}",file_path); + let mut stream = client.get(url).send().await.map_err(|e|DownloadError::Network(format!("{:?}",e)))?.bytes_stream(); + + let mut count: u64 = 0; + let earlier=SystemTime::now(); + let mut duration:u64=0; + + while let Some(items) = stream.next().await { + let bytes = items.map_err(|e|DownloadError::Network(format!("{:?}",e)))?; + count += bytes.len() as u64; + let p=SystemTime::now().duration_since(earlier).map_err(|_e| DownloadError::TimeError)?.as_secs(); + if p!=duration{ + duration=p; + pb.set_position(count); } + f.write(bytes.as_ref()).await.map_err(|e|DownloadError::IoError(format!("{:?}",e)))?; } + f.flush().await.map_err(|e|DownloadError::IoError(format!("{:?}",e)))?; + Ok(()) } -pub fn format_size(size: u64) -> String { - let mut dsize = size as f32; - for i in ["b", "k", "M", "G"] { - if dsize >= 1024.0 { - dsize /= 1024.0; - } else { - return format!("{:.2}{}", dsize, i); + +pub async fn download_single_file_with_pb_decode>(client: &Client,url: &str,file_path: P,msg:&str,pb:&ProgressBar)->Result<(),DownloadError>{ + if let Ok(res) = head_file_size(client, url).await { + if let Some(len) = res { + pb.set_length(len); } } - format!("{:.2}T", dsize) -} -#[inline] -pub fn c_str_ptr_to_rust_string(ptr:*const i8)->Option{ - if ptr.is_null(){ - None - 
}else{ - unsafe{ - Some(CStr::from_ptr(ptr).to_str().unwrap().to_string()) + pb.set_message(msg.to_owned()); + + let f = tokio::fs::OpenOptions::new() + .write(true) + .create(true) + .open(file_path) + .await.map_err(|e|DownloadError::IoError(format!("{:?}",e)))?; + let mut f_de=async_compression::tokio::write::GzipDecoder::new(f); + let mut stream = client.get(url).send().await.map_err(|e|DownloadError::Network(format!("{:?}",e)))?.bytes_stream(); + + let mut count: u64 = 0; + let earlier=SystemTime::now(); + let mut duration:u64=0; + + while let Some(items) = stream.next().await { + let bytes = items.map_err(|e|DownloadError::Network(format!("{:?}",e)))?; + count += bytes.len() as u64; + let p=SystemTime::now().duration_since(earlier).map_err(|_e| DownloadError::TimeError)?.as_secs(); + if p!=duration{ + duration=p; + pb.set_position(count); } + f_de.write_all(bytes.as_ref()).await.map_err(|e|DownloadError::IoError(format!("{:?}",e)))?; + } + f_de.flush().await.map_err(|e|DownloadError::IoError(format!("{:?}",e)))?; + Ok(()) +} + +pub fn get_multi_progress(n:usize)->(MultiProgress,Vec){ + let m=MultiProgress::new(); + let (_, width) = Term::stdout().size(); + let style = format!( + "{{msg:{}}}{{spinner:.green}}[{{bar:{}.cyan/blue}}]{{bytes}}/{{total_bytes}} ({{bytes_per_sec}},{{eta}})", + width / 3, + width / 3 + ); + let style = ProgressStyle::with_template(style.as_str()) + .unwrap() + .progress_chars("#>-"); + let mut v=Vec::new(); + for index in 0..n{ + let pb=m.insert(index, ProgressBar::new(0)); + pb.set_style(style.clone()); + v.push(pb); } + (m,v) } -#[cfg(test)] -mod tests { - #[test] - pub fn test() {} +async fn head_file_size(client: &Client, url: &str) -> Result> { + let res = client.head(url).send().await?; + return if let Some(v) = res.headers().get(CONTENT_LENGTH) { + Ok(v.to_str()?.parse::().ok()) + } else { + Ok(None) + }; +} +mod tests{ + + #[tokio::test] + async fn test_downlad() { + use reqwest::ClientBuilder; + + use 
super::download_single_file; + let client = ClientBuilder::new().user_agent("rdnf").build().unwrap(); + download_single_file(&client, + "https://mirrors.aliyun.com/fedora/releases/37/Everything/x86_64/os/Packages/3/389-ds-base-2.2.3-1.fc37.x86_64.rpm", + "tests/test.iso").await.unwrap(); + // let p=RepoConfig::read_all("assest/repos.d"); + } + #[tokio::test] + async fn test_download_gzip(){ + use futures_util::StreamExt; + use reqwest::ClientBuilder; + use tokio::io::AsyncWriteExt; + + let fw=tokio::fs::OpenOptions::new().append(true).create(true).open("tests/primary.xml").await.unwrap(); + let client = ClientBuilder::new().user_agent("rdnf").build().unwrap(); + let url="https://mirrors.aliyun.com/fedora/releases/37/Everything/x86_64/os/repodata/54bbae6e9d4cd4865a55f7558daef86574cddc5f2a4f8a0d9c74f946e1a45dd3-primary.xml.gz"; + let mut stream=client.get(url).send().await.unwrap().bytes_stream(); + + let mut gz_f=async_compression::tokio::write::GzipDecoder::new(fw); + + while let Some(item) = stream.next().await { + let bytes=item.unwrap(); + gz_f.write_all(bytes.as_ref()).await.unwrap(); + + } + gz_f.flush().await.unwrap(); + } + #[tokio::test] + async fn test_download_with(){ + use futures_util::StreamExt; + use reqwest::ClientBuilder; + use tokio::io::AsyncWriteExt; + let mut fw=tokio::fs::OpenOptions::new().append(true).create(true).open("tests/primary.xml.gz").await.unwrap(); + let client = ClientBuilder::new().user_agent("rdnf").build().unwrap(); + let url="https://mirrors.aliyun.com/fedora/releases/37/Everything/x86_64/os/repodata/54bbae6e9d4cd4865a55f7558daef86574cddc5f2a4f8a0d9c74f946e1a45dd3-primary.xml.gz"; + let mut stream=client.get(url).send().await.unwrap().bytes_stream(); + + // let mut gz_f=async_compression::tokio::write::GzipDecoder::new(fw); + + while let Some(item) = stream.next().await { + let bytes=item.unwrap(); + // gz_f.write_all(bytes.as_ref()).await.unwrap(); + fw.write_all(bytes.as_ref()).await.unwrap(); + + } + 
fw.flush().await.unwrap(); + // gz_f.flush().await.unwrap(); + } + + + #[tokio::test] + async fn test_a(){ + + use reqwest::ClientBuilder; + + use crate::{error::DownloadError}; + let url="https://codecs.fedoraproject.org/openh264/37/x86_64/os/repodata/54a9de010375c9470f3bfa9b594cd36909289019474d9fc74f6f66a7f1450bf0-primary.xml.gz"; + let client = ClientBuilder::new().user_agent("rdnf").build().unwrap(); + let _stream = client.get(url).send().await.map_err(|e|DownloadError::Network(format!("{:?}",e))).unwrap().bytes_stream(); + } + #[tokio::test] + async fn test_mutli(){ + use futures_util::StreamExt; + use reqwest::ClientBuilder; + use tokio::io::AsyncWriteExt; + use crate::utils::get_multi_progress; + use std::time::SystemTime; + let package = vec![ + "389-ds-base-2.2.3-1.fc37.x86_64.rpm", + "389-ds-base-devel-2.2.3-1.fc37.x86_64.rpm", + "389-ds-base-libs-2.2.3-1.fc37.x86_64.rpm", + "389-ds-base-snmp-2.2.3-1.fc37.x86_64.rpm", + ]; + let prefix = + "https://mirrors.aliyun.com/fedora/releases/37/Everything/x86_64/os/Packages/3/"; + let (multi,mut pbs)=get_multi_progress(4); + let mut handles=Vec::new(); + for i in 0..4 { + let url=prefix.to_string().clone()+package[i]; + let file_path="tests/".to_string()+package[i]; + let pb=pbs.pop().unwrap(); + pb.set_message(package[i]); + handles.push(tokio::spawn(async move{ + let client = ClientBuilder::new().user_agent("rdnf").build().unwrap(); + let mut stream = client.get(url).send().await.unwrap().bytes_stream(); + let mut f = tokio::fs::OpenOptions::new() + .append(true) + .create(true) + .open(file_path) + .await.unwrap(); + let mut count: u64 = 0; + let earlier=SystemTime::now(); + let mut duration:u64=0; + while let Some(items) = stream.next().await { + let bytes = items.unwrap(); + count += bytes.len() as u64; + let p=SystemTime::now().duration_since(earlier).unwrap().as_secs(); + if p!=duration{ + duration=p; + pb.set_position(count); + } + f.write(bytes.as_ref()).await.unwrap(); + } + 
pb.finish_with_message("Done"); + })); + } + for ele in handles { + ele.await.unwrap(); + } + multi.clear().unwrap(); + } } + +// let m = MultiProgress::new(); +// let sty = ProgressStyle::with_template( +// "[{elapsed_precise}] {bar:40.cyan/blue} {pos:>7}/{len:7} {msg}", +// ) +// .unwrap() +// .progress_chars("##-"); + +// let pb = m.add(ProgressBar::new(128)); +// pb.set_style(sty.clone()); + +// let pb2 = m.insert_after(&pb, ProgressBar::new(128)); +// pb2.set_style(sty.clone()); + +// let pb3 = m.insert_after(&pb2, ProgressBar::new(1024)); +// pb3.set_style(sty); + +// m.println("starting!").unwrap(); + +// let m_clone = m.clone(); +// let h1 = thread::spawn(move || { +// for i in 0..128 { +// pb.set_message(format!("item #{}", i + 1)); +// pb.inc(1); +// thread::sleep(Duration::from_millis(15)); +// } +// m_clone.println("pb1 is done!").unwrap(); +// pb.finish_with_message("done"); +// }); + +// let m_clone = m.clone(); +// let h2 = thread::spawn(move || { +// for _ in 0..3 { +// pb2.set_position(0); +// for i in 0..128 { +// pb2.set_message(format!("item #{}", i + 1)); +// pb2.inc(1); +// thread::sleep(Duration::from_millis(8)); +// } +// } +// m_clone.println("pb2 is done!").unwrap(); +// pb2.finish_with_message("done"); +// }); + +// let m_clone = m.clone(); +// let h3 = thread::spawn(move || { +// for i in 0..1024 { +// pb3.set_message(format!("item #{}", i + 1)); +// pb3.inc(1); +// thread::sleep(Duration::from_millis(2)); +// } +// m_clone.println("pb3 is done!").unwrap(); +// pb3.finish_with_message("done"); +// }); + +// let _ = h1.join(); +// let _ = h2.join(); +// let _ = h3.join(); +// m.clear().unwrap(); + diff --git a/rdnf/tests.tar.gz b/rdnf/tests.tar.gz new file mode 100644 index 0000000000000000000000000000000000000000..53964a7ebe25e4c7742e85405ebd6bc70d32fe4c Binary files /dev/null and b/rdnf/tests.tar.gz differ