diff --git a/0001-print-log-info.patch b/0001-print-log-info.patch
new file mode 100644
index 0000000000000000000000000000000000000000..3439d0b23e4e1a4da0d23be421a14d23d39f336e
--- /dev/null
+++ b/0001-print-log-info.patch
@@ -0,0 +1,857 @@
+diff --git a/src/c/ebpf_collector/ebpf_collector.bpf.c b/src/c/ebpf_collector/ebpf_collector.bpf.c
+index a7ea51b..d079c95 100644
+--- a/src/c/ebpf_collector/ebpf_collector.bpf.c
++++ b/src/c/ebpf_collector/ebpf_collector.bpf.c
+@@ -122,7 +122,7 @@ struct bpf_map_def SEC("maps") tag_res_2 = {
+ 
+ struct blk_mq_alloc_data {
+     /* input parameter */
+-    struct request_queue *q;
++    struct request_queue_kylin *q;
+     blk_mq_req_flags_t flags;
+     unsigned int shallow_depth;
+ 
+@@ -134,9 +134,9 @@ struct blk_mq_alloc_data {
+ struct request_kylin {
+     struct request_queue *q;
+     struct blk_mq_ctx *mq_ctx;
++    struct blk_mq_hw_ctx *mq_hctx;
+ 
+-    int cpu;
+-    unsigned int cmd_flags;     /* op and common flags */
++    unsigned int cmd_flags;     /* op and common flags */
+     req_flags_t rq_flags;
+ 
+     int internal_tag;
+@@ -144,7 +144,7 @@ struct request_kylin {
+     /* the following two fields are internal, NEVER access directly */
+     unsigned int __data_len;    /* total data len */
+     int tag;
+-    sector_t __sector;          /* sector cursor */
++    sector_t __sector;          /* sector cursor */
+ 
+     struct bio *bio;
+     struct bio *biotail;
+@@ -159,7 +159,7 @@ struct request_kylin {
+      * the dispatch list).
+      */
+     union {
+-        struct hlist_node hash; /* merge hash */
++        struct hlist_node hash; /* merge hash */
+         struct list_head ipi_list;
+     };
+ 
+@@ -171,7 +171,7 @@ struct request_kylin {
+      * completion_data share space with the rb_node.
+      */
+     union {
+-        struct rb_node rb_node; /* sort/lookup */
++        struct rb_node rb_node; /* sort/lookup */
+         struct bio_vec special_vec;
+         void *completion_data;
+         int error_count; /* for legacy drivers, don't use */
+@@ -198,7 +198,11 @@ struct request_kylin {
+ 
+     struct gendisk *rq_disk;
+     struct hd_struct *part;
+-    /* Time that I/O was submitted to the kernel. */
++#ifdef CONFIG_BLK_RQ_ALLOC_TIME
++    /* Time that the first bio started allocating this request. */
++    u64 alloc_time_ns;
++#endif
++    /* Time that this request was allocated for this IO. */
+     u64 start_time_ns;
+     /* Time that I/O was submitted to the device. */
+     u64 io_start_time_ns;
+ 
+@@ -223,9 +227,9 @@ struct request_kylin {
+     unsigned short write_hint;
+     unsigned short ioprio;
+ 
+-    void *special;              /* opaque pointer available for LLD use */
++    void *special;              /* opaque pointer available for LLD use */
+ 
+-    unsigned int extra_len;     /* length of alignment and padding */
++    unsigned int extra_len;     /* length of alignment and padding */
+ 
+     enum mq_rq_state state;
+     refcount_t ref;
+@@ -235,8 +239,6 @@ struct request_kylin {
+     /* access through blk_rq_set_deadline, blk_rq_deadline */
+     unsigned long __deadline;
+ 
+-    struct list_head timeout_list;
+-
+     union {
+         struct __call_single_data csd;
+         u64 fifo_time;
+@@ -250,12 +252,401 @@ struct request_kylin {
+ 
+     /* for bidi */
+     struct request_kylin *next_rq;
++    KABI_RESERVE(1);
++    KABI_RESERVE(2);
++};
++
++struct request_queue_kylin {
++    /*
++     * Together with queue_head for cacheline sharing
++     */
++    struct list_head queue_head;
++    struct request *last_merge;
++    struct elevator_queue *elevator;
++
++    struct blk_queue_stats *stats;
++    struct rq_qos *rq_qos;
++
++    make_request_fn *make_request_fn;
++    poll_q_fn *poll_fn;
++    dma_drain_needed_fn *dma_drain_needed;
++
++    const struct blk_mq_ops *mq_ops;
++
++    /* sw queues */
++    struct blk_mq_ctx __percpu *queue_ctx;
++    unsigned int nr_queues;
++
++    unsigned int queue_depth;
++
++    /* hw dispatch queues */
++    struct blk_mq_hw_ctx **queue_hw_ctx;
++    unsigned int nr_hw_queues;
++
++    struct backing_dev_info_kylin *backing_dev_info;
++
++    /*
++     * The queue owner gets to use this for whatever they like.
++     * ll_rw_blk doesn't touch it.
++     */
++    void *queuedata;
++
++    /*
++     * various queue flags, see QUEUE_* below
++     */
++    unsigned long queue_flags;
++    /*
++     * Number of contexts that have called blk_set_pm_only(). If this
++     * counter is above zero then only RQF_PM and RQF_PREEMPT requests are
++     * processed.
++     */
++    atomic_t pm_only;
++
++    /*
++     * ida allocated id for this queue. Used to index queues from
++     * ioctx.
++     */
++    int id;
++
++    /*
++     * queue needs bounce pages for pages above this limit
++     */
++    gfp_t bounce_gfp;
++
++    /*
++     * protects queue structures from reentrancy. ->__queue_lock should
++     * _never_ be used directly, it is queue private. always use
++     * ->queue_lock.
++     */
++    spinlock_t __queue_lock;
++    spinlock_t *queue_lock;
++
++    /*
++     * queue kobject
++     */
++    struct kobject kobj;
++
++    /*
++     * mq queue kobject
++     */
++    struct kobject *mq_kobj;
++
++#ifdef CONFIG_BLK_DEV_INTEGRITY
++    struct blk_integrity integrity;
++#endif /* CONFIG_BLK_DEV_INTEGRITY */
++
++#ifdef CONFIG_PM
++    struct device *dev;
++    int rpm_status;
++    unsigned int nr_pending;
++#endif
++
++    /*
++     * queue settings
++     */
++    unsigned long nr_requests;  /* Max # of requests */
+ 
++    unsigned int dma_drain_size;
++    void *dma_drain_buffer;
++    unsigned int dma_pad_mask;
++    unsigned int dma_alignment;
++
++    unsigned int rq_timeout;
++    int poll_nsec;
++
++    struct blk_stat_callback *poll_cb;
++    struct blk_rq_stat poll_stat[BLK_MQ_POLL_STATS_BKTS];
++
++    struct timer_list timeout;
++    struct work_struct timeout_work;
++
++    atomic_t nr_active_requests_shared_sbitmap;
++
++    struct list_head icq_list;
+ #ifdef CONFIG_BLK_CGROUP
+-    struct request_list *rl;    /* rl this rq is alloced from */
++    DECLARE_BITMAP (blkcg_pols, BLKCG_MAX_POLS);
++    struct blkcg_gq *root_blkg;
++    struct list_head blkg_list;
+ #endif
+-    KABI_RESERVE(1);
+-    KABI_RESERVE(2);
++
++    struct queue_limits limits;
++
++    unsigned int required_elevator_features;
++
++#ifdef CONFIG_BLK_DEV_ZONED
++    /*
++     * Zoned block device information for request dispatch control.
++     * nr_zones is the total number of zones of the device. This is always
++     * 0 for regular block devices. seq_zones_bitmap is a bitmap of nr_zones
++     * bits which indicates if a zone is conventional (bit clear) or
++     * sequential (bit set). seq_zones_wlock is a bitmap of nr_zones
++     * bits which indicates if a zone is write locked, that is, if a write
++     * request targeting the zone was dispatched. All three fields are
++     * initialized by the low level device driver (e.g. scsi/sd.c).
++     * Stacking drivers (device mappers) may or may not initialize
++     * these fields.
++     *
++     * Reads of this information must be protected with blk_queue_enter() /
++     * blk_queue_exit(). Modifying this information is only allowed while
++     * no requests are being processed. See also blk_mq_freeze_queue() and
++     * blk_mq_unfreeze_queue().
++     */
++    unsigned int nr_zones;
++    unsigned long *seq_zones_bitmap;
++    unsigned long *seq_zones_wlock;
++#endif /* CONFIG_BLK_DEV_ZONED */
++
++    /*
++     * sg stuff
++     */
++    unsigned int sg_timeout;
++    unsigned int sg_reserved_size;
++    int node;
++#ifdef CONFIG_BLK_DEV_IO_TRACE
++    struct blk_trace __rcu *blk_trace;
++    struct mutex blk_trace_mutex;
++#endif
++    /*
++     * for flush operations
++     */
++    struct blk_flush_queue *fq;
++
++    struct list_head requeue_list;
++    spinlock_t requeue_lock;
++    struct delayed_work requeue_work;
++
++    struct mutex sysfs_lock;
++
++    /*
++     * for reusing dead hctx instance in case of updating
++     * nr_hw_queues
++     */
++    struct list_head unused_hctx_list;
++    spinlock_t unused_hctx_lock;
++
++    int mq_freeze_depth;
++
++#if defined(CONFIG_BLK_DEV_BSG)
++    struct bsg_class_device bsg_dev;
++#endif
++
++#ifdef CONFIG_BLK_DEV_THROTTLING
++    /* Throttle data */
++    struct throtl_data *td;
++#endif
++    struct rcu_head rcu_head;
++    wait_queue_head_t mq_freeze_wq;
++    /*
++     * Protect concurrent access to q_usage_counter by
++     * percpu_ref_kill() and percpu_ref_reinit().
++     */
++    struct mutex mq_freeze_lock;
++    struct percpu_ref q_usage_counter;
++    struct list_head all_q_node;
++
++    struct blk_mq_tag_set *tag_set;
++    struct list_head tag_set_list;
++    struct bio_set bio_split;
++
++#ifdef CONFIG_BLK_DEBUG_FS
++    struct dentry *debugfs_dir;
++    struct dentry *sched_debugfs_dir;
++#endif
++
++    bool mq_sysfs_init_done;
++
++    size_t cmd_size;
++    void *rq_alloc_data;
++
++    struct work_struct release_work;
++
++#ifdef CONFIG_BLK_BIO_DISPATCH_ASYNC
++    /* used when QUEUE_FLAG_DISPATCH_ASYNC is set */
++    struct cpumask dispatch_async_cpus;
++    int __percpu *last_dispatch_cpu;
++#endif
++
++#define BLK_MAX_WRITE_HINTS 5
++    u64 write_hints[BLK_MAX_WRITE_HINTS];
++
++    KABI_RESERVE(1);
++    KABI_RESERVE(2);
++    KABI_RESERVE(3);
++    KABI_RESERVE(4);
++};
++
++struct backing_dev_info_kylin {
++    u64 id;
++    struct rb_node rb_node; /* keyed by ->id */
++    struct list_head bdi_list;
++    unsigned long ra_pages; /* max readahead in PAGE_SIZE units */
++    unsigned long io_pages; /* max allowed IO size */
++    congested_fn *congested_fn; /* Function pointer if device is md/dm */
++    void *congested_data;   /* Pointer to aux data for congested func */
++
++    const char *name;
++
++    struct kref refcnt; /* Reference counter for the structure */
++    unsigned int capabilities; /* Device capabilities */
++    unsigned int min_ratio;
++    unsigned int max_ratio, max_prop_frac;
++
++    /*
++     * Sum of avg_write_bw of wbs with dirty inodes. > 0 if there are
++     * any dirty wbs, which is depended upon by bdi_has_dirty().
++     */
++    atomic_long_t tot_write_bandwidth;
++
++    struct bdi_writeback wb;  /* the root writeback info for this bdi */
++    struct list_head wb_list; /* list of all wbs */
++#ifdef CONFIG_CGROUP_WRITEBACK
++    struct radix_tree_root cgwb_tree; /* radix tree of active cgroup wbs */
++    struct rb_root cgwb_congested_tree; /* their congested states */
++    struct mutex cgwb_release_mutex; /* protect shutdown of wb structs */
++    struct rw_semaphore wb_switch_rwsem; /* no cgwb switch while syncing */
++#else
++    struct bdi_writeback_congested *wb_congested;
++#endif
++    wait_queue_head_t wb_waitq;
++
++    union {
++        struct rcu_device *rcu_dev;
++        struct device *dev;
++    };
++    struct device *owner;
++
++    struct timer_list laptop_mode_wb_timer;
++
++#ifdef CONFIG_DEBUG_FS
++    struct dentry *debug_dir;
++    struct dentry *debug_stats;
++#endif
++
++    KABI_RESERVE(1)
++    KABI_RESERVE(2)
++    KABI_RESERVE(3)
++    KABI_RESERVE(4)
++};
++
++struct device_kylin {
++    struct device_kylin *parent;
++
++    struct device_private *p;
++
++    struct kobject kobj;
++    const char *init_name; /* initial name of the device */
++    const struct device_type *type;
++
++    struct mutex mutex; /* mutex to synchronize calls to
++                         * its driver.
++                         */
++
++    struct bus_type *bus;       /* type of bus device is on */
++    struct device_driver *driver;   /* which driver has allocated this
++                                       device */
++    void *platform_data;    /* Platform specific data, device
++                               core doesn't touch it */
++    void *driver_data;      /* Driver data, set and get with
++                               dev_set/get_drvdata */
++    struct dev_links_info links;
++    struct dev_pm_info power;
++    struct dev_pm_domain *pm_domain;
++
++#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
++    struct irq_domain *msi_domain;
++#endif
++#ifdef CONFIG_PINCTRL
++    struct dev_pin_info *pins;
++#endif
++#ifdef CONFIG_GENERIC_MSI_IRQ
++    struct list_head msi_list;
++#endif
++
++#ifdef CONFIG_NUMA
++    int numa_node;          /* NUMA node this device is close to */
++#endif
++    const struct dma_map_ops *dma_ops;
++    u64 *dma_mask;          /* dma mask (if dma'able device) */
++    u64 coherent_dma_mask;  /* Like dma_mask, but for
++                               alloc_coherent mappings as
++                               not all hardware supports
++                               64 bit addresses for consistent
++                               allocations such descriptors. */
++    u64 bus_dma_mask;       /* upstream dma_mask constraint */
++    unsigned long dma_pfn_offset;
++
++    struct device_dma_parameters *dma_parms;
++
++    struct list_head dma_pools; /* dma pools (if dma'ble) */
++
++    struct dma_coherent_mem *dma_mem; /* internal for coherent mem
++                                         override */
++#ifdef CONFIG_DMA_CMA
++    struct cma *cma_area;   /* contiguous memory area for dma
++                               allocations */
++#endif
++    /* arch specific additions */
++    struct dev_archdata archdata;
++
++    struct device_node *of_node;  /* associated device tree node */
++    struct fwnode_handle *fwnode; /* firmware device node */
++
++    dev_t devt;             /* dev_t, creates the sysfs "dev" */
++    u32 id;                 /* device instance */
++
++    spinlock_t devres_lock;
++    struct list_head devres_head;
++
++    struct klist_node knode_class;
++    struct class *class;
++    const struct attribute_group **groups; /* optional groups */
++
++    void (*release)(struct device *dev);
++    struct iommu_group *iommu_group;
++    struct iommu_fwspec *iommu_fwspec;
++    struct iommu_param *iommu_param;
++
++    bool offline_disabled:1;
++    bool offline:1;
++    bool of_node_reused:1;
++#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
++    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
++    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
++    bool dma_coherent:1;
++#endif
++
++#ifdef CONFIG_GENERIC_MSI_IRQ
++#if !defined(CONFIG_DEBUG_SPINLOCK) && !defined(CONFIG_DEBUG_LOCK_ALLOC)
++#ifndef __GENKSYMS__
++    union {
++        raw_spinlock_t msi_lock;
++        unsigned long kabi_reserve1;
++    };
++#else
++    KABI_RESERVE(1)
++#endif
++#else
++    raw_spinlock_t msi_lock;
++    KABI_RESERVE(1)
++#endif
++#else
++    KABI_RESERVE(1)
++#endif
++    KABI_RESERVE(2)
++    KABI_RESERVE(3)
++    KABI_RESERVE(4)
++    KABI_RESERVE(5)
++    KABI_RESERVE(6)
++    KABI_RESERVE(7)
++    KABI_RESERVE(8)
++    KABI_RESERVE(9)
++    KABI_RESERVE(10)
++    KABI_RESERVE(11)
++    KABI_RESERVE(12)
++    KABI_RESERVE(13)
++    KABI_RESERVE(14)
++    KABI_RESERVE(15)
++    KABI_RESERVE(16)
+ };
+ 
+ static __always_inline void blk_fill_rwbs(char *rwbs, unsigned int op)
+@@ -1358,17 +1749,25 @@ int kretprobe_wbt_wait(struct pt_regs *regs)
+ SEC("kprobe/blk_mq_get_tag")
+ int kprobe_blk_mq_get_tag(struct pt_regs *regs)
+ {
++    bpf_printk("get_tag start1");
+     u64 tagkey = bpf_get_current_task();
+     u64 value = (u64)PT_REGS_PARM1(regs);
+     (void)bpf_map_update_elem(&tag_args, &tagkey, &value, BPF_ANY);
+     struct blk_mq_alloc_data *bd = (struct blk_mq_alloc_data *)value;
+-    struct request_queue *q = _(bd->q);
+-    struct backing_dev_info *backing_dev_info = _(q->backing_dev_info);
++    struct request_queue_kylin *q = (struct request_queue_kylin *)_(bd->q);
++    struct backing_dev_info_kylin *backing_dev_info = (struct backing_dev_info_kylin *)_(q->backing_dev_info);
+     struct device *owner = _(backing_dev_info->owner);
+     dev_t devt = _(owner->devt);
++    bpf_printk("get_tag start devt1111111111111 = %p\n", bd);
++    bpf_printk("get_tag start devt1111111111112 = %p\n", q);
++    bpf_printk("get_tag start devt11111111111131 = %p\n", backing_dev_info);
++    bpf_printk("get_tag start devt11111111111132 = %p\n", _(q->make_request_fn));
++    bpf_printk("get_tag start devt11111111111133 = %d\n", _(q->queue_depth));
++    bpf_printk("get_tag start devt1111111111114 = %p\n", owner);
+     int major = MAJOR(devt);
+     int first_minor = MINOR(devt);
+     unsigned int cmd_flags = 0;
++    bpf_printk("get_tag start cmd_flags = %d, major = %d, first_minor= %d\n", cmd_flags, major, first_minor);
+ 
+     struct io_counter *counterp, zero = {};
+     u32 key = find_matching_tag_1_keys(major, first_minor);
+@@ -1381,6 +1780,7 @@ int kprobe_blk_mq_get_tag(struct pt_regs *regs)
+     if (key >= MAP_SIZE){
+         key = find_matching_tag_5_keys(major, first_minor);
+         if (key >= MAP_SIZE){
++            bpf_printk("get_tag start return 1");
+             return 0;
+         }
+     }
+@@ -1391,11 +1791,17 @@ int kprobe_blk_mq_get_tag(struct pt_regs *regs)
+     init_io_counter(&zero, major, first_minor);
+ 
+     counterp = bpf_map_lookup_elem(&tag_map, &tagkey);
+-    if (counterp || major == 0)
++    bpf_printk("get_tag start counterp = %p\n", counterp);
++    if (counterp || major == 0) {
++        bpf_printk("get_tag start return 2");
+         return 0;
++    }
++
+     long err = bpf_map_update_elem(&tag_map, &tagkey, &zero, BPF_NOEXIST);
+-    if (err)
++    if (err) {
++        bpf_printk("get_tag start return 3");
+         return 0;
++    }
+ 
+     u64 curr_start_range = zero.start_time / THRESHOLD;
+ 
+@@ -1434,7 +1840,7 @@ int kprobe_blk_mq_get_tag(struct pt_regs *regs)
+             __sync_fetch_and_add(&curr_data_time_range->count[key], 1);
+         }
+     }
+-
++    bpf_printk("get_tag start2");
+     return 0;
+ }
+ 
+@@ -1442,6 +1848,7 @@ int kprobe_blk_mq_get_tag(struct pt_regs *regs)
+ SEC("kretprobe/blk_mq_get_tag")
+ int kretprobe_blk_mq_get_tag(struct pt_regs *regs)
+ {
++    bpf_printk("get_tag end1");
+     u64 tagkey = bpf_get_current_task();
+     u64 *tagargs = NULL;
+     struct blk_mq_alloc_data *bd = NULL;
+@@ -1449,17 +1856,22 @@ int kretprobe_blk_mq_get_tag(struct pt_regs *regs)
+     tagargs = (u64 *)bpf_map_lookup_elem(&tag_args, &tagkey);
+     if (tagargs == NULL) {
+         bpf_map_delete_elem(&tag_args, &tagkey);
+-        return 0;
++        bpf_printk("get_tag end return 1");
++        return 0;
+     }
+     bd = (struct blk_mq_alloc_data *)(*tagargs);
+-    struct request_queue *q = _(bd->q);
+-    struct backing_dev_info *backing_dev_info = _(q->backing_dev_info);
++    // struct request_queue *q = _(bd->q);
++    // struct backing_dev_info *backing_dev_info = _(q->backing_dev_info);
++    struct request_queue_kylin *q = (struct request_queue_kylin *)_(bd->q);
++    struct backing_dev_info_kylin *backing_dev_info = (struct backing_dev_info_kylin *)_(q->backing_dev_info);
+     struct device *owner = _(backing_dev_info->owner);
+     dev_t devt = _(owner->devt);
+     int major = MAJOR(devt);
+     int first_minor = MINOR(devt);
+     unsigned int cmd_flags = 0;
+ 
++    bpf_printk("get_tag end cmd_flags = %d, major = %d, first_minor= %d\n", cmd_flags, major, first_minor);
++
+     struct io_counter *counterp;
+     u32 key = find_matching_tag_1_keys(major, first_minor);
+     if (key >= MAP_SIZE){
+@@ -1471,6 +1883,7 @@ int kretprobe_blk_mq_get_tag(struct pt_regs *regs)
+     if (key >= MAP_SIZE){
+         key = find_matching_tag_5_keys(major, first_minor);
+         if (key >= MAP_SIZE){
++            bpf_printk("get_tag end return 2");
bpf_printk("get_tag end return 2"); + return 0; + } + } +@@ -1480,8 +1893,10 @@ int kretprobe_blk_mq_get_tag(struct pt_regs *regs) + + counterp = bpf_map_lookup_elem(&tag_map, &tagkey); + +- if (!counterp) ++ if (!counterp) { ++ bpf_printk("get_tag end return 3"); + return 0; ++ } + + u64 duration = bpf_ktime_get_ns() - counterp->start_time; + u64 curr_start_range = counterp->start_time / THRESHOLD; +@@ -1537,9 +1952,9 @@ int kretprobe_blk_mq_get_tag(struct pt_regs *regs) + + bpf_map_delete_elem(&tag_map, &tagkey); + bpf_map_delete_elem(&tag_args, &tagkey); ++ bpf_printk("get_tag end2"); + return 0; + } + + char LICENSE[] SEC("license") = "Dual BSD/GPL"; + u32 _version SEC("version") = LINUX_VERSION_CODE; +- +diff --git a/src/c/ebpf_collector/ebpf_collector.c b/src/c/ebpf_collector/ebpf_collector.c +index af452c8..52a02bb 100644 +--- a/src/c/ebpf_collector/ebpf_collector.c ++++ b/src/c/ebpf_collector/ebpf_collector.c +@@ -11,6 +11,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -36,11 +37,25 @@ + #define TAG_RES_2 (map_fd[13]) + #define BPF_FILE "/usr/lib/ebpf_collector.bpf.o" + ++#define MAX_LINE_LENGTH 1024 ++#define MAX_SECTION_NAME_LENGTH 256 ++#define CONFIG_FILE "/etc/sysSentry/collector.conf" ++ + typedef struct { + int major; + int minor; + } DeviceInfo; + ++typedef enum { ++ LOG_LEVEL_NONE, ++ LOG_LEVEL_DEBUG, ++ LOG_LEVEL_ERROR, ++ LOG_LEVEL_WARNING, ++ LOG_LEVEL_INFO ++} LogLevel; ++ ++LogLevel currentLogLevel = LOG_LEVEL_INFO; ++ + static volatile bool exiting; + + const char argp_program_doc[] = +@@ -162,14 +177,17 @@ static int print_map_res(struct bpf_map *map_res, char *stage, int *map_size, in + size_t length = strlen(counter.io_type); + char io_type; + if (length > 0) { ++ logMessage(LOG_LEVEL_DEBUG, "io_type have value.\n"); + io_type = counter.io_type[0]; + } else { +- io_type = NULL; ++ logMessage(LOG_LEVEL_DEBUG, "io_type not value.\n"); ++ io_type = 'W'; + } + int major = counter.major; + int first_minor = counter.first_minor; + dev_t dev = makedev(major, first_minor); + char *device_name = find_device_name(dev); ++ logMessage(LOG_LEVEL_DEBUG, "device_name: %s\n", device_name); + if (device_name && io_type) { + printf("%-7s %10llu %10llu %d %c %s\n", + stage, +@@ -203,6 +221,102 @@ int init_map(int *map_fd, const char *map_name, int *map_size, DeviceInfo *devic + return 0; + } + ++char *read_config_value(const char *file, const char *section, const char *key) { ++ FILE *fp = fopen(file, "r"); ++ if (fp == NULL) { ++ perror("Failed to open config file"); ++ return NULL; ++ } ++ ++ char line[MAX_LINE_LENGTH]; ++ char current_section[MAX_SECTION_NAME_LENGTH] = {0}; ++ char *value = NULL; ++ ++ while (fgets(line, sizeof(line), fp) != NULL) { ++ line[strcspn(line, "\n")] = 0; ++ ++ if (line[0] == '\0' || line[0] == ';' || line[0] == '#') { ++ continue; ++ } ++ ++ if (line[0] == '[') { ++ sscanf(line, "[%[^]]", current_section); ++ continue; ++ } ++ ++ if (strcmp(current_section, section) == 0) { ++ char *delimiter = "="; ++ char *token = strtok(line, delimiter); ++ if (token != NULL) { ++ if (strcmp(token, key) == 0) { ++ token = strtok(NULL, delimiter); ++ if (token != NULL) { ++ value = strdup(token); ++ break; ++ } ++ } ++ } ++ } ++ } ++ ++ fclose(fp); ++ return value; ++} ++ ++void setLogLevel(const char *levelStr) { ++ if (strcmp(levelStr, "info") == 0) { ++ currentLogLevel = LOG_LEVEL_INFO; ++ } ++ else if (strcmp(levelStr, "warning") == 0) { ++ currentLogLevel = LOG_LEVEL_WARNING; ++ } ++ else if (strcmp(levelStr, "error") == 
++        currentLogLevel = LOG_LEVEL_ERROR;
++    }
++    else if (strcmp(levelStr, "debug") == 0) {
++        currentLogLevel = LOG_LEVEL_DEBUG;
++    }
++    else if (strcmp(levelStr, "none") == 0) {
++        currentLogLevel = LOG_LEVEL_NONE;
++    }
++    else {
++        fprintf(stderr, "Unknown log level: %s\n", levelStr);
++        currentLogLevel = LOG_LEVEL_INFO;
++    }
++}
++
++void logMessage(LogLevel level, const char *format, ...) {
++    va_list args;
++    if (level >= currentLogLevel) {
++        va_start(args, format);
++        vprintf(format, args);
++        va_end(args);
++    }
++}
++
++int check_for_device(const char *device_name) {
++    char path[256];
++    snprintf(path, sizeof(path), "/sys/block/%s", device_name);
++
++    DIR *dir = opendir(path);
++    if (dir == NULL) {
++        return 0;
++    }
++
++    struct dirent *entry;
++    while ((entry = readdir(dir)) != NULL) {
++        struct stat statbuf;
++        if (stat(path, &statbuf) == 0) {
++            if (S_ISDIR(statbuf.st_mode)) {
++                closedir(dir);
++                return 1;
++            }
++        }
++    }
++    closedir(dir);
++    return 0;
++}
++
+ int main(int argc, char **argv) {
+     struct partitions *partitions = NULL;
+     const struct partition *partition;
+@@ -222,15 +336,28 @@ int main(int argc, char **argv) {
+     struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY};
+     setrlimit(RLIMIT_MEMLOCK, &r);
+ 
++    char *level = read_config_value(CONFIG_FILE, "log", "level");
++    if (level != NULL) {
++        if (level[strlen(level) - 1] == '\r') {
++            size_t len = strlen(level);
++            level[len - 1] = '\0';
++        }
++
++        setLogLevel(level);
++        free(level);
++    }
++
+     err = argp_parse(&argp, argc, argv, 0, NULL, NULL);
+     if (err)
+         return err;
+ 
+     snprintf(filename, sizeof(filename), BPF_FILE);
+ 
++    logMessage(LOG_LEVEL_DEBUG, " load_bpf_file before.\n");
+     if (load_bpf_file(filename)) {
+         return 1;
+     }
++    logMessage(LOG_LEVEL_DEBUG, " load_bpf_file after.\n");
+ 
+     signal(SIGINT, sig_handler);
+ 
+@@ -240,20 +367,28 @@ int main(int argc, char **argv) {
+         return EXIT_FAILURE;
+     }
+ 
++
+     while ((entry = readdir(dir)) != NULL) {
+-        if (entry->d_type == DT_BLK) {
+-            snprintf(path, sizeof(path), "/dev/%s", entry->d_name);
+-            struct stat statbuf;
+-            if (lstat(path, &statbuf) == 0) {
+-                if (S_ISBLK(statbuf.st_mode)) {
+-                    devices[device_count].major = major(statbuf.st_rdev);
+-                    devices[device_count].minor = minor(statbuf.st_rdev);
+-                    device_count++;
+-                    if (device_count >= MAP_SIZE) {
+-                        break;
+-                    }
+-                }
+-            }
++        if (entry->d_type != DT_BLK) {
++            continue;
++        }
++        snprintf(path, sizeof(path), "/dev/%s", entry->d_name);
++        struct stat statbuf;
++        if (lstat(path, &statbuf) != 0 || !S_ISBLK(statbuf.st_mode)) {
++            continue;
++        }
++        if (!strncmp(entry->d_name, "dm-", 3) || !strncmp(entry->d_name, "loop", 4) ||
++            !strncmp(entry->d_name, "md", 2)) {
++            continue;
++        }
++        if (!check_for_device(entry->d_name)) {
++            continue;
++        }
++        devices[device_count].major = major(statbuf.st_rdev);
++        devices[device_count].minor = minor(statbuf.st_rdev);
++        device_count++;
++        if (device_count >= MAP_SIZE) {
++            break;
+         }
+     }
+ 
+@@ -306,4 +441,3 @@ int main(int argc, char **argv) {
+ 
+     return -err;
+ }
+-
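
The logging plumbing added above is small enough to trace end to end: read_config_value() pulls the level string out of the [log] section of /etc/sysSentry/collector.conf, setLogLevel() maps it onto the LogLevel enum, and logMessage() forwards to vprintf() only when the message level is at or above currentLogLevel, with LOG_LEVEL_NONE ordered last so that "none" silences everything under the >= comparison. The standalone sketch below mirrors those helpers so the gating can be tried outside the collector; it is an illustration of the mechanism, not part of the patch, and the messages in its main() are made up for the demo.

    #include <stdarg.h>
    #include <stdio.h>

    /* Same ordering as the patch's enum: DEBUG is the most verbose
     * threshold, and NONE, being the largest value, suppresses every
     * message under the >= comparison. */
    typedef enum {
        LOG_LEVEL_DEBUG,
        LOG_LEVEL_ERROR,
        LOG_LEVEL_WARNING,
        LOG_LEVEL_INFO,
        LOG_LEVEL_NONE
    } LogLevel;

    static LogLevel currentLogLevel = LOG_LEVEL_INFO;

    /* Mirror of the patch's logMessage(): print only when the message
     * level clears the configured threshold. */
    static void logMessage(LogLevel level, const char *format, ...)
    {
        if (level < currentLogLevel)
            return;
        va_list args;
        va_start(args, format);
        vprintf(format, args);
        va_end(args);
    }

    int main(void)
    {
        /* At the default INFO threshold the DEBUG message is dropped. */
        logMessage(LOG_LEVEL_DEBUG, "hidden at the default level\n");
        logMessage(LOG_LEVEL_INFO, "visible at the default level\n");

        /* Equivalent to setLogLevel("debug"): everything now prints. */
        currentLogLevel = LOG_LEVEL_DEBUG;
        logMessage(LOG_LEVEL_DEBUG, "visible once the level is debug\n");
        return 0;
    }
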
diff --git a/sysSentry.spec b/sysSentry.spec
index fe12957e505d34df34bf245924c376b9615b9406..86d7fcf4467dd8e72c87b1d61262f0021a0ba601 100644
--- a/sysSentry.spec
+++ b/sysSentry.spec
@@ -4,7 +4,7 @@
 Summary: System Inspection Framework
 Name: sysSentry
 Version: 1.0.2
-Release: 67
+Release: 70
 License: Mulan PSL v2
 Group: System Environment/Daemons
 Source0: https://gitee.com/openeuler/sysSentry/releases/download/v%{version}/%{name}-%{version}.tar.gz
@@ -83,6 +83,7 @@ Patch70: fix-excessive-CPU-usage.patch
 Patch71: change-avg_block_io-config.patch
 Patch72: update-nvme-config.patch
 Patch73: fix-absolute-threshold-not-be-used.patch
+Patch74: 0001-print-log-info.patch
 
 BuildRequires: cmake gcc-c++
 BuildRequires: python3 python3-setuptools
@@ -354,6 +355,12 @@ rm -rf %{buildroot}
 %attr(0550,root,root) %{python3_sitelib}/sentryCollector/__pycache__/collect_plugin*
 
 %changelog
+* Fri Nov 15 2024 heyouzhi - 1.0.2-70
+- Type:bugfix
+- CVE:NA
+- SUG:NA
+- DESC:print log info
+
 * Fri Nov 15 2024 heyouzhi - 1.0.2-67
 - Type:bugfix
 - CVE:NA
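
For reference, the collector reads its threshold from the [log] section of /etc/sysSentry/collector.conf at startup; setLogLevel() accepts info, warning, error, debug, and none, and falls back to info (with a warning on stderr) for anything else. A minimal configuration that surfaces the new LOG_LEVEL_DEBUG output would look like the snippet below; any other sections a real collector.conf carries are omitted here.

    [log]
    level=debug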