diff --git a/src/threadpool.c b/src/threadpool.c
index c7294a68ae0fc9cc0a5f3f66e7840728b1291bcb..534994f6b7b546db43be159a4d5403e5405266c8 100644
--- a/src/threadpool.c
+++ b/src/threadpool.c
@@ -658,6 +658,8 @@ void uv__work_done(uv_async_t* handle) {
   QUEUE* q;
   QUEUE wq;
   int err;
+  char trace_name[25] = "TaskNumber_";
+  char str[10];
 
   loop = container_of(handle, uv_loop_t, wq_async);
   rdlock_closed_uv_loop_rwlock();
@@ -679,7 +681,13 @@ void uv__work_done(uv_async_t* handle) {
   }
 #endif
   uv_mutex_unlock(&loop->wq_mutex);
 
+  if (loop->active_reqs.count > TASK_NUMBER_WARNING) {
+    UV_LOGW("The number of tasks is too large, task number is %{public}d", loop->active_reqs.count);
+  }
+  snprintf(str, sizeof(str), "%d", loop->active_reqs.count);
+  strcat(trace_name, str);
+  uv_start_trace(UV_TRACE_TAG, trace_name);
   while (!QUEUE_EMPTY(&wq)) {
     q = QUEUE_HEAD(&wq);
     QUEUE_REMOVE(q);
@@ -708,6 +716,7 @@ void uv__work_done(uv_async_t* handle) {
     post_statistic_work(&dump_work->wq);
 #endif
   }
+  uv_end_trace(UV_TRACE_TAG);
   rdunlock_closed_uv_loop_rwlock();
 }
 
@@ -736,7 +745,6 @@ static void uv__queue_done(struct uv__work* w, int err) {
 void uv__ffrt_work(ffrt_executor_task_t* data, ffrt_qos_t qos)
 {
   struct uv__work* w = (struct uv__work *)data;
-  uv_loop_t* loop = w->loop;
 #ifdef UV_STATISTIC
   uv__post_statistic_work(w, WORK_EXECUTING);
 #endif
@@ -748,35 +756,35 @@ void uv__ffrt_work(ffrt_executor_task_t* data, ffrt_qos_t qos)
 #ifdef UV_STATISTIC
   uv__post_statistic_work(w, WORK_END);
 #endif
 
-  uv__loop_internal_fields_t* lfields = uv__get_internal_fields(loop);
+  uv__loop_internal_fields_t* lfields = uv__get_internal_fields(w->loop);
   rdlock_closed_uv_loop_rwlock();
-  if (loop->magic != UV_LOOP_MAGIC
+  if (w->loop->magic != UV_LOOP_MAGIC
       || !lfields
       || qos >= ARRAY_SIZE(lfields->wq_sub)
       || !lfields->wq_sub[qos][0]
       || !lfields->wq_sub[qos][1]) {
     rdunlock_closed_uv_loop_rwlock();
     UV_LOGE("uv_loop(%{public}zu:%{public}#x) in task(%p:%p) is invalid",
-      (size_t)loop, loop->magic, req->work_cb, req->after_work_cb);
+      (size_t)w->loop, w->loop->magic, req->work_cb, req->after_work_cb);
     return;
   }
 
-  uv_mutex_lock(&loop->wq_mutex);
+  uv_mutex_lock(&w->loop->wq_mutex);
   w->work = NULL;  /* Signal uv_cancel() that the work req is done executing. */
 
-  if (check_data_valid((struct uv_loop_data*)(loop->data)) == 0) {
+  if (check_data_valid((struct uv_loop_data*)(w->loop->data)) == 0) {
     struct uv_parm_t parm;
     parm.work = req;
     parm.status = 0;
-    struct uv_loop_data* addr = (struct uv_loop_data*)((uint64_t)loop->data -
+    struct uv_loop_data* addr = (struct uv_loop_data*)((uint64_t)w->loop->data -
       (UV_EVENT_MAGIC_OFFSET << UV_EVENT_MAGIC_OFFSETBITS));
     addr->post_task_func(addr->event_handler, req->after_work_cb, (void*) &parm, qos);
   } else {
     QUEUE_INSERT_TAIL(&(lfields->wq_sub[qos]), &w->wq);
-    uv_async_send(&loop->wq_async);
+    uv_async_send(&w->loop->wq_async);
   }
-  uv_mutex_unlock(&loop->wq_mutex);
+  uv_mutex_unlock(&w->loop->wq_mutex);
   rdunlock_closed_uv_loop_rwlock();
 }
 