1 Star 0 Fork 7

zeus/qperf

forked from src-openEuler/qperf 
加入 Gitee
与超过 1200万 开发者一起发现、参与优秀开源项目,私有仓库也完全免费 :)
免费加入
文件
该仓库未声明开源许可证文件(LICENSE),使用请关注具体项目描述及其代码上游依赖。
克隆/下载
0001-qperf-multi-way-test-support.patch 21.62 KB
一键复制 编辑 原始数据 按行查看 历史
zeus 提交于 2025-02-28 20:23 +08:00 . qperf multi-way test support
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661
From 739aa1f3ac6015468304392dbe666599b0a5e0f0 Mon Sep 17 00:00:00 2001
From: Haotian Chen <chenhaotian16@huawei.com>
Date: Fri, 28 Feb 2025 15:49:31 +0800
Subject: [PATCH] qperf multi-way test support
---
src/qperf.c | 230 +++++++++++++++++++++++++++++++++++++++++---------
src/qperf.h | 12 ++-
src/rdma.c | 32 +++++++
src/support.c | 24 +++---
4 files changed, 247 insertions(+), 51 deletions(-)
diff --git a/src/qperf.c b/src/qperf.c
index 29e6098..c0efd59 100644
--- a/src/qperf.c
+++ b/src/qperf.c
@@ -51,6 +51,7 @@
#include <sys/times.h>
#include <sys/select.h>
#include <sys/utsname.h>
+#include <sys/shm.h>
#include "qperf.h"
@@ -65,6 +66,9 @@
#define VER_INC 11 /* Incremental version */
#define LISTENQ 5 /* Size of listen queue */
#define BUFSIZE 1024 /* Size of buffers */
+#define MAX_CONNECTIONS 1024 /* Maximum number of connections */
+#define NS_TO_SECOND 1000000000
+#define KILO 1000
/*
@@ -279,7 +283,8 @@ char *TestName;
char *ServerName;
SS ServerAddr;
int ServerAddrLen;
-int RemoteFD;
+int RemoteFDArr[MAX_CONNECTIONS];
+int *RemoteFD = RemoteFDArr;
int Debug;
volatile int Finished;
@@ -306,6 +311,7 @@ PAR_NAME ParName[] ={
{ "time", L_TIME, R_TIME },
{ "timeout", L_TIMEOUT, R_TIMEOUT },
{ "use_cm", L_USE_CM, R_USE_CM },
+ { "no_connections", L_NO_CONN, R_NO_CONN },
};
@@ -351,6 +357,8 @@ PAR_INFO ParInfo[P_N] ={
{ R_TIMEOUT, 't', &RReq.timeout },
{ L_USE_CM, 'l', &Req.use_cm },
{ R_USE_CM, 'l', &RReq.use_cm },
+ { L_NO_CONN, 'l', &Req.no_conn },
+ { R_NO_CONN, 'l', &RReq.no_conn },
};
@@ -546,6 +554,8 @@ OPTION Options[] ={
{ "-V", "version", },
{ "--wait_server", "wait", },
{ "-ws", "wait", },
+ { "--no_connections", "int", L_NO_CONN, R_NO_CONN },
+ { "-nc", "int", L_NO_CONN, R_NO_CONN },
};
@@ -615,7 +625,7 @@ initialize(void)
{
int i;
- RemoteFD = -1;
+ *RemoteFD = -1;
for (i = 0; i < P_N; ++i)
if (ParInfo[i].index != i)
error(BUG, "initialize: ParInfo: out of order: %d", i);
@@ -766,15 +776,144 @@ do_args(char *args[])
}
}
+/*
+ * Called when using multi-proc to run tests.
+ * Encapsulation for Process Functions.
+ */
+void client_proc(TEST *test) {
+ // Avoid using the same ProcStatFD across different processes
+ ProcStatFD = open("/proc/stat", 0);
+ if (ProcStatFD < 0)
+ error(SYS, "cannot open /proc/stat");
+ client(test);
+}
+unsigned long g_db_count;
/*
* Loop through a series of tests.
*/
static void
do_loop(LOOP *loop, TEST *test)
{
- if (!loop)
- client(test);
+ if (!loop) {
+ int pid = 0;
+ int allPids[MAX_CONNECTIONS];
+ int pidIndex = 0;
+ if (Req.no_conn <= 0) {
+ Req.no_conn = 1;
+ }
+ if (Req.no_conn > MAX_CONNECTIONS) {
+ error(0, "Too many connections specified, max is %d", MAX_CONNECTIONS);
+ }
+ if (!par_isset(L_NO_MSGS))
+ setp_u32(0, L_TIME, DEF_TIME);
+ if (!par_isset(R_NO_MSGS))
+ setp_u32(0, R_TIME, DEF_TIME);
+ setp_u32(0, L_TIMEOUT, DEF_TIMEOUT);
+ setp_u32(0, R_TIMEOUT, DEF_TIMEOUT);
+ par_use(L_AFFINITY);
+ par_use(R_AFFINITY);
+ par_use(L_TIME);
+ par_use(R_TIME);
+ if (par_isset(L_NO_CONN))
+ par_use(L_NO_CONN);
+ if (par_isset(R_NO_CONN))
+ par_use(R_NO_CONN);
+ init_lstat();
+ printf("%s for %d connections\n", test->name, Req.no_conn);
+ int pipefd[Req.no_conn][2];
+ int shmid;
+ // volatile
+ volatile int *shared;
+ //
+ shmid = shmget(IPC_PRIVATE, sizeof(int), IPC_CREAT | 0666);
+ if (shmid < 0) {
+ error(SYS|RET, "shmget failed");
+ }
+ shared = (int *)shmat(shmid, NULL, 0);
+ *shared = 0;
+ for (int i = 0; i < Req.no_conn; i++) {
+ if (pipe(pipefd[i]) == -1) {
+ error(SYS|RET, "pipe failed");
+ exit(1);
+ }
+ pid = fork();
+ allPids[pidIndex++] = pid;
+ if (pid < 0) {
+ error(SYS|RET, "fork failed");
+ continue;
+ }
+ if (pid > 0) {
+ continue;
+ }
+ if (pid == 0) {
+ while (*shared == 0) {
+ //
+ }
+ close(pipefd[i][0]); //
+ Req.affinity += pidIndex;
+ client_proc(test);
+ write(pipefd[i][1], &Res, sizeof(RES));
+ close(pipefd[i][1]); //
+ exit(0);
+ break;
+ }
+ }
+ *shared = 1;
+ for (int i = 0; i < Req.no_conn; i++) {
+ waitpid(allPids[i], NULL, 0);
+ }
+
+ setp_u32(0, L_MSG_SIZE, 1);
+ setp_u32(0, R_MSG_SIZE, 1);
+ //
+ RES allRes[Req.no_conn];
+ for (int i = 0; i < Req.no_conn; i++) {
+ close(pipefd[i][1]); //
+ read(pipefd[i][0], &allRes[i], sizeof(RES));
+ close(pipefd[i][0]); //
+ }
+ //
+ RES sumRes;
+ memset(&sumRes, 0, sizeof(sumRes));
+ for (int i = 0; i < Req.no_conn; i++) {
+ sumRes.send_bw += allRes[i].send_bw;
+ sumRes.recv_bw += allRes[i].recv_bw;
+ sumRes.msg_rate += allRes[i].msg_rate;
+ sumRes.send_cost += allRes[i].send_cost;
+ sumRes.recv_cost += allRes[i].recv_cost;
+ sumRes.latency += allRes[i].latency;
+ sumRes.latency_in_timestamp += allRes[i].latency_in_timestamp;
+ sumRes.latency_in_db += allRes[i].latency_in_db;
+ sumRes.all_latency_in_timestamp += allRes[i].all_latency_in_timestamp;
+ }
+ Res.send_bw = sumRes.send_bw / Req.no_conn;
+ Res.recv_bw = sumRes.recv_bw / Req.no_conn;
+ Res.msg_rate = sumRes.msg_rate / Req.no_conn;
+ Res.send_cost = sumRes.send_cost / Req.no_conn;
+ Res.recv_cost = sumRes.recv_cost / Req.no_conn;
+ Res.latency = sumRes.latency / Req.no_conn;
+ Res.latency_in_timestamp = sumRes.latency_in_timestamp / Req.no_conn;
+ Res.latency_in_db = sumRes.latency_in_db / Req.no_conn;
+ Res.all_latency_in_timestamp = sumRes.all_latency_in_timestamp / Req.no_conn;
+ int lenTestName = strlen(test->name);
+ if (streq(test->name + lenTestName - 3, "lat")) {
+ show_info(LATENCY);
+ } else if (streq(test->name + lenTestName - 3, "_mr") || test->name[0] == 'v') {
+ show_info(MSG_RATE);
+ } else if (test->name[0] == 'u') {
+ show_info(BANDWIDTH_SR);
+ view_band('a', "sum_", "send_bw", sumRes.send_bw);
+ view_band('a', "sum_", "recv_bw", sumRes.recv_bw);
+ } else if (streq(test->name + lenTestName - 3, "_bw")) {
+ show_info(BANDWIDTH);
+ view_band('a', "sum_", "bw", sumRes.recv_bw);
+ }
+ place_show();
+ //
+ shmdt((void*)shared);
+ shmctl(shmid, IPC_RMID, NULL);
+ }
else {
long l = loop->init;
@@ -1337,12 +1476,14 @@ static void
server(void)
{
server_listen();
+ int remoteFdIndex = -1;
for (;;) {
REQ req;
pid_t pid;
TEST *test;
int s = offset(REQ, req_index);
+ RemoteFD = &RemoteFDArr[(++remoteFdIndex) % MAX_CONNECTIONS];
debug("ready for requests");
if (!server_recv_request())
continue;
@@ -1353,10 +1494,14 @@ server(void)
}
if (pid > 0) {
remotefd_close();
- waitpid(pid, 0, 0);
continue;
}
+ close(ListenFD);
remotefd_setup();
+ // Avoid using the same ProcStatFD across different processes
+ ProcStatFD = open("/proc/stat", 0);
+ if (ProcStatFD < 0)
+ error(SYS, "cannot open /proc/stat");
recv_mesg(&req, s, "request version");
dec_init(&req);
@@ -1374,6 +1519,7 @@ server(void)
init_lstat();
set_affinity();
(test->server)();
+ remotefd_close();
exit(0);
}
close(ListenFD);
@@ -1453,8 +1599,8 @@ server_recv_request(void)
SS clientAddr;
clientLen = sizeof(clientAddr);
- RemoteFD = accept(ListenFD, (struct sockaddr *)&clientAddr, &clientLen);
- if (RemoteFD < 0)
+ *RemoteFD = accept(ListenFD, (struct sockaddr *)&clientAddr, &clientLen);
+ if (*RemoteFD < 0)
return error(SYS|RET, "accept failed");
return 1;
}
@@ -1470,16 +1616,6 @@ client(TEST *test)
for (i = 0; i < P_N; ++i)
ParInfo[i].inuse = 0;
- if (!par_isset(L_NO_MSGS))
- setp_u32(0, L_TIME, DEF_TIME);
- if (!par_isset(R_NO_MSGS))
- setp_u32(0, R_TIME, DEF_TIME);
- setp_u32(0, L_TIMEOUT, DEF_TIMEOUT);
- setp_u32(0, R_TIMEOUT, DEF_TIMEOUT);
- par_use(L_AFFINITY);
- par_use(R_AFFINITY);
- par_use(L_TIME);
- par_use(R_TIME);
set_affinity();
RReq.ver_maj = VER_MAJ;
@@ -1489,10 +1625,8 @@ client(TEST *test)
TestName = test->name;
debug("sending request: %s", TestName);
init_lstat();
- printf("%s:\n", TestName);
(*test->client)();
remotefd_close();
- place_show();
}
@@ -1510,17 +1644,17 @@ client_send_request(void)
};
AI *ailist = getaddrinfo_port(ServerName, ListenPort, &hints);
- RemoteFD = -1;
+ *RemoteFD = -1;
if (ServerWait)
start_test_timer(ServerWait);
for (;;) {
for (a = ailist; a; a = a->ai_next) {
if (Finished)
break;
- RemoteFD = socket(a->ai_family, a->ai_socktype, a->ai_protocol);
- if (RemoteFD < 0)
+ *RemoteFD = socket(a->ai_family, a->ai_socktype, a->ai_protocol);
+ if (*RemoteFD < 0)
continue;
- if (connect(RemoteFD, a->ai_addr, a->ai_addrlen) != SUCCESS0) {
+ if (connect(*RemoteFD, a->ai_addr, a->ai_addrlen) != SUCCESS0) {
remotefd_close();
continue;
}
@@ -1528,7 +1662,7 @@ client_send_request(void)
memcpy(&ServerAddr, a->ai_addr, ServerAddrLen);
break;
}
- if (RemoteFD >= 0 || !ServerWait || Finished)
+ if (*RemoteFD >= 0 || !ServerWait || Finished)
break;
sleep(1);
}
@@ -1537,7 +1671,7 @@ client_send_request(void)
stop_test_timer();
freeaddrinfo(ailist);
- if (RemoteFD < 0)
+ if (*RemoteFD < 0)
error(0, "%s: failed to connect", ServerName);
remotefd_setup();
enc_init(&req);
@@ -1554,9 +1688,9 @@ remotefd_setup(void)
{
int one = 1;
- if (ioctl(RemoteFD, FIONBIO, &one) < 0)
+ if (ioctl(*RemoteFD, FIONBIO, &one) < 0)
error(SYS, "ioctl FIONBIO failed");
- if (fcntl(RemoteFD, F_SETOWN, getpid()) < 0)
+ if (fcntl(*RemoteFD, F_SETOWN, getpid()) < 0)
error(SYS, "fcntl F_SETOWN failed");
}
@@ -1568,8 +1702,8 @@ remotefd_setup(void)
static void
remotefd_close(void)
{
- close(RemoteFD);
- RemoteFD = -1;
+ close(*RemoteFD);
+ *RemoteFD = -1;
}
@@ -1781,7 +1915,7 @@ run_server_quit(void)
char buf[1];
sync_test();
- (void) read(RemoteFD, buf, sizeof(buf));
+ (void) read(*RemoteFD, buf, sizeof(buf));
kill(getppid(), SIGQUIT);
exit(0);
}
@@ -1885,12 +2019,25 @@ calc_results(void)
add_ustat(&RStat.s, &LStat.rem_s);
add_ustat(&RStat.r, &LStat.rem_r);
+ uint64_t LtimestampSum = Res.l.timestampSum;
+ uint64_t RtimestampSum = Res.r.timestampSum;
+ uint64_t LtimestampDb = Res.l.timestampDb;
+ uint64_t RtimestampDb = Res.r.timestampDb;
memset(&Res, 0, sizeof(Res));
+ Res.l.timestampSum = LtimestampSum;
+ Res.r.timestampSum = RtimestampSum;
+ Res.l.timestampDb = LtimestampDb;
+ Res.r.timestampDb = RtimestampDb;
calc_node(&Res.l, &LStat);
calc_node(&Res.r, &RStat);
no_msgs = LStat.r.no_msgs + RStat.r.no_msgs;
if (no_msgs)
Res.latency = Res.l.time_real / no_msgs;
+ if (no_msgs) {
+ Res.latency_in_timestamp = (Res.l.timestampSum + Res.r.timestampSum) / no_msgs / NS_TO_SECOND;
+ Res.latency_in_db = (Res.l.timestampDb + Res.r.timestampDb) / no_msgs / NS_TO_SECOND;
+ Res.all_latency_in_timestamp = (LStat.timestampSumAll + RStat.timestampSumAll) / no_msgs / NS_TO_SECOND;
+ }
locTime = Res.l.time_real;
remTime = Res.r.time_real;
@@ -1976,8 +2123,12 @@ calc_node(RESN *resn, STAT *stat)
int i;
CLOCK cpu;
double s = stat->time_e[T_REAL] - stat->time_s[T_REAL];
+ uint64_t timestampSum = resn->timestampSum;
+ uint64_t timestampDb = resn->timestampDb;
memset(resn, 0, sizeof(*resn));
+ resn->timestampSum = timestampSum;
+ resn->timestampDb = timestampDb;
if (s == 0)
return;
if (stat->no_ticks == 0)
@@ -2016,17 +2167,18 @@ static void
show_info(MEASURE measure)
{
if (measure == LATENCY) {
- view_time('a', "", "latency", Res.latency);
- view_rate('s', "", "msg_rate", Res.msg_rate);
+ view_time('a', "avg_", "latency", Res.latency);
+ view_time('a', "avg_", "all_latency_in_timestamp", Res.all_latency_in_timestamp);
+ view_rate('s', "avg_", "msg_rate", Res.msg_rate);
} else if (measure == MSG_RATE) {
- view_rate('a', "", "msg_rate", Res.msg_rate);
+ view_rate('a', "avg_", "msg_rate", Res.msg_rate);
} else if (measure == BANDWIDTH) {
- view_band('a', "", "bw", Res.recv_bw);
- view_rate('s', "", "msg_rate", Res.msg_rate);
+ view_band('a', "avg_", "bw", Res.recv_bw);
+ view_rate('s', "avg_", "msg_rate", Res.msg_rate);
} else if (measure == BANDWIDTH_SR) {
- view_band('a', "", "send_bw", Res.send_bw);
- view_band('a', "", "recv_bw", Res.recv_bw);
- view_rate('s', "", "msg_rate", Res.msg_rate);
+ view_band('a', "avg_", "send_bw", Res.send_bw);
+ view_band('a', "avg_", "recv_bw", Res.recv_bw);
+ view_rate('s', "avg_", "msg_rate", Res.msg_rate);
}
show_used();
view_cost('t', "", "send_cost", Res.send_cost);
@@ -2720,6 +2872,7 @@ enc_stat(STAT *host)
enc_ustat(&host->r);
enc_ustat(&host->rem_s);
enc_ustat(&host->rem_r);
+ enc_int(host->timestampSumAll, sizeof(host->timestampSumAll));
}
@@ -2742,6 +2895,7 @@ dec_stat(STAT *host)
dec_ustat(&host->r);
dec_ustat(&host->rem_s);
dec_ustat(&host->rem_r);
+ host->timestampSumAll = dec_int(sizeof(host->timestampSumAll));
}
diff --git a/src/qperf.h b/src/qperf.h
index 95613e3..6188b1b 100644
--- a/src/qperf.h
+++ b/src/qperf.h
@@ -129,6 +129,8 @@ typedef enum {
R_TIMEOUT,
L_USE_CM,
R_USE_CM,
+ L_NO_CONN,
+ R_NO_CONN,
P_N
} PAR_INDEX;
@@ -173,6 +175,7 @@ typedef struct REQ {
uint32_t use_cm; /* Use Connection Manager */
char id[STRSIZE]; /* Identifier */
char static_rate[STRSIZE]; /* Static rate */
+ uint32_t no_conn; /* Number of connections */
} REQ;
@@ -199,6 +202,8 @@ typedef struct STAT {
USTAT r; /* Receive statistics */
USTAT rem_s; /* Remote send statistics */
USTAT rem_r; /* Remote receive statistics */
+ uint64_t timestampSumAll; /* Sum time for send_to_recv in us */
+ uint64_t timestampSumSendDb; /* Sum time for send_db in us */
} STAT;
@@ -214,6 +219,8 @@ typedef struct RESN {
double cpu_idle; /* Idle time (fraction of cpu) */
double cpu_kernel; /* Kernel time (fraction of cpu) */
double cpu_io_wait; /* IO wait time (fraction of cpu) */
+ uint64_t timestampSum; /* Sum time for db_to_recv in us */
+ uint64_t timestampDb; /* Time used for send_db in us from mlx5 */
} RESN;
@@ -229,6 +236,9 @@ typedef struct RES {
double send_cost; /* Send cost */
double recv_cost; /* Receive cost */
double latency; /* Latency */
+ double latency_in_timestamp; /* timestamp latency for db_to_recv*/
+ double latency_in_db; /* timestamp latency for send_db */
+ double all_latency_in_timestamp; /* timestamp latency for send_to_recv*/
} RES;
@@ -368,6 +378,6 @@ extern char *TestName;
extern char *ServerName;
extern SS ServerAddr;
extern int ServerAddrLen;
-extern int RemoteFD;
+extern int *RemoteFD;
extern int Debug;
extern volatile int Finished;
diff --git a/src/rdma.c b/src/rdma.c
index b51ef70..55e976c 100644
--- a/src/rdma.c
+++ b/src/rdma.c
@@ -1046,6 +1046,20 @@ rd_pp_lat(int transport, IOMODE iomode)
show_results(LATENCY);
}
+extern unsigned long g_db_count;
+//
+static unsigned long get_timestamp_ns(void)
+{
+ unsigned long val, freq;
+ asm volatile("mrs %0, cntvct_el0" : "=r" (val));
+ asm volatile("mrs %0, cntfrq_el0" : "=r" (freq));
+
+ /* should never happen */
+ if (freq == 0)
+ return 0;
+
+ return 1000000000 / freq * val;
+}
/*
* Loop sending packets back and forth to measure ping-pong latency.
@@ -1054,6 +1068,8 @@ static void
rd_pp_lat_loop(DEVICE *dev, IOMODE iomode)
{
int done = 1;
+ static unsigned long curr_time_stamp, prev_time_stamp, send_time_stamp;
+ unsigned long *tmp_addr;
rd_post_recv_std(dev, 1);
sync_test();
@@ -1087,6 +1103,19 @@ rd_pp_lat_loop(DEVICE *dev, IOMODE iomode)
if (status == IBV_WC_SUCCESS) {
LStat.r.no_bytes += dev->msg_size;
LStat.r.no_msgs++;
+ //
+ curr_time_stamp = get_timestamp_ns();
+ memcpy(&prev_time_stamp, dev->buffer, sizeof(unsigned long));
+ Res.l.timestampSum += curr_time_stamp - prev_time_stamp;
+ memcpy(&send_time_stamp, dev->buffer + 3 * sizeof(unsigned long), sizeof(unsigned long));
+ LStat.timestampSumAll += curr_time_stamp - send_time_stamp;
+ tmp_addr = dev->buffer + sizeof(unsigned long);
+ if (*tmp_addr) {
+ if (g_db_count < *tmp_addr)
+ g_db_count = *tmp_addr;
+ tmp_addr++;
+ Res.l.timestampDb += *tmp_addr;
+ }
rd_post_recv_std(dev, 1);
} else
do_error(status, &LStat.r.no_errs);
@@ -2405,6 +2434,9 @@ rd_post_send(DEVICE *dev, int off, int len, int inc, int rep, int stat)
wr.send_flags |= IBV_SEND_INLINE;
errno = 0;
+ static unsigned long timestamp_before_send = 0;
+ timestamp_before_send = get_timestamp_ns();
+ memcpy(dev->buffer + 3 * sizeof(unsigned long), &timestamp_before_send, sizeof(unsigned long));
while (!Finished && rep-- > 0) {
if (ibv_post_send(dev->qp, &wr, &badwr) != SUCCESS0) {
if (Finished && errno == EINTR)
diff --git a/src/support.c b/src/support.c
index 6c82bb0..bb50036 100644
--- a/src/support.c
+++ b/src/support.c
@@ -277,7 +277,7 @@ send_mesg(void *ptr, int len, char *item)
{
if (item)
debug("sending %s", item);
- return send_recv_mesg('s', item, RemoteFD, ptr, len);
+ return send_recv_mesg('s', item, *RemoteFD, ptr, len);
}
@@ -289,7 +289,7 @@ recv_mesg(void *ptr, int len, char *item)
{
if (item)
debug("waiting for %s", item);
- return send_recv_mesg('r', item, RemoteFD, ptr, len);
+ return send_recv_mesg('r', item, *RemoteFD, ptr, len);
}
@@ -426,7 +426,7 @@ urgent(void)
* returned. This is likely not even possible with the current code flow
* but we check just in case.
*/
- if (RemoteFD < 0)
+ if (*RemoteFD < 0)
return;
/*
@@ -437,7 +437,7 @@ urgent(void)
* this case to cause us concern in the normal case, we do not expect this
* to ever occur. If it does, we let the lower levels deal with it.
*/
- if (recv(RemoteFD, buffer, 1, MSG_OOB) != 1)
+ if (recv(*RemoteFD, buffer, 1, MSG_OOB) != 1)
return;
/*
@@ -463,17 +463,17 @@ urgent(void)
timeout_set(ERROR_TIMEOUT, sig_alrm_remote_failure);
for (;;) {
- int s = sockatmark(RemoteFD);
+ int s = sockatmark(*RemoteFD);
if (s < 0)
remote_failure_error();
if (s)
break;
- (void) read(RemoteFD, p, q-p);
+ (void) read(*RemoteFD, p, q-p);
}
while (p < q) {
- int n = read(RemoteFD, p, q-p);
+ int n = read(*RemoteFD, p, q-p);
if (n <= 0)
break;
@@ -555,12 +555,12 @@ error(int actions, char *fmt, ...)
if ((actions & RET) != 0)
return 0;
- if (RemoteFD >= 0) {
- send(RemoteFD, "?", 1, MSG_OOB);
- (void) write(RemoteFD, buffer, p-buffer);
- shutdown(RemoteFD, SHUT_WR);
+ if (*RemoteFD >= 0) {
+ send(*RemoteFD, "?", 1, MSG_OOB);
+ (void) write(*RemoteFD, buffer, p-buffer);
+ shutdown(*RemoteFD, SHUT_WR);
timeout_set(ERROR_TIMEOUT, sig_alrm_die);
- while (read(RemoteFD, buffer, sizeof(buffer)) > 0)
+ while (read(*RemoteFD, buffer, sizeof(buffer)) > 0)
;
}
die();
--
2.33.0
Loading...
马建仓 AI 助手
尝试更多
代码解读
代码找茬
代码优化
1
https://gitee.com/zeuscht/qperf.git
git@gitee.com:zeuscht/qperf.git
zeuscht
qperf
qperf
master

搜索帮助