In Redis, the slow log of the current instance is kept in memory as a list, and slowlog_entry_id records the ID that will be assigned to the next slow log entry; every time a new entry is inserted, slowlog_entry_id is incremented by one. Both fields live in the global redisServer struct:
struct redisServer {
    ......
    list *slowlog;                     /* SLOWLOG list of commands */
    long long slowlog_entry_id;        /* SLOWLOG current entry ID */
    long long slowlog_log_slower_than; /* SLOWLOG time limit (to get logged) */
    unsigned long slowlog_max_len;     /* SLOWLOG max number of items logged */
    ......
};
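For reference, these fields are initialized at startup by slowlogInit() in slowlog.c. The sketch below is a close paraphrase from memory rather than a verbatim quote; the free method it registers is shown near the end of this section.
/* slowlog.c (paraphrased): create the in-memory list and reset the entry
 * counter. slowlogFreeEntry is registered as the list's free method so that
 * entries are released automatically when their nodes are deleted. */
void slowlogInit(void) {
    server.slowlog = listCreate();
    server.slowlog_entry_id = 0;
    listSetFreeMethod(server.slowlog, slowlogFreeEntry);
}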
The slow log is controlled by two configuration parameters: slowlog-log-slower-than, the execution-time threshold in microseconds above which a command gets logged (a negative value disables the slow log, and 0 logs every command), and slowlog-max-len, the maximum number of entries kept in the list.
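Both parameters can be changed at runtime with CONFIG SET and read back with CONFIG GET. The sketch below uses the hiredis client library; the host, port, and the specific values are placeholders, not recommendations.
#include <stdio.h>
#include <hiredis/hiredis.h>

int main(void) {
    /* Placeholder connection details. */
    redisContext *c = redisConnect("127.0.0.1", 6379);
    if (c == NULL || c->err) {
        fprintf(stderr, "connect failed: %s\n", c ? c->errstr : "out of memory");
        return 1;
    }

    /* Log commands slower than 10 ms (10000 microseconds) and keep at
     * most 128 entries; both values are purely illustrative. */
    redisReply *r = redisCommand(c, "CONFIG SET slowlog-log-slower-than 10000");
    if (r) freeReplyObject(r);
    r = redisCommand(c, "CONFIG SET slowlog-max-len 128");
    if (r) freeReplyObject(r);

    /* Read one of the settings back to confirm. */
    r = redisCommand(c, "CONFIG GET slowlog-log-slower-than");
    if (r && r->type == REDIS_REPLY_ARRAY && r->elements == 2)
        printf("%s = %s\n", r->element[0]->str, r->element[1]->str);
    if (r) freeReplyObject(r);

    redisFree(c);
    return 0;
}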
A single slow log record is stored in a slowlogEntry, which holds the client information, the command and its arguments, the entry ID, the time at which the command was executed, and how long it took; these are essentially the fields that the SLOWLOG GET command reports for each entry.
/* This structure defines an entry inside the slow log list */
typedef struct slowlogEntry {
    robj **argv;
    int argc;
    long long id;       /* Unique entry identifier. */
    long long duration; /* Time spent by the query, in microseconds. */
    time_t time;        /* Unix time at which the query was executed. */
    sds cname;          /* Client name. */
    sds peerid;         /* Client network address. */
} slowlogEntry;
Every time a command is executed (in the call() function), its execution time is measured and used to decide whether the command should be recorded in the slow log. Note that this duration covers only the command's own execution (the c->cmd->proc call below); it does not include the time the request spent waiting in the event loop, nor the time needed to read the request from the client or to send the reply back:
/* Call() is the core of Redis execution of a command. */
void call(client *c, int flags) {
    ......
    /* Call the command. */
    start = server.ustime;
    c->cmd->proc(c);
    duration = ustime()-start;

    /* Log the command into the Slow log if needed, and populate the
     * per-command statistics that we show in INFO commandstats. */
    if (flags & CMD_CALL_SLOWLOG && c->cmd->proc != execCommand) {
        char *latency_event = (c->cmd->flags & CMD_FAST) ?
                              "fast-command" : "command";
        latencyAddSampleIfNeeded(latency_event,duration/1000);
        slowlogPushEntryIfNeeded(c,c->argv,c->argc,duration);
    }
    ......
}
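The microsecond values above come from Redis' ustime() helper (server.ustime holds a cached value of the same clock, refreshed by updateCachedTime()). For reference, ustime() is approximately the following gettimeofday() wrapper; this is a paraphrase, not a verbatim quote.
#include <sys/time.h>

/* server.c (paraphrased): return the current UNIX time in microseconds. */
long long ustime(void) {
    struct timeval tv;
    long long ust;

    gettimeofday(&tv, NULL);
    ust = ((long long)tv.tv_sec) * 1000000;
    ust += tv.tv_usec;
    return ust;
}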
slowlogPushEntryIfNeeded() is the function that tries to insert a slow log entry. It returns immediately if the slow log is disabled (slowlog_log_slower_than < 0), pushes a new entry at the head of the list when the duration reaches the threshold, and then trims the tail until the list is no longer than slowlog_max_len:
/* Push a new entry into the slow log.
 * This function will make sure to trim the slow log accordingly to the
 * configured max length. */
void slowlogPushEntryIfNeeded(client *c, robj **argv, int argc, long long duration) {
    if (server.slowlog_log_slower_than < 0) return; /* Slowlog disabled */
    if (duration >= server.slowlog_log_slower_than)
        listAddNodeHead(server.slowlog,
                        slowlogCreateEntry(c,argv,argc,duration));

    /* Remove old entries if needed. */
    while (listLength(server.slowlog) > server.slowlog_max_len)
        listDelNode(server.slowlog,listLast(server.slowlog));
}
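For comparison, clearing the log with SLOWLOG RESET reuses the same tail-deletion pattern; slowlogReset() in slowlog.c looks roughly like this (paraphrased):
/* slowlog.c (paraphrased): drop every entry; backs the SLOWLOG RESET command. */
void slowlogReset(void) {
    while (listLength(server.slowlog) > 0)
        listDelNode(server.slowlog, listLast(server.slowlog));
}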
When a slow log entry is created (slowlogCreateEntry), the command's arguments are trimmed so that a single entry cannot grow arbitrarily large, and server.slowlog_entry_id is incremented:
/* Create a new slowlog entry.
 * Incrementing the ref count of all the objects retained is up to
 * this function. */
slowlogEntry *slowlogCreateEntry(client *c, robj **argv, int argc, long long duration) {
    slowlogEntry *se = zmalloc(sizeof(*se));
    int j, slargc = argc;

    if (slargc > SLOWLOG_ENTRY_MAX_ARGC) slargc = SLOWLOG_ENTRY_MAX_ARGC;
    se->argc = slargc;
    se->argv = zmalloc(sizeof(robj*)*slargc);
    for (j = 0; j < slargc; j++) {
        /* Logging too many arguments is a useless memory waste, so we stop
         * at SLOWLOG_ENTRY_MAX_ARGC, but use the last argument to specify
         * how many remaining arguments there were in the original command. */
        if (slargc != argc && j == slargc-1) {
            se->argv[j] = createObject(OBJ_STRING,
                sdscatprintf(sdsempty(),"... (%d more arguments)",
                argc-slargc+1));
        } else {
            /* Trim too long strings as well... */
            if (argv[j]->type == OBJ_STRING &&
                sdsEncodedObject(argv[j]) &&
                sdslen(argv[j]->ptr) > SLOWLOG_ENTRY_MAX_STRING)
            {
                sds s = sdsnewlen(argv[j]->ptr, SLOWLOG_ENTRY_MAX_STRING);

                s = sdscatprintf(s,"... (%lu more bytes)",
                    (unsigned long)
                    sdslen(argv[j]->ptr) - SLOWLOG_ENTRY_MAX_STRING);
                se->argv[j] = createObject(OBJ_STRING,s);
            } else if (argv[j]->refcount == OBJ_SHARED_REFCOUNT) {
                se->argv[j] = argv[j];
            } else {
                /* Here we need to duplicate the string objects composing the
                 * argument vector of the command, because those may otherwise
                 * end shared with string objects stored into keys. Having
                 * shared objects between any part of Redis, and the data
                 * structure holding the data, is a problem: FLUSHALL ASYNC
                 * may release the shared string object and create a race. */
                se->argv[j] = dupStringObject(argv[j]);
            }
        }
    }
    se->time = time(NULL);
    se->duration = duration;
    se->id = server.slowlog_entry_id++;
    se->peerid = sdsnew(getClientPeerId(c));
    se->cname = c->name ? sdsnew(c->name->ptr) : sdsempty();
    return se;
}
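The trimming above is bounded by two constants from slowlog.h; the values shown below are the ones commonly found in the Redis source and should be treated as indicative. Also shown, paraphrased, is slowlogFreeEntry(), the list free method registered in slowlogInit(), which drops the references (or duplicates) taken by slowlogCreateEntry() when an entry is evicted or the log is reset.
/* slowlog.h (indicative values) */
#define SLOWLOG_ENTRY_MAX_ARGC   32   /* keep at most 32 arguments per entry */
#define SLOWLOG_ENTRY_MAX_STRING 128  /* cut each argument at 128 bytes */

/* slowlog.c (paraphrased): release a single slow log entry. */
void slowlogFreeEntry(void *septr) {
    slowlogEntry *se = septr;
    int j;

    for (j = 0; j < se->argc; j++)
        decrRefCount(se->argv[j]);
    zfree(se->argv);
    sdsfree(se->peerid);
    sdsfree(se->cname);
    zfree(se);
}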
Because Redis decides whether to record an entry based on the command's actual execution time in the worker thread, slow log information alone cannot tell you whether client requests were slow: the latency a client observes also includes time spent queued behind other commands and the network round trip, none of which is measured here.
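One way to see the gap is to time a command's round trip on the client side and compare it with the duration field that SLOWLOG GET reports for the same command. Below is a minimal sketch using the hiredis client library; the host, port, and key are placeholders.
#include <stdio.h>
#include <sys/time.h>
#include <hiredis/hiredis.h>

/* Microsecond wall clock, same idea as Redis' ustime(). */
static long long now_us(void) {
    struct timeval tv;
    gettimeofday(&tv, NULL);
    return ((long long)tv.tv_sec) * 1000000 + tv.tv_usec;
}

int main(void) {
    redisContext *c = redisConnect("127.0.0.1", 6379);   /* placeholder */
    if (c == NULL || c->err) return 1;

    long long start = now_us();
    redisReply *r = redisCommand(c, "GET somekey");       /* placeholder key */
    long long rtt = now_us() - start;
    if (r) freeReplyObject(r);

    /* The round trip includes queuing and network I/O; the slow log entry
     * (if any) for the same command only covers its execution time. */
    printf("client-side round trip: %lld us\n", rtt);
    printf("now compare with the duration field of SLOWLOG GET\n");

    redisFree(c);
    return 0;
}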
When clients do experience slow Redis requests, start by pinning down the time window in which the problem occurred.
Source: https://www.cnblogs.com/gaogao67/p/15121325.html