public String set(final String key, final String value) {
  return new JedisClusterCommand<String>(connectionHandler, maxRedirections) {
    @Override
    public String execute(Jedis connection) {
      return connection.set(key, value);
    }
  }.run(key);
}
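For context, this is roughly how the command above is reached from application code; a minimal sketch using the Jedis 2.x-era constructors, with placeholder node address, timeout, and maxRedirections values:

import java.util.HashSet;
import java.util.Set;

import redis.clients.jedis.HostAndPort;
import redis.clients.jedis.JedisCluster;

public class ClusterSetExample {
    public static void main(String[] args) {
        Set<HostAndPort> nodes = new HashSet<HostAndPort>();
        nodes.add(new HostAndPort("127.0.0.1", 7000)); // placeholder node

        // timeout = 2000 ms, maxRedirections = 5 (example values); maxRedirections
        // is the value handed to the JedisClusterCommand shown above
        JedisCluster jedisCluster = new JedisCluster(nodes, 2000, 5);

        // internally builds the anonymous JedisClusterCommand and calls run(key)
        jedisCluster.set("foo", "bar");
    }
}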
public T run(int keyCount, String... keys) {
  if (keys == null || keys.length == 0) {
    throw new JedisClusterException("No way to dispatch this command to Redis Cluster.");
  }
  if (keys.length > 1) {
    int slot = JedisClusterCRC16.getSlot(keys[0]);
    for (int i = 1; i < keyCount; i++) {
      int nextSlot = JedisClusterCRC16.getSlot(keys[i]);
      if (slot != nextSlot) {
        throw new JedisClusterException("No way to dispatch this command to Redis Cluster "
            + "because keys have different slots.");
      }
    }
  }
  return runWithRetries(SafeEncoder.encode(keys[0]), this.redirections, false, false);
}

private T runWithRetries(byte[] key, int redirections, boolean tryRandomNode, boolean asking) {
  if (redirections <= 0) {
    JedisClusterMaxRedirectionsException exception = new JedisClusterMaxRedirectionsException(
        "Too many Cluster redirections? key=" + SafeEncoder.encode(key));
    throw exception;
  }
  Jedis connection = null;
  try {
    if (asking) {
      // TODO: Pipeline asking with the original command to make it
      // faster....
      connection = askConnection.get();
      connection.asking();
      // if asking success, reset asking flag
      asking = false;
    } else {
      if (tryRandomNode) {
        connection = connectionHandler.getConnection();
      } else {
        connection = connectionHandler.getConnectionFromSlot(JedisClusterCRC16.getSlot(key));
      }
    }
    return execute(connection);
  } catch (JedisConnectionException jce) {
    if (tryRandomNode) {
      // maybe all connection is down
      throw jce;
    }
    // release current connection before recursion
    releaseConnection(connection);
    connection = null;
    // retry with random connection
    return runWithRetries(key, redirections - 1, true, asking);
  } catch (JedisRedirectionException jre) {
    // if MOVED redirection occurred,
    if (jre instanceof JedisMovedDataException) {
      // it rebuilds cluster's slot cache
      // recommended by Redis cluster specification
      this.connectionHandler.renewSlotCache(connection);
    }
    // release current connection before recursion or renewing
    releaseConnection(connection);
    connection = null;
    if (jre instanceof JedisAskDataException) {
      asking = true;
      askConnection.set(this.connectionHandler.getConnectionFromNode(jre.getTargetNode()));
    } else if (jre instanceof JedisMovedDataException) {
    } else {
      throw new JedisClusterException(jre);
    }
    return runWithRetries(key, redirections - 1, false, asking);
  } finally {
    releaseConnection(connection);
  }
}
throw new JedisClusterException("No way to dispatch this command to Redis Cluster because keys have different slots.");

3. Parameter explanation
private T runWithRetries(byte[] key, int redirections, boolean tryRandomNode, boolean asking) {
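Going by the runWithRetries implementation above, the four parameters can be annotated as follows (this is a reading of the code, not official Jedis documentation):

// key:           the key the command operates on; its CRC16 slot decides which node to call
// redirections:  how many redirections/retries are still allowed; when it drops to 0,
//                JedisClusterMaxRedirectionsException ("Too many Cluster redirections") is thrown
// tryRandomNode: if true, pick a random node instead of the slot owner (set after a
//                JedisConnectionException, i.e. the slot owner could not be reached)
// asking:        if true, send ASKING on the target node before retrying (set after an
//                ASK redirection during slot migration)
private T runWithRetries(byte[] key, int redirections, boolean tryRandomNode, boolean asking)

In other words, every connection failure, MOVED reply, or ASK reply consumes one redirection; once maxRedirections attempts are used up, the exception in the title is thrown.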
The keys showing up in the "Too many Cluster redirections" exceptions were:

key=v11Pay|huid|wlunm99_561555097
key=play_anchorroom_info_529460
key=v11Pay|huid|qq-qhncnxujax
key=play_anchor_info_qq-luzvfcftnf
key=play_anchor_info_qq-luzvfcftnf
key=play_anchorroom_info_550649
key=play_anchor_info_qq-cfrkukhdsd
key=play_anchor_info_qq-rbufgcqbvk

After checking, all of these keys turned out to live on the same Redis instance (a quick way to verify this is sketched below).
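A minimal sketch of such a check, computing each key's slot client-side so it can be compared against the slot ranges reported by cluster nodes; it assumes a Jedis 2.x classpath where the CRC16 helper lives in redis.clients.util.JedisClusterCRC16 (newer Jedis versions move it to redis.clients.jedis.util), and only a few of the keys are listed:

import redis.clients.util.JedisClusterCRC16;

public class SlotCheck {
    public static void main(String[] args) {
        // a few of the keys taken from the exception log
        String[] keys = {
            "v11Pay|huid|wlunm99_561555097",
            "play_anchorroom_info_529460",
            "play_anchor_info_qq-luzvfcftnf",
            "play_anchorroom_info_550649"
        };
        for (String key : keys) {
            // compare the printed slot numbers with the slot ranges shown by
            // `cluster nodes` to see which instance owns each key
            System.out.println(key + " -> slot " + JedisClusterCRC16.getSlot(key));
        }
    }
}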
We then looked at that Redis instance's log around the time of the exceptions and found that AOF fsync was struggling, which in our experience usually means heavy local disk IO:

17932:M 19 Oct 04:35:30.010 * Asynchronous AOF fsync is taking too long (disk is busy?). Writing the AOF buffer without waiting for fsync to complete, this may slow down Redis.
17932:M 19 Oct 04:35:41.087 * Asynchronous AOF fsync is taking too long (disk is busy?). Writing the AOF buffer without waiting for fsync to complete, this may slow down Redis.
17932:M 19 Oct 04:35:47.044 * Asynchronous AOF fsync is taking too long (disk is busy?). Writing the AOF buffer without waiting for fsync to complete, this may slow down Redis.
17932:M 19 Oct 10:15:51.463 * Starting automatic rewriting of AOF on 1795% growth

Next we checked the machine's IO history with tsar:

tsar --io -n 2 | head -200
Time            rrqms   wrqms      rs      ws   rsecs   wsecs  rqsize  qusize   await   svctm    util
19/10/15-04:00 0.00 164.08 0.01 34.52 0.04 745.15 21.58 0.00 25.30 4.13 14.26
19/10/15-04:05 40.38 1.1K 218.49 78.39 13.9K 4.9K 64.55 7.00 24.63 2.80 83.19
19/10/15-04:10 37.15 1.0K 360.58 71.91 13.4K 4.3K 42.04 6.00 14.67 1.70 73.34
19/10/15-04:15 1.99 1.5K 21.98 115.38 588.69 6.6K 53.12 5.00 39.86 1.98 27.14
19/10/15-04:20 40.17 1.0K 278.00 76.79 10.4K 4.2K 42.32 4.00 11.48 1.60 56.85
19/10/15-04:25 78.28 861.13 381.34 62.33 14.3K 3.6K 41.40 4.00 9.85 1.51 66.78
19/10/15-04:30 81.64 913.85 402.37 55.35 15.1K 3.8K 42.18 4.00 9.47 1.41 64.71
19/10/15-04:35 21.92 888.72 145.97 58.00 16.2K 3.7K 99.71 4.00 20.57 3.63 74.04
19/10/15-04:40 39.72 474.01 169.01 48.26 14.3K 2.0K 77.09 3.00 17.83 4.14 89.89
19/10/15-04:45 47.02 537.60 149.41 41.50 16.7K 2.3K 101.55 3.00 18.27 4.21 80.35
The disk was clearly busy from about 04:05 onwards (util climbing toward 80-90%, with await rising as well), and the machine's crontab has two log-processing scripts scheduled at exactly 04:00, which lines up with the IO spike and the AOF fsync warnings:

00 04 * * * sh /opt/script/logcron.sh
00 04 * * * sh /opt/script/logremove.sh
Redis Cluster: "Too many Cluster redirections" exception
Original article: http://carlosfu.iteye.com/blog/2251034