ES search queue fills up, causing ES to reject requests

Elasticsearch | Author: liudongyang | Published: 2019-06-03 | Views: 10006

Caused by: org.elasticsearch.common.util.concurrent.EsRejectedExecutionException: rejected execution of org.elasticsearch.transport.TransportService$7@6f3e918 on EsThreadPoolExecutor[search, queue capacity = 5000, org.elasticsearch.common.util.concurrent.EsThreadPoolExecutor@430f230[Running, pool size = 13, active threads = 13, queued tasks = 5000, completed tasks = 8773540]]
at org.elasticsearch.common.util.concurrent.EsAbortPolicy.rejectedExecution(EsAbortPolicy.java:50) ~[elasticsearch-5.6.1.jar:5.6.1]
at java.util.concurrent.ThreadPoolExecutor.reject(ThreadPoolExecutor.java:823) ~[?:1.8.0_65]
at java.util.concurrent.ThreadPoolExecutor.execute(ThreadPoolExecutor.java:1369) ~[?:1.8.0_65]
at org.elasticsearch.common.util.concurrent.EsThreadPoolExecutor.doExecute(EsThreadPoolExecutor.java:94) ~[elasticsearch-5.6.1.jar:5.6.1]
at org.elasticsearch.common.util.concurrent.EsThreadPoolExecutor.execute(EsThreadPoolExecutor.java:89) ~[elasticsearch-5.6.1.jar:5.6.1]
at org.elasticsearch.transport.TransportService.sendLocalRequest(TransportService.java:640) ~[elasticsearch-5.6.1.jar:5.6.1]
at org.elasticsearch.transport.TransportService.access$000(TransportService.java:74) ~[elasticsearch-5.6.1.jar:5.6.1]
at org.elasticsearch.transport.TransportService$3.sendRequest(TransportService.java:137) ~[elasticsearch-5.6.1.jar:5.6.1]
at org.elasticsearch.transport.TransportService.sendRequestInternal(TransportService.java:586) ~[elasticsearch-5.6.1.jar:5.6.1]
at org.elasticsearch.transport.TransportService.sendRequest(TransportService.java:519) ~[elasticsearch-5.6.1.jar:5.6.1]
at org.elasticsearch.transport.TransportService.sendChildRequest(TransportService.java:546) ~[elasticsearch-5.6.1.jar:5.6.1]
at org.elasticsearch.transport.TransportService.sendChildRequest(TransportService.java:537) ~[elasticsearch-5.6.1.jar:5.6.1]
at org.elasticsearch.action.search.SearchTransportService.sendExecuteQuery(SearchTransportService.java:148) ~[elasticsearch-5.6.1.jar:5.6.1]
at org.elasticsearch.action.search.SearchQueryThenFetchAsyncAction.executePhaseOnShard(SearchQueryThenFetchAsyncAction.java:51) ~[elasticsearch-5.6.1.jar:5.6.1]
at org.elasticsearch.action.search.InitialSearchPhase.performPhaseOnShard(InitialSearchPhase.java:160) ~[elasticsearch-5.6.1.jar:5.6.1]
at org.elasticsearch.action.search.InitialSearchPhase.onShardFailure(InitialSearchPhase.java:104) ~[elasticsearch-5.6.1.jar:5.6.1]
at org.elasticsearch.action.search.InitialSearchPhase.access$100(InitialSearchPhase.java:46) ~[elasticsearch-5.6.1.jar:5.6.1]
at org.elasticsearch.action.search.InitialSearchPhase$1.onFailure(InitialSearchPhase.java:169) ~[elasticsearch-5.6.1.jar:5.6.1]
at org.elasticsearch.action.ActionListenerResponseHandler.handleException(ActionListenerResponseHandler.java:51) ~[elasticsearch-5.6.1.jar:5.6.1]
at org.elasticsearch.transport.TransportService$ContextRestoreResponseHandler.handleException(TransportService.java:1067) ~[elasticsearch-5.6.1.jar:5.6.1]
at org.elasticsearch.transport.TcpTransport.lambda$handleException$16(TcpTransport.java:1456) ~[elasticsearch-5.6.1.jar:5.6.1]
at org.elasticsearch.common.util.concurrent.EsExecutors$1.execute(EsExecutors.java:110) [elasticsearch-5.6.1.jar:5.6.1]
at org.elasticsearch.transport.TcpTransport.handleException(TcpTransport.java:1454) [elasticsearch-5.6.1.jar:5.6.1]
at org.elasticsearch.transport.TcpTransport.handlerResponseError(TcpTransport.java:1446) [elasticsearch-5.6.1.jar:5.6.1]
at org.elasticsearch.transport.TcpTransport.messageReceived(TcpTransport.java:1390) [elasticsearch-5.6.1.jar:5.6.1]
at org.elasticsearch.transport.netty4.Netty4MessageChannelHandler.channelRead(Netty4MessageChannelHandler.java:74) [transport-netty4-5.6.1.jar:5.6.1]
at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362) [netty-transport-4.1.13.Final.jar:4.1.13.Final]
at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348) [netty-transport-4.1.13.Final.jar:4.1.13.Final]
at io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340) [netty-transport-4.1.13.Final.jar:4.1.13.Final]
at io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:310) [netty-codec-4.1.13.Final.jar:4.1.13.Final]
at io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:297) [netty-codec-4.1.13.Final.jar:4.1.13.Final]
at io.netty.handler.codec.ByteToMessageDecoder.callDecode(ByteToMessageDecoder.java:413) [netty-codec-4.1.13.Final.jar:4.1.13.Final]
at io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:265) [netty-codec-4.1.13.Final.jar:4.1.13.Final]
at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362) [netty-transport-4.1.13.Final.jar:4.1.13.Final]
at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348) [netty-transport-4.1.13.Final.jar:4.1.13.Final]
at io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340) [netty-transport-4.1.13.Final.jar:4.1.13.Final]
at io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1334) [netty-transport-4.1.13.Final.jar:4.1.13.Final]
at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362) [netty-transport-4.1.13.Final.jar:4.1.13.Final]
at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348) [netty-transport-4.1.13.Final.jar:4.1.13.Final]
at io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:926) [netty-transport-4.1.13.Final.jar:4.1.13.Final]
at io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:134) [netty-transport-4.1.13.Final.jar:4.1.13.Final]
at io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:644) [netty-transport-4.1.13.Final.jar:4.1.13.Final]
at io.netty.channel.nio.NioEventLoop.processSelectedKeysPlain(NioEventLoop.java:544) [netty-transport-4.1.13.Final.jar:4.1.13.Final]
at io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:498) [netty-transport-4.1.13.Final.jar:4.1.13.Final]
at io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:458) [netty-transport-4.1.13.Final.jar:4.1.13.Final]
at io.netty.util.concurrent.SingleThreadEventExecutor$5.run(SingleThreadEventExecutor.java:858) [netty-common-4.1.13.Final.jar:4.1.13.Final]
at java.lang.Thread.run(Thread.java:745) [?:1.8.0_65]
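
The trace shows the search thread pool is saturated: pool size 13 with all 13 threads active and all 5000 queue slots occupied, so any further query task is rejected. Per-node saturation and rejection counts can be watched with the _cat thread pool API; a minimal sketch:

# v adds a header row; h selects the columns to show
GET _cat/thread_pool/search?v&h=node_name,name,active,queue,rejected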

bellengao - Blog: https://www.jianshu.com/u/e0088e3e2127

Check whether the shards are evenly distributed across your nodes; uneven shard distribution easily leads to full queues. You can replan the shard layout to make fuller use of each node's resources. If shard allocation looks fine, consider scaling out.
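
As a sketch, the _cat APIs are a quick way to see how shards are spread across nodes:

# every shard and the node it lives on
GET _cat/shards?v&h=index,shard,prirep,state,node

# per-node shard counts and disk usage
GET _cat/allocation?v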

liudongyang

Hello, and first of all thank you for taking time out of your busy schedule to reply. I'll go back and check whether the shard layout is unreasonable. What I want to ask is: will scaling out actually solve this problem? My current environment has only two nodes, and the queue still fills up even though I've already raised it to 5000. Would scaling out reduce the pressure on each individual node?
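
Assuming the queue was raised the usual way for a 5.x/6.x cluster, that means the static thread_pool setting in elasticsearch.yml on every node, which only takes effect after a node restart; a minimal sketch:

# elasticsearch.yml on each node (static setting, requires a restart)
thread_pool.search.queue_size: 5000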

aslan1011 - Ops

How did you adjust the queue size? And how did you confirm the change took effect? I'm running into the same problem; my ES version is 6.5.
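
For what it's worth, one common way to verify a change like this is the _cat thread pool API: queue_size is the configured capacity per node, queue the current depth, and rejected the cumulative rejection count, so seeing a non-default queue_size confirms the setting was picked up:

GET _cat/thread_pool/search?v&h=node_name,queue_size,queue,rejected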

code4j - coder github: https://github.com/rpgmakervx

If reads far outnumber writes in your workload, you can try increasing the replica count and reducing the primary shard count, so that search requests are spread across more machines.
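
Note that on an existing index only the replica count can be changed online; in 5.x the primary shard count is fixed at creation (changing it requires a reindex or the _shrink API). A minimal sketch, with my_index as a placeholder name:

# add one more replica per primary so searches spread over more nodes
PUT /my_index/_settings
{
  "index": { "number_of_replicas": 2 }
}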
