Failed to start up the second JBoss 5.1.0.GA node; a JGroups TimeoutException was thrown
johnchan_2000hkhk Aug 7, 2013 10:25 PM — Dear all,
I have attached our configuration file for clustering and the error log that was produced after starting up JBoss.
After checking with the netstat tool, it seems the udp-sync port (30402) does not work. Maybe the router doesn't support the udp-sync multicast traffic.
Is there another suggestion for changing the udp-sync setting, as I don't have the rights to change the router configuration?
server.log
2013-07-30 10:07:10,025 [DEBUG] Thread-19 [TRY] [TRYDN] [com.arjuna.ats.jta.logging.loggerI18N] at [ttcp-gp5app1] [com.arjuna.ats.internal.jta.recovery.info.secondpass] Local XARecoveryModule - second pass
2013-07-30 10:07:10,846 [DEBUG] Timer-7,10.123.89.29:43110 [TRY] [TRYDN] [org.jgroups.protocols.FD] at [ttcp-gp5app1] sending are-you-alive msg to 10.123.89.30:54966 (own address=10.123.89.29:43110)
2013-07-30 10:07:10,846 [DEBUG] Timer-7,10.123.89.29:43110 [TRY] [TRYDN] [org.jgroups.protocols.FD] at [ttcp-gp5app1] sending are-you-alive msg to 10.123.89.30:54966 (own address=10.123.89.29:43110)
2013-07-30 10:07:13,332 [DEBUG] Timer-7,10.123.89.29:43110 [TRY] [TRYDN] [org.jgroups.protocols.FD] at [ttcp-gp5app1] sending are-you-alive msg to 10.123.89.30:54966 (own address=10.123.89.29:43110)
2013-07-30 10:07:13,577 [DEBUG] Timer-1,10.123.89.29:7900 [TRY] [TRYDN] [org.jgroups.protocols.FD] at [ttcp-gp5app1] sending are-you-alive msg to 10.123.89.30:7900 (own address=10.123.89.29:7900)
2013-07-30 10:07:16,849 [DEBUG] Timer-7,10.123.89.29:43110 [TRY] [TRYDN] [org.jgroups.protocols.FD] at [ttcp-gp5app1] sending are-you-alive msg to 10.123.89.30:54966 (own address=10.123.89.29:43110)
2013-07-30 10:07:16,850 [DEBUG] Timer-7,10.123.89.29:43110 [TRY] [TRYDN] [org.jgroups.protocols.FD] at [ttcp-gp5app1] sending are-you-alive msg to 10.123.89.30:54966 (own address=10.123.89.29:43110)
2013-07-30 10:07:19,334 [DEBUG] Timer-7,10.123.89.29:43110 [TRY] [TRYDN] [org.jgroups.protocols.FD] at [ttcp-gp5app1] sending are-you-alive msg to 10.123.89.30:54966 (own address=10.123.89.29:43110)
2013-07-30 10:07:19,580 [DEBUG] Timer-3,10.123.89.29:7900 [TRY] [TRYDN] [org.jgroups.protocols.FD] at [ttcp-gp5app1] sending are-you-alive msg to 10.123.89.30:7900 (own address=10.123.89.29:7900)
2013-07-30 10:07:22,852 [DEBUG] Timer-7,10.123.89.29:43110 [TRY] [TRYDN] [org.jgroups.protocols.FD] at [ttcp-gp5app1] sending are-you-alive msg to 10.123.89.30:54966 (own address=10.123.89.29:43110)
2013-07-30 10:07:22,853 [DEBUG] Timer-7,10.123.89.29:43110 [TRY] [TRYDN] [org.jgroups.protocols.FD] at [ttcp-gp5app1] sending are-you-alive msg to 10.123.89.30:54966 (own address=10.123.89.29:43110)
2013-07-30 10:07:25,336 [DEBUG] Timer-7,10.123.89.29:43110 [TRY] [TRYDN] [org.jgroups.protocols.FD] at [ttcp-gp5app1] sending are-you-alive msg to 10.123.89.30:54966 (own address=10.123.89.29:43110)
2013-07-30 10:07:25,583 [DEBUG] Timer-3,10.123.89.29:7900 [TRY] [TRYDN] [org.jgroups.protocols.FD] at [ttcp-gp5app1] sending are-you-alive msg to 10.123.89.30:7900 (own address=10.123.89.29:7900)
2013-07-30 10:07:26,676 [ERROR] main [TRY] [TRYDN] [org.jboss.system.server.profileservice.repository.ScopedProfileServiceController] at [ttcp-gp5app1] Error installing to Create: name=ProfileKey@75bb28[domain=default, server=default, name=farm] state=Configured mode=On Demand requiredState=Installed
java.lang.reflect.InvocationTargetException
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:606)
at org.jboss.system.server.profileservice.repository.AbstractProfileLifeCycleAction.invoke(AbstractProfileLifeCycleAction.java:97)
at org.jboss.system.server.profileservice.repository.AbstractProfileLifeCycleAction.invoke(AbstractProfileLifeCycleAction.java:77)
at org.jboss.system.server.profileservice.repository.AbstractProfileLifeCycleAction.install(AbstractProfileLifeCycleAction.java:49)
at org.jboss.system.server.profileservice.repository.AbstractProfileAction.install(AbstractProfileAction.java:53)
at org.jboss.system.server.profileservice.repository.AbstractProfileService.install(AbstractProfileService.java:361)
at org.jboss.dependency.plugins.AbstractControllerContext.install(AbstractControllerContext.java:348)
at org.jboss.dependency.plugins.AbstractController.install(AbstractController.java:1631)
at org.jboss.dependency.plugins.AbstractController.incrementState(AbstractController.java:934)
at org.jboss.dependency.plugins.AbstractController.resolveContexts(AbstractController.java:1082)
at org.jboss.dependency.plugins.AbstractController.resolveContexts(AbstractController.java:984)
at org.jboss.dependency.plugins.AbstractController.change(AbstractController.java:822)
at org.jboss.dependency.plugins.AbstractController.change(AbstractController.java:553)
at org.jboss.system.server.profileservice.repository.AbstractProfileService.activateProfile(AbstractProfileService.java:306)
at org.jboss.system.server.profileservice.ProfileServiceBootstrap.start(ProfileServiceBootstrap.java:271)
at org.jboss.bootstrap.AbstractServerImpl.start(AbstractServerImpl.java:461)
at org.jboss.Main.boot(Main.java:221)
at org.jboss.Main$1.run(Main.java:556)
at java.lang.Thread.run(Thread.java:724)
Caused by: java.lang.RuntimeException: org.jgroups.TimeoutException: timeout sending message to 10.123.89.30:54966
at org.jboss.profileservice.cluster.repository.DefaultRepositoryClusteringHandler.rethrowAsUnchecked(DefaultRepositoryClusteringHandler.java:951)
at org.jboss.profileservice.cluster.repository.DefaultRepositoryClusteringHandler.installModifications(DefaultRepositoryClusteringHandler.java:630)
at org.jboss.profileservice.cluster.repository.DefaultRepositoryClusteringHandler.synchronizeContent(DefaultRepositoryClusteringHandler.java:324)
at org.jboss.system.server.profileservice.repository.clustered.ClusteredDeploymentRepository.load(ClusteredDeploymentRepository.java:255)
at org.jboss.system.server.profile.repository.AbstractProfile.create(AbstractProfile.java:158)
... 22 more
Caused by: org.jgroups.TimeoutException: timeout sending message to 10.123.89.30:54966
at org.jgroups.blocks.MessageDispatcher.sendMessage(MessageDispatcher.java:606)
at org.jgroups.blocks.RpcDispatcher.callRemoteMethod(RpcDispatcher.java:323)
at org.jgroups.blocks.RpcDispatcher.callRemoteMethod(RpcDispatcher.java:304)
at org.jboss.ha.framework.server.ClusterPartition.callMethodOnNode(ClusterPartition.java:1203)
at org.jboss.profileservice.cluster.repository.DefaultRepositoryClusteringHandler.executePullFromPeer(DefaultRepositoryClusteringHandler.java:741)
at org.jboss.profileservice.cluster.repository.DefaultRepositoryClusteringHandler.executePull(DefaultRepositoryClusteringHandler.java:702)
at org.jboss.profileservice.cluster.repository.DefaultRepositoryClusteringHandler.executeSynchronizationActions(DefaultRepositoryClusteringHandler.java:677)
at org.jboss.profileservice.cluster.repository.DefaultRepositoryClusteringHandler.installModifications(DefaultRepositoryClusteringHandler.java:592)
... 25 more
2013-07-30 10:07:26,678 [ERROR] main [TRY] [TRYDN] [STDERR] at [ttcp-gp5app1] Failed to boot JBoss:
2013-07-30 10:07:26,678 [ERROR] main [TRY] [TRYDN] [STDERR] at [ttcp-gp5app1] java.lang.IllegalStateException: Incompletely deployed:
jgroups-channelfactory-stacks.xml
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE protocol_stacks [
<!-- The 'shared-udp' entity below is textually substituted wherever
     &shared-udp; appears (the 'udp' and 'jbm-control' stacks further down),
     so both of those channels share one UDP transport definition: edit it
     here, once.
     NOTE(review): the default mcast_port here (30403) is also the default
     used by the 'udp-async' stack and by the MPING discovery of the 'tcp'
     and 'tcp-sync' stacks; confirm that this port overlap is intended. -->
<!ENTITY shared-udp '
<!-- UDP transport config meant to be shared between different channels
with different requirements. Message bundling is disabled in this
general-purpose config as it can add latency to synchronous RPCs. -->
<UDP
singleton_name="shared-udp"
mcast_port="${jboss.jgroups.udp.mcast_port:30403}"
mcast_addr="${jboss.partition.udpGroup:228.1.2.3}"
tos="8"
ucast_recv_buf_size="20000000"
ucast_send_buf_size="640000"
mcast_recv_buf_size="25000000"
mcast_send_buf_size="640000"
loopback="true"
discard_incompatible_packets="true"
enable_bundling="false"
max_bundle_size="64000"
max_bundle_timeout="30"
use_incoming_packet_handler="true"
ip_ttl="${jgroups.udp.ip_ttl:2}"
thread_naming_pattern="cl"
timer.num_threads="12"
enable_diagnostics="${jboss.jgroups.enable_diagnostics:true}"
diagnostics_addr="${jboss.jgroups.diagnostics_addr:224.0.0.75}"
diagnostics_port="${jboss.jgroups.diagnostics_port:7500}"
use_concurrent_stack="true"
thread_pool.enabled="true"
thread_pool.min_threads="20"
thread_pool.max_threads="200"
thread_pool.keep_alive_time="5000"
thread_pool.queue_enabled="true"
thread_pool.queue_max_size="1000"
thread_pool.rejection_policy="discard"
oob_thread_pool.enabled="true"
oob_thread_pool.min_threads="1"
oob_thread_pool.max_threads="20"
oob_thread_pool.keep_alive_time="5000"
oob_thread_pool.queue_enabled="false"
oob_thread_pool.queue_max_size="100"
oob_thread_pool.rejection_policy="run"/>
'>
]>
<!--
Standard JGroups protocol stacks definitions, used by the JChannelFactory bean.
Author: Bela Ban, Brian Stansberry
Version: $Id:jgroups-channelfactory-stacks.xml 71313 2008-03-26 19:46:59Z bstansberry@jboss.com $
-->
<protocol_stacks>
<stack name="udp"
description="Default: IP multicast based stack, with flow control.">
<config>
<!-- UDP transport config meant to be shared between different channels,
including a JBoss Messaging channel that uses the 'jbm-control'
stack listed below. Message bundling is disabled, as it can add
latency to synchronous group RPCs. Services that only make
asynchronous RPCs (e.g. JBoss Cache configured for REPL_ASYNC)
and do so in high volume may be able to improve performance by
configuring their cache to use the udp-async stack below.
Services that only make synchronous RPCs (e.g. JBoss Cache
configured for REPL_SYNC or INVALIDATION_SYNC) may be able
to improve performance by using the udp-sync stack below, which
does not include flow control.
The UDP config is included via an XML entity to ensure that
it remains consistent between this stack and the 'jbm-control'
stack below.
-->
&shared-udp;
<!-- Discovery of initial members (multicast ping). -->
<PING timeout="2000" num_initial_members="3"/>
<!-- Merges subgroups back together after a network partition heals. -->
<MERGE2 max_interval="100000" min_interval="20000"/>
<!-- Failure detection: FD_SOCK plus are-you-alive heartbeats (FD).
A member is suspected only after max_tries * timeout
(5 * 6000 ms) without a reply, then re-checked by
VERIFY_SUSPECT before being excluded from the view. -->
<FD_SOCK/>
<FD timeout="6000" max_tries="5" shun="true"/>
<VERIFY_SUSPECT timeout="1500"/>
<!-- Reliable multicast delivery with retransmission (NAKACK)
and reliable unicast (UNICAST). -->
<pbcast.NAKACK use_mcast_xmit="false" gc_lag="0"
retransmit_timeout="300,600,1200,2400,4800"
discard_delivered_msgs="true"/>
<UNICAST timeout="300,600,1200,2400,3600"/>
<!-- Garbage-collects messages that all members have received. -->
<pbcast.STABLE stability_delay="1000" desired_avg_gossip="50000"
max_bytes="400000"/>
<!-- Group membership: joins, leaves, view installation. -->
<pbcast.GMS print_local_addr="true" join_timeout="3000"
shun="true"
view_bundling="true"
view_ack_collection_timeout="5000"/>
<!-- Credit-based flow control; throttles fast senders. -->
<FC max_credits="2000000" min_threshold="0.10"
ignore_synchronous_response="true"/>
<!-- Fragments messages larger than frag_size bytes. -->
<FRAG2 frag_size="60000"/>
<!-- pbcast.STREAMING_STATE_TRANSFER/ -->
<pbcast.STATE_TRANSFER/>
<pbcast.FLUSH timeout="0"/>
</config>
</stack>
<stack name="udp-async"
description="Same as the default 'udp' stack above, except message bundling
is enabled in the transport protocol (enable_bundling=true).
Useful for services that make high-volume asynchronous
RPCs (e.g. high volume JBoss Cache instances configured
for REPL_ASYNC) where message bundling may improve performance.">
<config>
<!-- FIX: enable_bundling was "false", contradicting this stack's name and
the description above; enabled bundling is the only intended
difference from the default 'udp' transport, so it is now "true".
NOTE(review): the default mcast_port (30403) is the same as the
'udp' stack's default; confirm the two stacks are meant to share
the multicast port. -->
<UDP
singleton_name="udp-async"
mcast_port="${jboss.jgroups.udp_async.mcast_port:30403}"
mcast_addr="${jboss.partition.udpGroup:228.1.2.3}"
tos="8"
ucast_recv_buf_size="20000000"
ucast_send_buf_size="640000"
mcast_recv_buf_size="25000000"
mcast_send_buf_size="640000"
loopback="true"
discard_incompatible_packets="true"
enable_bundling="true"
max_bundle_size="64000"
max_bundle_timeout="30"
use_incoming_packet_handler="true"
ip_ttl="${jgroups.udp.ip_ttl:2}"
thread_naming_pattern="cl"
timer.num_threads="12"
enable_diagnostics="${jboss.jgroups.enable_diagnostics:true}"
diagnostics_addr="${jboss.jgroups.diagnostics_addr:224.0.0.75}"
diagnostics_port="${jboss.jgroups.diagnostics_port:7500}"
use_concurrent_stack="true"
thread_pool.enabled="true"
thread_pool.min_threads="8"
thread_pool.max_threads="200"
thread_pool.keep_alive_time="5000"
thread_pool.queue_enabled="true"
thread_pool.queue_max_size="1000"
thread_pool.rejection_policy="discard"
oob_thread_pool.enabled="true"
oob_thread_pool.min_threads="1"
oob_thread_pool.max_threads="8"
oob_thread_pool.keep_alive_time="5000"
oob_thread_pool.queue_enabled="false"
oob_thread_pool.queue_max_size="100"
oob_thread_pool.rejection_policy="run"/>
<!-- Discovery, merging, failure detection: identical to the 'udp' stack. -->
<PING timeout="2000" num_initial_members="3"/>
<MERGE2 max_interval="100000" min_interval="20000"/>
<FD_SOCK/>
<FD timeout="6000" max_tries="5" shun="true"/>
<VERIFY_SUSPECT timeout="1500"/>
<!-- Reliable delivery, stability, membership, flow control and
fragmentation: identical to the 'udp' stack. -->
<pbcast.NAKACK use_mcast_xmit="false" gc_lag="0"
retransmit_timeout="300,600,1200,2400,4800"
discard_delivered_msgs="true"/>
<UNICAST timeout="300,600,1200,2400,3600"/>
<pbcast.STABLE stability_delay="1000" desired_avg_gossip="50000"
max_bytes="400000"/>
<pbcast.GMS print_local_addr="true" join_timeout="3000"
shun="true"
view_bundling="true"
view_ack_collection_timeout="5000"/>
<FC max_credits="2000000" min_threshold="0.10"
ignore_synchronous_response="true"/>
<FRAG2 frag_size="60000"/>
<!-- pbcast.STREAMING_STATE_TRANSFER/ -->
<pbcast.STATE_TRANSFER/>
<pbcast.FLUSH timeout="0"/>
</config>
</stack>
<stack name="udp-sync"
description="IP multicast based stack, without flow control and
without message bundling. This should be used instead
of 'udp' if (1) synchronous calls are used and (2) the
message volume (rate and size) is not that large. Don't
use this configuration if you send messages at a high
sustained rate, or you might run out of memory">
<config>
<!-- NOTE(review): this is the stack implicated by the reported failure.
Its default mcast_port is 30402, the port the poster says does not
work, and the server.log above shows the farm-deployment
synchronization timing out (org.jgroups.TimeoutException sending to
10.123.89.30:54966). If the network between the nodes does not carry
this UDP traffic, verify connectivity on these ports or point the
affected services at the 'tcp-sync' stack below instead. -->
<UDP
singleton_name="udp_sync"
mcast_port="${jboss.jgroups.udp_sync.mcast_port:30402}"
mcast_addr="${jboss.partition.udpGroup:228.1.2.3}"
tos="8"
ucast_recv_buf_size="20000000"
ucast_send_buf_size="640000"
mcast_recv_buf_size="25000000"
mcast_send_buf_size="640000"
loopback="true"
discard_incompatible_packets="true"
enable_bundling="false"
max_bundle_size="64000"
max_bundle_timeout="30"
use_incoming_packet_handler="true"
ip_ttl="${jgroups.udp.ip_ttl:2}"
enable_diagnostics="${jboss.jgroups.enable_diagnostics:true}"
diagnostics_addr="${jboss.jgroups.diagnostics_addr:224.0.0.75}"
diagnostics_port="${jboss.jgroups.diagnostics_port:7500}"
use_concurrent_stack="true"
thread_pool.enabled="true"
thread_pool.min_threads="8"
thread_pool.max_threads="200"
thread_pool.keep_alive_time="5000"
thread_pool.queue_enabled="true"
thread_pool.queue_max_size="1000"
thread_pool.rejection_policy="discard"
oob_thread_pool.enabled="true"
oob_thread_pool.min_threads="1"
oob_thread_pool.max_threads="8"
oob_thread_pool.keep_alive_time="5000"
oob_thread_pool.queue_enabled="false"
oob_thread_pool.queue_max_size="100"
oob_thread_pool.rejection_policy="run"/>
<PING timeout="2000" num_initial_members="3"/>
<MERGE2 max_interval="100000" min_interval="20000"/>
<FD_SOCK/>
<FD timeout="6000" max_tries="5" shun="true"/>
<VERIFY_SUSPECT timeout="1500"/>
<pbcast.NAKACK use_mcast_xmit="false" gc_lag="0"
retransmit_timeout="300,600,1200,2400,4800"
discard_delivered_msgs="true"/>
<UNICAST timeout="300,600,1200,2400,3600"/>
<pbcast.STABLE stability_delay="1000" desired_avg_gossip="50000"
max_bytes="400000"/>
<pbcast.GMS print_local_addr="true" join_timeout="3000"
shun="true"
view_bundling="true"
view_ack_collection_timeout="5000"/>
<!-- No FC protocol: flow control is intentionally omitted in this
sync-oriented stack, per the description above. -->
<FRAG2 frag_size="60000"/>
<!--pbcast.STREAMING_STATE_TRANSFER/ -->
<pbcast.STATE_TRANSFER/>
<pbcast.FLUSH timeout="0"/>
</config>
</stack>
<stack name="tcp"
description="TCP based stack, with flow control and message bundling.
TCP stacks are usually used when IP multicasting cannot
be used in a network, e.g. because it is disabled (e.g.
routers discard multicast)">
<config>
<!-- NOTE(review): although the transport is TCP, the MPING discovery
below still uses IP multicast (mcast_addr/mcast_port attributes);
on a network that drops multicast entirely, switch to the TCPPING
alternative commented out further down. -->
<TCP
singleton_name="tcp"
start_port="${jboss.jgroups.tcp.tcp_port:7600}"
tcp_nodelay="true"
loopback="false"
recv_buf_size="20000000"
send_buf_size="640000"
discard_incompatible_packets="true"
max_bundle_size="64000"
max_bundle_timeout="30"
use_incoming_packet_handler="true"
enable_bundling="true"
use_send_queues="false"
sock_conn_timeout="300"
skip_suspected_members="true"
timer.num_threads="12"
enable_diagnostics="${jboss.jgroups.enable_diagnostics:true}"
diagnostics_addr="${jboss.jgroups.diagnostics_addr:224.0.0.75}"
diagnostics_port="${jboss.jgroups.diagnostics_port:7500}"
use_concurrent_stack="true"
thread_pool.enabled="true"
thread_pool.min_threads="20"
thread_pool.max_threads="200"
thread_pool.keep_alive_time="5000"
thread_pool.queue_enabled="true"
thread_pool.queue_max_size="1000"
thread_pool.rejection_policy="discard"
oob_thread_pool.enabled="true"
oob_thread_pool.min_threads="1"
oob_thread_pool.max_threads="20"
oob_thread_pool.keep_alive_time="5000"
oob_thread_pool.queue_enabled="false"
oob_thread_pool.queue_max_size="100"
oob_thread_pool.rejection_policy="run"/>
<!-- Alternative 1: multicast-based automatic discovery. -->
<MPING timeout="3000"
num_initial_members="3"
mcast_addr="${jboss.partition.udpGroup:228.1.2.3}"
mcast_port="${jgroups.tcp.mping_mcast_port:30403}"
ip_ttl="${jgroups.udp.ip_ttl:2}"/>
<!-- Alternative 2: non multicast-based replacement for MPING. Requires a static configuration
of *all* possible cluster members.
<TCPPING timeout="3000"
initial_hosts="${jgroups.tcpping.initial_hosts:localhost[7600],localhost[7601]}"
port_range="1"
num_initial_members="3"/>
-->
<!-- Merging, failure detection, reliable delivery, stability, membership,
flow control and fragmentation: same settings as the 'udp' stack. -->
<MERGE2 max_interval="100000" min_interval="20000"/>
<FD_SOCK/>
<FD timeout="6000" max_tries="5" shun="true"/>
<VERIFY_SUSPECT timeout="1500"/>
<pbcast.NAKACK use_mcast_xmit="false" gc_lag="0"
retransmit_timeout="300,600,1200,2400,4800"
discard_delivered_msgs="true"/>
<UNICAST timeout="300,600,1200,2400,3600"/>
<pbcast.STABLE stability_delay="1000" desired_avg_gossip="50000"
max_bytes="400000"/>
<pbcast.GMS print_local_addr="true" join_timeout="3000"
shun="true"
view_bundling="true"
view_ack_collection_timeout="5000"/>
<FC max_credits="2000000" min_threshold="0.10"
ignore_synchronous_response="true"/>
<FRAG2 frag_size="60000"/>
<!-- pbcast.STREAMING_STATE_TRANSFER/ -->
<pbcast.STATE_TRANSFER/>
<pbcast.FLUSH timeout="0"/>
</config>
</stack>
<stack name="tcp-sync"
description="TCP based stack, without flow control and without
message bundling. TCP stacks are usually used when IP
multicasting cannot be used in a network (e.g.routers
discard multicast). This configuration should be used
instead of 'tcp' above when (1) synchronous calls are
used and (2) the message volume (rate and size) is not
that large.">
<config>
<!-- NOTE(review): as with the 'tcp' stack, the MPING discovery below still
needs IP multicast; if multicast is blocked between nodes, use the
TCPPING alternative commented out further down. -->
<TCP
singleton_name="tcp_sync"
start_port="${jboss.jgroups.tcp_sync.tcp_port:7650}"
tcp_nodelay="true"
loopback="false"
recv_buf_size="20000000"
send_buf_size="640000"
discard_incompatible_packets="true"
max_bundle_size="64000"
max_bundle_timeout="30"
use_incoming_packet_handler="true"
enable_bundling="false"
use_send_queues="false"
sock_conn_timeout="300"
skip_suspected_members="true"
enable_diagnostics="${jboss.jgroups.enable_diagnostics:true}"
diagnostics_addr="${jboss.jgroups.diagnostics_addr:224.0.0.75}"
diagnostics_port="${jboss.jgroups.diagnostics_port:7500}"
use_concurrent_stack="true"
thread_pool.enabled="true"
thread_pool.min_threads="8"
thread_pool.max_threads="200"
thread_pool.keep_alive_time="5000"
thread_pool.queue_enabled="true"
thread_pool.queue_max_size="1000"
thread_pool.rejection_policy="discard"
oob_thread_pool.enabled="true"
oob_thread_pool.min_threads="1"
oob_thread_pool.max_threads="8"
oob_thread_pool.keep_alive_time="5000"
oob_thread_pool.queue_enabled="false"
oob_thread_pool.queue_max_size="100"
oob_thread_pool.rejection_policy="run"/>
<!-- Alternative 1: multicast-based automatic discovery. -->
<MPING timeout="3000"
num_initial_members="3"
mcast_addr="${jboss.partition.udpGroup:228.1.2.3}"
mcast_port="${jboss.jgroups.tcp_sync.mping_mcast_port:30403}"
ip_ttl="${jgroups.udp.ip_ttl:2}"/>
<!-- Alternative 2: non multicast-based replacement for MPING. Requires a static configuration
of all possible cluster members.
<TCPPING timeout="3000"
initial_hosts="${jgroups.tcpping.initial_hosts:localhost[7650],localhost[7651]}"
port_range="1"
num_initial_members="3"/>
-->
<MERGE2 max_interval="100000" min_interval="20000"/>
<FD_SOCK/>
<FD timeout="6000" max_tries="5" shun="true"/>
<VERIFY_SUSPECT timeout="1500"/>
<pbcast.NAKACK use_mcast_xmit="false" gc_lag="0"
retransmit_timeout="300,600,1200,2400,4800"
discard_delivered_msgs="true"/>
<UNICAST timeout="300,600,1200,2400,3600"/>
<pbcast.STABLE stability_delay="1000" desired_avg_gossip="50000"
max_bytes="400000"/>
<pbcast.GMS print_local_addr="true" join_timeout="3000"
shun="true"
view_bundling="true"
view_ack_collection_timeout="5000"/>
<!-- No FC or FRAG2 here: flow control is omitted by design in this
sync-oriented stack, per the description above. -->
<!-- pbcast.STREAMING_STATE_TRANSFER/ -->
<pbcast.STATE_TRANSFER/>
<pbcast.FLUSH timeout="0"/>
</config>
</stack>
<stack name="jbm-control"
description="Stack optimized for the JBoss Messaging Control Channel">
<config>
<!-- By default we use the same UDP transport protocol config as is
used for the default 'udp' stack defined above. This allows
the JBoss Messaging Control Channel to use the same sockets,
network buffers and thread pools as are used by the other
standard JBoss AS clustered services.
The UDP config is included via an XML entity to ensure that
it remains consistent between this stack and the 'udp'
stack above.
-->
&shared-udp;
<PING timeout="2000"
num_initial_members="3"/>
<MERGE2 max_interval="100000"
min_interval="20000"/>
<FD_SOCK />
<FD timeout="6000" max_tries="5" shun="true"/>
<VERIFY_SUSPECT timeout="1500" />
<BARRIER />
<!-- Unlike the other stacks, NAKACK here retransmits via multicast
(use_mcast_xmit="true") and uses shorter retransmit timeouts. -->
<pbcast.NAKACK use_stats_for_retransmission="false"
exponential_backoff="150"
use_mcast_xmit="true" gc_lag="0"
retransmit_timeout="50,300,600,1200"
discard_delivered_msgs="true"/>
<UNICAST timeout="300,600,1200,2400,3600"/>
<pbcast.STABLE stability_delay="1000" desired_avg_gossip="50000"
max_bytes="400000"/>
<VIEW_SYNC avg_send_interval="10000"/>
<pbcast.GMS print_local_addr="true" join_timeout="3000"
shun="true"
view_bundling="true"/>
<!-- Lower credit ceiling than the 'udp' stack (500000 vs 2000000). -->
<FC max_credits="500000" min_threshold="0.20"
ignore_synchronous_response="true"/>
<FRAG2 frag_size="60000" />
<pbcast.STATE_TRANSFER/>
<!-- FLUSH uses timeout="20000" here, whereas the other stacks use "0". -->
<pbcast.FLUSH timeout="20000"/>
</config>
</stack>
<stack name="jbm-data"
description="Stack optimized for the JBoss Messaging Data Channel">
<config>
<!-- NOTE(review): this data channel runs over TCP (default start_port
7900); the "are-you-alive" heartbeats to :7900 in the server.log
above are consistent with this channel's FD protocol. Its MPING
discovery still uses IP multicast, on port 30401 by default. -->
<TCP singleton_name="jbm-data"
start_port="${jboss.messaging.datachanneltcpport:7900}"
loopback="true"
recv_buf_size="20000000"
send_buf_size="640000"
discard_incompatible_packets="true"
max_bundle_size="64000"
max_bundle_timeout="30"
use_incoming_packet_handler="true"
enable_bundling="false"
use_send_queues="false"
sock_conn_timeout="300"
skip_suspected_members="true"
enable_diagnostics="${jboss.jgroups.enable_diagnostics:true}"
diagnostics_addr="${jboss.jgroups.diagnostics_addr:224.0.0.75}"
diagnostics_port="${jboss.jgroups.diagnostics_port:7500}"
use_concurrent_stack="true"
thread_pool.enabled="true"
thread_pool.min_threads="8"
thread_pool.max_threads="200"
thread_pool.keep_alive_time="5000"
thread_pool.queue_enabled="true"
thread_pool.queue_max_size="500"
thread_pool.rejection_policy="discard"
oob_thread_pool.enabled="true"
oob_thread_pool.min_threads="1"
oob_thread_pool.max_threads="100"
oob_thread_pool.keep_alive_time="5000"
oob_thread_pool.queue_enabled="false"
oob_thread_pool.queue_max_size="100"
oob_thread_pool.rejection_policy="run"/>
<MPING timeout="5000"
mcast_addr="${jboss.partition.udpGroup:228.1.2.3}"
mcast_port="${jboss.messaging.datachanneludpport:30401}"
ip_ttl="${jboss.messaging.ipttl:8}"
num_initial_members="5"
num_ping_requests="3"/>
<MERGE2 max_interval="100000" min_interval="20000"/>
<FD_SOCK/>
<FD timeout="6000" max_tries="5" shun="true"/>
<VERIFY_SUSPECT timeout="1500"/>
<BARRIER/>
<pbcast.NAKACK use_mcast_xmit="false" gc_lag="0"
retransmit_timeout="300,600,1200,2400,4800"
discard_delivered_msgs="true"/>
<UNICAST timeout="300,600,1200,2400,3600"/>
<pbcast.STABLE stability_delay="1000" desired_avg_gossip="50000"
max_bytes="400000"/>
<VIEW_SYNC avg_send_interval="10000"/>
<pbcast.GMS print_local_addr="true" join_timeout="3000"
shun="true" view_bundling="true"/>
<!-- No STATE_TRANSFER or FLUSH protocols in this stack; presumably the
messaging data channel does not need them - TODO confirm against the
JBoss Messaging documentation. -->
</config>
</stack>
</protocol_stacks>