single node cluster (local mode) cannot start with IllegalStateException
dex80526 Dec 3, 2012 6:04 PM — Hi all: I have a single-node cluster configured with a cache whose cluster mode is set to "local", and I have several instances with similar configurations running without problems. However, one of the systems just cannot start up.
I made sure there are no duplicate names. Every time I start the application, I get the same error, even if I change the cluster name in infinispan.xml (see below).
I am using Infinispan 5.1.4.Final for this.
I am out of ideas to try. I do not understand the error: cluster 'local_gw' is already connected to singleton transport.
I do not have any other instance running.
thanks
=================
org.infinispan.manager.EmbeddedCacheManagerStartupException: org.infinispan.CacheException: Unable to invoke method public void org.infinispan.remoting.transport.jgroups.JGroupsTransport.start() on object of type JGroupsTransport
at org.infinispan.factories.GlobalComponentRegistry.start(GlobalComponentRegistry.java:223)
at org.infinispan.manager.DefaultCacheManager.wireCache(DefaultCacheManager.java:684)
at org.infinispan.manager.DefaultCacheManager.createCache(DefaultCacheManager.java:649)
at org.infinispan.manager.DefaultCacheManager.getCache(DefaultCacheManager.java:549)
....
Caused by: java.lang.IllegalStateException: cluster 'local_gw' is already connected to singleton transport: [dummy-1354557074615, dummy-1354557038904, dummy-1354557028675, dummy-1354557069508, dummy-1354557054249, dummy-1354557064426, dummy-1354557049152, dummy-1354557059345, dummy-1354557033791, dummy-1354557044023, local_gw]
at org.jgroups.stack.ProtocolStack.startStack(ProtocolStack.java:909)
at org.jgroups.JChannel.startStack(JChannel.java:841)
at org.jgroups.JChannel.connect(JChannel.java:277)
at org.jgroups.JChannel.connect(JChannel.java:261)
at org.infinispan.remoting.transport.jgroups.JGroupsTransport.startJGroupsChannelIfNeeded(JGroupsTransport.java:184)
... 21 more
infinispan.xml:
<infinispan xmlns="urn:infinispan:config:5.1" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="urn:infinispan:config:5.1 http://www.infinispan.org/schemas/infinispan-config-5.1.xsd">
<global>
<transport clusterName="local_gw" machineId="gw" rackId="rack_local_9.3">
<properties>
<property name="configurationFile" value="/test/jgroups-tcp.xml"/>
</properties>
</transport>
<globalJmxStatistics enabled="false"/>
<!--
Used to register JVM shutdown hooks.
hookBehavior: DEFAULT, REGISTER, DONT_REGISTER
-->
<shutdown hookBehavior="DONT_REGISTER"/>
</global>
<default>
<locking concurrencyLevel="5000" isolationLevel="READ_COMMITTED" lockAcquisitionTimeout="60000" useLockStriping="false" writeSkewCheck="false"/>
<!--
Used to register a transaction manager and participate in JTA and XA transactions.
See:
http://community.jboss.org/wiki/Infinispantransactions
-->
<transaction cacheStopTimeout="30000" eagerLockSingleNode="false" lockingMode="OPTIMISTIC" syncCommitPhase="false" syncRollbackPhase="false" transactionManagerLookupClass="org.infinispan.transaction.lookup.JBossStandaloneJTAManagerLookup" useEagerLocking="false"/>
<!--
Enables deadlock detection. See:
http://community.jboss.org/wiki/Infinispantransactions#deadlock
-->
<deadlockDetection enabled="true" spinDuration="1000"/>
<jmxStatistics enabled="false"/>
</default>
<namedCache name="session">
<clustering mode="local">
<stateTransfer fetchInMemoryState="true" timeout="240000"/>
</clustering>
<transaction transactionMode="NON_TRANSACTIONAL" use1PcForAutoCommitTransactions="true"/>
<!-- we do not use eviction in 9.3 yet -->
<eviction maxEntries="300000" strategy="LRU"/>
<!-- time units below are milliseconds -->
<expiration lifespan="-1" maxIdle="-1" wakeUpInterval="-1"/>
</namedCache>
...
jgroups-tcp.xml:
<config xmlns="urn:org:jgroups" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="urn:org:jgroups file:schema/JGroups-3.0.xsd">
<TCP bind_addr="127.0.0.1" bind_port="7900" bundler_type="old" discard_incompatible_packets="true" enable_bundling="true" enable_diagnostics="false" loopback="true" max_bundle_size="64K" max_bundle_timeout="30" oob_thread_pool.enabled="true" oob_thread_pool.keep_alive_time="60000" oob_thread_pool.max_threads="60" oob_thread_pool.min_threads="2" oob_thread_pool.queue_enabled="false" oob_thread_pool.queue_max_size="100" oob_thread_pool.rejection_policy="Discard" port_range="1" recv_buf_size="20M" send_buf_size="640K" singleton_name="tcp" sock_conn_timeout="300" thread_naming_pattern="pl" thread_pool.enabled="true" thread_pool.keep_alive_time="60000" thread_pool.max_threads="60" thread_pool.min_threads="2" thread_pool.queue_enabled="true" thread_pool.queue_max_size="100" thread_pool.rejection_policy="Discard" timer.keep_alive_time="3000" timer.max_threads="10" timer.min_threads="4" timer.queue_max_size="500" timer_type="new" use_send_queues="true"/>
<!-- Ergonomics, new in JGroups 2.11, are disabled by default in TCPPING until JGRP-1253 is resolved -->
<TCPPING ergonomics="false" initial_hosts="127.0.0.1[7900]" num_initial_members="1" port_range="1" timeout="3000"/>
<!--
<MPING bind_addr="${jgroups.bind_addr:127.0.0.1}" break_on_coord_rsp="true"
mcast_addr="${jgroups.udp.mcast_addr:228.6.7.8}" mcast_port="${jgroups.udp.mcast_port:46655}" ip_ttl="${jgroups.udp.ip_ttl:2}"
num_initial_members="3"/>
-->
<MERGE2 max_interval="30000" min_interval="10000"/>
<FD_SOCK port_range="1" start_port="7902"/>
<FD max_tries="3" timeout="15000"/>
<VERIFY_SUSPECT num_msgs="2" timeout="10000"/>
<pbcast.NAKACK discard_delivered_msgs="false" retransmit_timeout="300,600,1200,2400,4800" use_mcast_xmit="false"/>
<UNICAST2 max_bytes="1M" stable_interval="5000" timeout="300,600,1200"/>
<pbcast.STABLE desired_avg_gossip="50000" max_bytes="1M" stability_delay="1000"/>
<pbcast.GMS join_timeout="7000" print_local_addr="false" view_bundling="true"/>
<UFC max_credits="200K" min_threshold="0.20"/>
<MFC max_credits="200K" min_threshold="0.20"/>
<FRAG2 frag_size="60K"/>
<RSVP ack_on_delivery="false" resend_interval="500" timeout="60000"/>
</config>