Proper Method to Stop Infinispan in Tomcat
paulhethmon Oct 26, 2011 10:07 AM

I've developed some code to use Infinispan as a cache inside my application deployed in Tomcat 6.0.28. In order to start the cache, I have a Listener that creates it and places it in the application context for later use. That all works fine. What's not working is gracefully shutting it down. The same Listener has the contextDestroyed() method to properly call the stop() methods on the individual cache objects and the cache manager object. I can see all of that firing. However, I will end up with a zombie Tomcat process that I have to kill -9 to get rid of. So it's sucking up resources and generally interfering with proper operation at that point. So what's the proper way to stop an Infinispan cache?
Some particulars:
Infinispan 5.0.1 FINAL
Development Platform: Mac OS X 10.5
Deployment Platform: CentOS 5.3
infinispan.xml:
{code:xml}
<global>
<shutdown hookBehavior="DONT_REGISTER"/>
<transport clusterName="clareitySafemls">
<properties>
<property name="configurationFile" value="jgroups-udp.xml"/>
</properties>
</transport>
</global>
<default>
<clustering mode="distribution">
<sync/>
<hash
numOwners="3"
rehashWait="120000"
rehashRpcTimeout="600000"
/>
</clustering>
</default>
{code}
jgroups-udp.xml
{code:xml}
<config xmlns="urn:org:jgroups"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="urn:org:jgroups file:schema/JGroups-2.8.xsd">
<UDP
mcast_addr="239.0.0.1"
mcast_port="46607"
tos="8"
ucast_recv_buf_size="20000000"
ucast_send_buf_size="640000"
mcast_recv_buf_size="25000000"
mcast_send_buf_size="640000"
loopback="true"
discard_incompatible_packets="true"
max_bundle_size="64000"
max_bundle_timeout="30"
ip_ttl="${jgroups.udp.ip_ttl:8}"
enable_bundling="true"
enable_diagnostics="false"
thread_naming_pattern="pl"
thread_pool.enabled="true"
thread_pool.min_threads="2"
thread_pool.max_threads="30"
thread_pool.keep_alive_time="5000"
thread_pool.queue_enabled="false"
thread_pool.queue_max_size="100"
thread_pool.rejection_policy="Discard"
oob_thread_pool.enabled="true"
oob_thread_pool.min_threads="2"
oob_thread_pool.max_threads="30"
oob_thread_pool.keep_alive_time="5000"
oob_thread_pool.queue_enabled="false"
oob_thread_pool.queue_max_size="100"
oob_thread_pool.rejection_policy="Discard"
/>
<PING timeout="3000" num_initial_members="3"/>
<MERGE2 max_interval="30000" min_interval="10000"/>
<FD_SOCK/>
<FD_ALL/>
<BARRIER />
<pbcast.NAKACK use_stats_for_retransmission="false"
exponential_backoff="0"
use_mcast_xmit="true" gc_lag="0"
retransmit_timeout="300,600,1200"
discard_delivered_msgs="true"/>
<UNICAST timeout="300,600,1200"/>
<pbcast.STABLE stability_delay="1000" desired_avg_gossip="50000" max_bytes="1000000"/>
<pbcast.GMS print_local_addr="false" join_timeout="3000" view_bundling="true"/>
<UFC max_credits="500000" min_threshold="0.20"/>
<MFC max_credits="500000" min_threshold="0.20"/>
<FRAG2 frag_size="60000" />
<pbcast.STREAMING_STATE_TRANSFER/>
</config>{code}