
    Clustering using RemoteCacheManager

    bargavi

      Infinispan server version - infinispan-5.1.6.FINAL

      Java - JRE 7

      Trying to create a cluster of two servers on separate machines. The infinispan.xml (kept on the client side) is as follows:

       

      <infinispan xmlns="urn:infinispan:config:5.1"
                  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
                  xsi:schemaLocation="urn:infinispan:config:5.1 http://docs.jboss.org/infinispan/schemas/infinispan-config-5.1.xsd">
         <global>
            <transport>
               <properties>
                  <property name="configurationFile" value="jgroups-tcp.xml"/>
               </properties>
            </transport>
         </global>
         <default>
            <eviction maxEntries="1000"/>
         </default>
      </infinispan>
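
      For reference, this is roughly how I create the cache manager from that file. This is only a minimal sketch: DefaultCacheManager is the embedded manager, the file name/location is an assumption, and I am not sure this is where the clustering configuration belongs when the client uses RemoteCacheManager.

      import org.infinispan.Cache;
      import org.infinispan.manager.DefaultCacheManager;

      public class NodeStartup {
          public static void main(String[] args) throws Exception {
              // Loads the infinispan.xml shown above, which points JGroups at
              // jgroups-tcp.xml via the transport properties.
              DefaultCacheManager cacheManager = new DefaultCacheManager("infinispan.xml");

              // The default cache picks up the <default> settings (eviction maxEntries=1000).
              Cache<String, String> cache = cacheManager.getCache();
              cache.put("key", "value");

              // Lists the cluster members this node currently sees.
              System.out.println("Members: " + cacheManager.getMembers());

              cacheManager.stop();
          }
      }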

       

       

      The jgroups-tcp.xml (also kept on the client side) is as follows:

       

      <!--
        Fast configuration for local mode, ie. all members reside on the same host. Setting ip_ttl to 0 means that
        no multicast packet will make it outside the local host.
        Therefore, this configuration will NOT work to cluster members residing on different hosts !

        Author: Bela Ban
        Version: $Id: fast-local.xml,v 1.9 2009/12/18 14:50:00 belaban Exp $
      -->

      <config xmlns="urn:org:jgroups"
              xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
              xsi:schemaLocation="urn:org:jgroups http://www.jgroups.org/schema/JGroups-2.8.xsd">
         <TCP bind_port="7800" port_range="10"
              recv_buf_size="20000000"
              send_buf_size="640000"
              loopback="false"
              discard_incompatible_packets="true"
              max_bundle_size="64000"
              max_bundle_timeout="30"
              enable_bundling="true"
              enable_unicast_bundling="true"
              enable_diagnostics="true"
              thread_naming_pattern="cl"

              timer_type="new"
              timer.min_threads="4"
              timer.max_threads="10"
              timer.keep_alive_time="3000"
              timer.queue_max_size="1000"
              timer.wheel_size="200"
              timer.tick_time="50"

              thread_pool.enabled="true"
              thread_pool.min_threads="2"
              thread_pool.max_threads="8"
              thread_pool.keep_alive_time="5000"
              thread_pool.queue_enabled="true"
              thread_pool.queue_max_size="100000"
              thread_pool.rejection_policy="discard"

              oob_thread_pool.enabled="true"
              oob_thread_pool.min_threads="1"
              oob_thread_pool.max_threads="8"
              oob_thread_pool.keep_alive_time="5000"
              oob_thread_pool.queue_enabled="false"
              oob_thread_pool.queue_max_size="100"
              oob_thread_pool.rejection_policy="discard"/>

         <MPING bind_addr="${jgroups.bind_addr:10.242.106.15}" break_on_coord_rsp="true"
                mcast_addr="${jgroups.mping.mcast_addr:10.242.106.100}"
                mcast_port="${jgroups.mping.mcast_port:11222}"
                ip_ttl="${jgroups.udp.ip_ttl:2}"
                num_initial_members="3" timeout="2000"/>

         <MERGE2 max_interval="30000"
                 min_interval="10000"/>

         <FD_SOCK/>
         <FD_ALL interval="2000" timeout="5000"/>
         <VERIFY_SUSPECT timeout="500"/>
         <BARRIER/>
         <pbcast.NAKACK use_mcast_xmit="false"
                        retransmit_timeout="100,300,600,1200"
                        discard_delivered_msgs="true"/>
         <UNICAST2 timeout="300,600,1200"/>

         <pbcast.STABLE stability_delay="1000" desired_avg_gossip="50000"
                        max_bytes="10m"/>
         <pbcast.GMS print_local_addr="true" join_timeout="5000"
                     max_bundling_time="30"
                     view_bundling="true"/>
         <UFC max_credits="2M"
              min_threshold="0.4"/>
         <MFC max_credits="2M"
              min_threshold="0.4"/>
         <FRAG2 frag_size="60000"/>
         <pbcast.STATE_TRANSFER/>
      </config>

      How do I make the cluster nodes aware of each other? Also, how do I configure this through RemoteCacheManager?

       

      Kindly throw some light on the same.
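
      For reference, this is the rough client-side sketch I have in mind with RemoteCacheManager. The host names and the Hot Rod port 11222 are placeholders for the two server machines, so this is only an assumption of how the server list should be supplied:

      import java.util.Properties;

      import org.infinispan.client.hotrod.RemoteCache;
      import org.infinispan.client.hotrod.RemoteCacheManager;

      public class HotRodClient {
          public static void main(String[] args) {
              // Placeholder addresses: replace with the actual host names/IPs of the two servers.
              Properties props = new Properties();
              props.put("infinispan.client.hotrod.server_list", "server1:11222;server2:11222");

              RemoteCacheManager remoteCacheManager = new RemoteCacheManager(props);
              RemoteCache<String, String> cache = remoteCacheManager.getCache();

              cache.put("key", "value");
              System.out.println(cache.get("key"));

              remoteCacheManager.stop();
          }
      }

      If the clustering configuration actually belongs on the server side, I assume only the server list is needed here, but I am not sure.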