Modeshape 2.8.3.Final + infinispan problem
pjakub Jan 17, 2013 12:14 PM — Hi, I'm trying to set up JBoss 4.2.3 (I must use that old version...) with ModeShape 2.8.3.Final and Infinispan 4.2.1.Final.
My config files are as follow:
jcr-config.xml
<?xml version="1.0" encoding="UTF-8" ?>
<!-- ModeShape 2.x repository configuration: one repository ("tlk_repo") backed
     by an Infinispan source ("tlk_repo_src"), plus JGroups-based clustering. -->
<configuration xmlns:mode="http://www.modeshape.org/1.0" xmlns:jcr="http://www.jcp.org/jcr/1.0">
<mode:repositories>
<mode:repository jcr:name="tlk_repo" mode:source="tlk_repo_src">
<mode:options jcr:primaryType="mode:options">
<jaasLoginConfigName jcr:primaryType="mode:option" mode:value=""/>
<systemSourceName jcr:primaryType="mode:option" mode:value=""/>
<!-- Anonymous users are granted every role, including admin/connect. -->
<anonymousUserRoles jcr:primaryType="mode:option" mode:value="readonly,readwrite,admin,connect"/>
<!-- Lucene query indexes live on this path; per the post it sits on an NFS
     mount shared by all nodes, so every cluster node writes to the same
     index files (NOTE(review): this matches the "Stale NFS file handle" and
     missing-segment errors reported below — shared index dirs are suspect). -->
<queryIndexDirectory jcr:primaryType="mode:option" mode:value="/opt/data20/content_repo/index"/>
<queryIndexesUpdatedSynchronously jcr:primaryType="mode:option" mode:value="true"/>
<rebuildQueryIndexOnStartup jcr:primaryType="mode:option" mode:value="ifMissing"/>
</mode:options>
<jcr:nodeTypes>
<!-- Custom node types loaded from a CND file deployed on the server. -->
<mode:resource>//jboss/server/default/deploy/tlk-custom-node-types.cnd</mode:resource>
</jcr:nodeTypes>
</mode:repository>
</mode:repositories>
<mode:sources jcr:primaryType="nt:unstructured">
<!-- Infinispan-backed source; cache configuration is the jcr-infinispan.xml
     file quoted later in the post. Several workspaces are predefined. -->
<mode:source jcr:name="tlk_repo_src"
mode:classname="org.modeshape.connector.infinispan.InfinispanSource"
mode:retryLimit="3" mode:defaultWorkspaceName="default"
mode:predefinedWorkspaceNames="default,global,219,1142,1143,1151"
mode:updatesAllowed="true"
mode:cacheConfigurationName="/jboss/server/default/deploy/modeshape3-jca.rar/jcr-infinispan.xml"
mode:creatingWorkspaceAllowed="true"
mode:rootNodeUuid="d878c1c0-684c-11df-a08a-0800200c9a66">
</mode:source>
</mode:sources>
<!-- ModeShape event-bus clustering. The JGroups UDP stack is embedded as
     CDATA (no comments inside — CDATA content is literal data). It uses a
     different mcast address/port (224.1.8.8:3333) than the Infinispan
     JGroups stack (224.1.8.9:3337), so the two clusters stay separate. -->
<mode:clustering clusterName="tlk-modeshape-cluster">
<configuration><![CDATA[
<config>
<UDP
mcast_addr='224.1.8.8'
mcast_port='3333'
ip_ttl='64'
ip_mcast='true'
mcast_send_buf_size='120000'
mcast_recv_buf_size='80000'
ucast_send_buf_size='120000'
ucast_recv_buf_size='80000'
loopback='true' />
<PING timeout='1000' num_initial_members='2' />
<MERGE2 min_interval='10000' max_interval='20000' />
<FD timeout='10000'/>
<VERIFY_SUSPECT timeout='1500' />
<pbcast.NAKACK retransmit_timeout='600,1200,2400,4800' discard_delivered_msgs='true' use_mcast_xmit='false' />
<UNICAST timeout='600,1200,2400' />
<pbcast.STABLE desired_avg_gossip='20000' max_bytes='400000' />
<FRAG frag_size='8192' />
<pbcast.GMS join_timeout='5000' print_local_addr='true' />
<pbcast.STATE_TRANSFER />
</config>]]>
</configuration>
</mode:clustering>
</configuration>
jcr-infinispan.xml
<?xml version="1.0" encoding="UTF-8"?>
<!-- Infinispan 4.x cache configuration referenced by the ModeShape
     InfinispanSource above (mode:cacheConfigurationName). -->
<infinispan xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns="urn:infinispan:config:4.0">
<global>
<!-- Cluster transport: JGroups stack loaded from the external file quoted
     later in the post (jcr-infinispan-jgroups.xml). -->
<transport>
<properties>
<property name="configurationFile"
value="/jboss/server/default/deploy/modeshape3-jca.rar/jcr-infinispan-jgroups.xml" />
</properties>
</transport>
</global>
<default>
<locking useLockStriping="false"/>
<!-- Distributed mode: each entry kept on 2 owners, synchronous replication,
     L1 near-cache disabled. -->
<clustering mode="distribution">
<sync />
<hash numOwners="2" rehashWait="120000" rehashRpcTimeout="600000" />
<l1 enabled="false" lifespan="600000" />
</clustering>
<!-- shared="true": all nodes persist to the SAME FileCacheStore directory
     (the NFS mount), rather than each node keeping a private store. -->
<loaders shared="true" preload="false">
<loader class="org.infinispan.loaders.file.FileCacheStore"
fetchPersistentState="true" ignoreModifications="false"
purgeOnStartup="false">
<properties>
<property name="location" value="/opt/data20/content_repo/l/store" />
</properties>
</loader>
</loaders>
</default>
</infinispan>
jcr-infinispan-jgroups.xml
<?xml version="1.0" encoding="UTF-8"?>
<!-- JGroups protocol stack for the Infinispan transport (see the
     configurationFile property in jcr-infinispan.xml). Uses its own mcast
     address/port (224.1.8.9:3337), distinct from the ModeShape clustering
     stack (224.1.8.8:3333). -->
<config>
<UDP mcast_addr='224.1.8.9' mcast_port='3337'
ip_ttl='64' ip_mcast='true' mcast_send_buf_size='120000'
mcast_recv_buf_size='80000' ucast_send_buf_size='120000'
ucast_recv_buf_size='80000' loopback='false' />
<PING timeout='1000' num_initial_members='2' />
<MERGE2 min_interval='10000' max_interval='20000' />
<FD timeout='5000'/>
<VERIFY_SUSPECT timeout='1500' />
<pbcast.NAKACK retransmit_timeout='600,1200,2400,5200'
discard_delivered_msgs='true' use_mcast_xmit='false' />
<UNICAST timeout='600,1200,2400' />
<pbcast.STABLE desired_avg_gossip='20000' max_bytes='400000' />
<FRAG frag_size='8192' />
<pbcast.GMS join_timeout='5000' print_local_addr='true' />
<pbcast.STATE_TRANSFER />
<!-- https://issues.jboss.org/browse/ISPN-83, required for Infinispan 4.x -->
<pbcast.FLUSH timeout="0"/>
</config>
/opt/data20 is an NFS share mounted on all machines.
I'm running one server + two webdavs (they have queryExecutionEnabled = false)
Everything works great.
When I start another server everything still works great. It can query modeshape for files - everything is ok.
But when the second instance tries to add some files, I'm getting strange exceptions:
DEBUG [RuleBasedOptimizer] Running query optimizer rule AddAccessNodes
11:41:55,879 DEBUG [RuleBasedOptimizer] Running query optimizer rule PushSelectCriteria
11:41:55,879 DEBUG [RuleBasedOptimizer] Running query optimizer rule PushProjects
11:41:55,880 DEBUG [RuleBasedOptimizer] Running query optimizer rule RewriteAsRangeCriteria
11:44:12,147 ERROR [RepositoryQueryManager] Error updating the query indexes: Error while committing changes to the indexes for the "1142" workspace of the "tlk_repo_src" source: Stale NFS file handle
org.modeshape.search.lucene.LuceneException: Error while committing changes to the indexes for the "1142" workspace of the "tlk_repo_src" source: Stale NFS file handle
at org.modeshape.search.lucene.LuceneSearchSession.commit(LuceneSearchSession.java:347)
at org.modeshape.search.lucene.AbstractLuceneSearchEngine$AbstractLuceneProcessor.commit(AbstractLuceneSearchEngine.java:200)
at org.modeshape.graph.search.SearchEngineProcessor.close(SearchEngineProcessor.java:64)
at org.modeshape.search.lucene.LuceneSearchProcessor.close(LuceneSearchProcessor.java:91)
at org.modeshape.graph.search.SearchEngineIndexer.close(SearchEngineIndexer.java:482)
at org.modeshape.search.lucene.LuceneSearchEngine.index(LuceneSearchEngine.java:282)
at org.modeshape.jcr.RepositoryQueryManager$SelfContained.process(RepositoryQueryManager.java:418)
at org.modeshape.jcr.RepositoryQueryManager$SelfContained$2.notify(RepositoryQueryManager.java:315)
at org.modeshape.graph.observe.ChangeObservers.broadcast(ChangeObservers.java:114)
at org.modeshape.clustering.ClusteredObservationBus$Receiver.receive(ClusteredObservationBus.java:394)
at org.jgroups.JChannel.up(JChannel.java:1490)
at org.jgroups.stack.ProtocolStack.up(ProtocolStack.java:1074)
at org.jgroups.protocols.pbcast.STATE_TRANSFER.up(STATE_TRANSFER.java:150)
at org.jgroups.protocols.pbcast.GMS.up(GMS.java:891)
at org.jgroups.protocols.FRAG.unfragment(FRAG.java:269)
at org.jgroups.protocols.FRAG.up(FRAG.java:151)
at org.jgroups.protocols.pbcast.STABLE.up(STABLE.java:246)
at org.jgroups.protocols.UNICAST.up(UNICAST.java:309)
at org.jgroups.protocols.pbcast.NAKACK.handleMessage(NAKACK.java:838)
at org.jgroups.protocols.pbcast.NAKACK.up(NAKACK.java:667)
at org.jgroups.protocols.VERIFY_SUSPECT.up(VERIFY_SUSPECT.java:133)
at org.jgroups.protocols.FD.up(FD.java:275)
at org.jgroups.protocols.MERGE2.up(MERGE2.java:209)
at org.jgroups.protocols.Discovery.up(Discovery.java:291)
at org.jgroups.protocols.PING.up(PING.java:66)
at org.jgroups.protocols.TP.passMessageUp(TP.java:1102)
at org.jgroups.protocols.TP$3.run(TP.java:1025)
at java.util.concurrent.ThreadPoolExecutor$Worker.runTask(ThreadPoolExecutor.java:886)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:908)
at java.lang.Thread.run(Thread.java:662)
Caused by: java.io.IOException: Stale NFS file handle
at java.io.RandomAccessFile.close0(Native Method)
at java.io.RandomAccessFile.close(RandomAccessFile.java:577)
at org.apache.lucene.store.FSDirectory$FSIndexOutput.close(FSDirectory.java:469)
at org.apache.lucene.index.TermInfosWriter.close(TermInfosWriter.java:243)
at org.apache.lucene.index.TermInfosWriter.close(TermInfosWriter.java:246)
at org.apache.lucene.util.IOUtils.close(IOUtils.java:121)
at org.apache.lucene.index.FormatPostingsFieldsWriter.finish(FormatPostingsFieldsWriter.java:70)
at org.apache.lucene.index.FreqProxTermsWriter.flush(FreqProxTermsWriter.java:138)
at org.apache.lucene.index.TermsHash.flush(TermsHash.java:113)
at org.apache.lucene.index.DocInverter.flush(DocInverter.java:70)
at org.apache.lucene.index.DocFieldProcessor.flush(DocFieldProcessor.java:60)
at org.apache.lucene.index.DocumentsWriter.flush(DocumentsWriter.java:581)
at org.apache.lucene.index.IndexWriter.doFlush(IndexWriter.java:3600)
at org.apache.lucene.index.IndexWriter.flush(IndexWriter.java:3565)
at org.apache.lucene.index.IndexWriter.optimize(IndexWriter.java:2510)
at org.apache.lucene.index.IndexWriter.optimize(IndexWriter.java:2486)
at org.apache.lucene.index.IndexWriter.optimize(IndexWriter.java:2456)
at org.modeshape.search.lucene.LuceneSearchSession.commit(LuceneSearchSession.java:325)
... 29 more
11:45:45,809 DEBUG [RuleBasedOptimizer] Running query optimizer rule org.modeshape.jcr.query.RewritePseudoColumns@4ba5dbd9
11:45:45,809 DEBUG [RuleBasedOptimizer] Running query optimizer rule ReplaceViews
11:45:45,810 DEBUG [RuleBasedOptimizer] Running query optimizer rule CopyCriteria
11:45:45,810 DEBUG [RuleBasedOptimizer] Running query optimizer rule RightOuterToLeftOuterJoins
11:45:45,810 DEBUG [RuleBasedOptim
or:
DEBUG [RuleBasedOptimizer] Running query optimizer rule PushSelectCriteria
12:36:26,900 DEBUG [RuleBasedOptimizer] Running query optimizer rule PushProjects
12:36:26,901 DEBUG [RuleBasedOptimizer] Running query optimizer rule RewriteAsRangeCriteria
12:36:58,417 ERROR [RepositoryQueryManager] Error updating the query indexes: Error while committing changes to the indexes for the "1142" workspace of the "tlk_repo_src" source: /opt/data20/content_repo/index/1142/content/_9.tis (No such file or directory)
org.modeshape.search.lucene.LuceneException: Error while committing changes to the indexes for the "1142" workspace of the "tlk_repo_src" source: /opt/data20/content_repo/index/1142/content/_9.tis (No such file or directory)
at org.modeshape.search.lucene.LuceneSearchSession.commit(LuceneSearchSession.java:347)
at org.modeshape.search.lucene.AbstractLuceneSearchEngine$AbstractLuceneProcessor.commit(AbstractLuceneSearchEngine.java:200)
at org.modeshape.graph.search.SearchEngineProcessor.close(SearchEngineProcessor.java:64)
at org.modeshape.search.lucene.LuceneSearchProcessor.close(LuceneSearchProcessor.java:91)
at org.modeshape.graph.search.SearchEngineIndexer.close(SearchEngineIndexer.java:482)
at org.modeshape.search.lucene.LuceneSearchEngine.index(LuceneSearchEngine.java:282)
at org.modeshape.jcr.RepositoryQueryManager$SelfContained.process(RepositoryQueryManager.java:418)
at org.modeshape.jcr.RepositoryQueryManager$SelfContained$2.notify(RepositoryQueryManager.java:315)
at org.modeshape.graph.observe.ChangeObservers.broadcast(ChangeObservers.java:114)
at org.modeshape.clustering.ClusteredObservationBus$Receiver.receive(ClusteredObservationBus.java:394)
at org.jgroups.JChannel.up(JChannel.java:1490)
at org.jgroups.stack.ProtocolStack.up(ProtocolStack.java:1074)
at org.jgroups.protocols.pbcast.STATE_TRANSFER.up(STATE_TRANSFER.java:150)
at org.jgroups.protocols.pbcast.GMS.up(GMS.java:891)
at org.jgroups.protocols.FRAG.unfragment(FRAG.java:269)
at org.jgroups.protocols.FRAG.up(FRAG.java:151)
at org.jgroups.protocols.pbcast.STABLE.up(STABLE.java:246)
at org.jgroups.protocols.UNICAST.up(UNICAST.java:309)
at org.jgroups.protocols.pbcast.NAKACK.handleMessage(NAKACK.java:838)
at org.jgroups.protocols.pbcast.NAKACK.up(NAKACK.java:667)
at org.jgroups.protocols.VERIFY_SUSPECT.up(VERIFY_SUSPECT.java:133)
at org.jgroups.protocols.FD.up(FD.java:275)
at org.jgroups.protocols.MERGE2.up(MERGE2.java:209)
at org.jgroups.protocols.Discovery.up(Discovery.java:291)
at org.jgroups.protocols.PING.up(PING.java:66)
at org.jgroups.protocols.TP.passMessageUp(TP.java:1102)
at org.jgroups.protocols.TP$IncomingPacket.handleMyMessage(TP.java:1658)
at org.jgroups.protocols.TP$IncomingPacket.run(TP.java:1635)
at java.util.concurrent.ThreadPoolExecutor$Worker.runTask(ThreadPoolExecutor.java:886)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:908)
at java.lang.Thread.run(Thread.java:662)
Caused by: java.io.FileNotFoundException: /opt/data20/content_repo/index/1142/content/_9.tis (No such file or directory)
at java.io.RandomAccessFile.open(Native Method)
at java.io.RandomAccessFile.<init>(RandomAccessFile.java:216)
at org.apache.lucene.store.MMapDirectory.openInput(MMapDirectory.java:214)
at org.apache.lucene.index.TermInfosReader.<init>(TermInfosReader.java:104)
at org.apache.lucene.index.SegmentCoreReaders.<init>(SegmentCoreReaders.java:76)
at org.apache.lucene.index.SegmentReader.get(SegmentReader.java:115)
at org.apache.lucene.index.IndexWriter$ReaderPool.get(IndexWriter.java:710)
at org.apache.lucene.index.IndexWriter$ReaderPool.get(IndexWriter.java:685)
at org.apache.lucene.index.BufferedDeletesStream.applyDeletes(BufferedDeletesStream.java:201)
at org.apache.lucene.index.IndexWriter.doFlush(IndexWriter.java:3628)
at org.apache.lucene.index.IndexWriter.flush(IndexWriter.java:3565)
at org.apache.lucene.index.IndexWriter.optimize(IndexWriter.java:2510)
at org.apache.lucene.index.IndexWriter.optimize(IndexWriter.java:2486)
at org.apache.lucene.index.IndexWriter.optimize(IndexWriter.java:2456)
at org.modeshape.search.lucene.LuceneSearchSession.commit(LuceneSearchSession.java:325)
... 30 more
12:37:33,788 DEBUG [RepositoryConnectionPool] Shutting down repository connection pool for tlk_repo_src
12:37:33,788 DEBUG [RepositoryConnectionPool] Terminated repository connection pool for tlk_repo_src
What could be the reason for that?
To me it seems like all the files are visible and OK thanks to Infinispan, but the indexes are being used incorrectly by the servers — they overwrite each other's files.
I tried running the same configuration, but with the index files pointing to different directories; then I got some exceptions too (like "trying to access /X/Y/Z but /X/Y is the closest file available" — I don't remember the exact error).
Am I missing some obvious config property? Help please — I've spent more than four days trying to figure this out. (I have had other errors, related to classloading, like joda-time twice on the classpath and a wrong JGroups version, but I managed to resolve those.)