InvalidServerSideConfigurationException when creating cache using XML - java

I'm new to Terracotta. I want to create a clustered server cache but ran into some difficulties with the configuration files.
Here is my tc-config-terracotta.xml file (with which I launch the Terracotta server):
<?xml version="1.0" encoding="UTF-8"?>
<tc-config xmlns="http://www.terracotta.org/config"
           xmlns:ohr="http://www.terracotta.org/config/offheap-resource">
  <servers>
    <server host="localhost" name="clustered">
      <logs>/path/log/terracotta/server-logs</logs>
    </server>
  </servers>
  <plugins>
    <config>
      <ohr:offheap-resources>
        <ohr:resource name="primary-server-resource" unit="MB">128</ohr:resource>
        <ohr:resource name="secondary-server-resource" unit="MB">96</ohr:resource>
      </ohr:offheap-resources>
    </config>
  </plugins>
</tc-config>
I used the ehcache-clustered-3.3.1-kit to launch the server.
$myPrompt/some/dir/with/ehcache/clustered/server/bin>./start-tc-server.sh -f /path/to/conf/tc-config-terracotta.xml
The server starts with no problem:
2017-06-01 11:29:14,052 INFO - New logging session started.
2017-06-01 11:29:14,066 INFO - Terracotta 5.2.2, as of 2017-03-29 at 15:26:20 PDT (Revision 397a456cfe4b8188dfe8b017a5c14346f79c2fcf from UNKNOWN)
2017-06-01 11:29:14,067 INFO - PID is 6114
2017-06-01 11:29:14,697 INFO - Successfully loaded base configuration from file at '/path/to/conf/tc-config-terracotta.xml'.
2017-06-01 11:29:14,757 INFO - Available Max Runtime Memory: 1822MB
2017-06-01 11:29:14,836 INFO - Log file: '/path/log/terracotta/server-logs/terracotta-server.log'.
2017-06-01 11:29:15,112 INFO - Becoming State[ ACTIVE-COORDINATOR ]
2017-06-01 11:29:15,129 INFO - Terracotta Server instance has started up as ACTIVE node on 0:0:0:0:0:0:0:0:9510 successfully, and is now ready for work.
Here is the ehcache-terracotta.xml configuration file:
<ehcache:config xmlns:xsi='http://www.w3.org/2001/XMLSchema-instance'
                xmlns:terracotta='http://www.ehcache.org/v3/clustered'
                xmlns:ehcache='http://www.ehcache.org/v3'
                xsi:schemaLocation="http://www.ehcache.org/v3 http://www.ehcache.org/schema/ehcache-core-3.3.xsd
                                    http://www.ehcache.org/v3/clustered http://www.ehcache.org/schema/ehcache-clustered-ext-3.3.xsd">
  <ehcache:service>
    <terracotta:cluster>
      <terracotta:connection url="terracotta://localhost:9510/clustered" />
      <terracotta:server-side-config auto-create="true">
        <terracotta:default-resource from="primary-server-resource" />
      </terracotta:server-side-config>
    </terracotta:cluster>
  </ehcache:service>
  <ehcache:cache alias="myTest">
    <ehcache:key-type>java.lang.String</ehcache:key-type>
    <ehcache:value-type>java.lang.String</ehcache:value-type>
    <ehcache:resources>
      <terracotta:clustered-dedicated unit="MB">10</terracotta:clustered-dedicated>
    </ehcache:resources>
    <terracotta:clustered-store consistency="strong" />
  </ehcache:cache>
</ehcache:config>
I have a class to test the configuration:
import java.net.URL;

import org.ehcache.Cache;
import org.ehcache.CacheManager;
import org.ehcache.config.Configuration;
import org.ehcache.config.builders.CacheManagerBuilder;
import org.ehcache.xml.XmlConfiguration;

public class TestTerracottaCacheManager
{
    private static TestTerracottaCacheManager cacheManager = null;

    private CacheManager cm;
    private Cache<Object, Object> cache;

    private static final String DEFAULT_CACHE_NAME = "myTest";

    private String cacheName;

    public static TestTerracottaCacheManager getInstance()
    {
        if (cacheManager == null)
        {
            cacheManager = new TestTerracottaCacheManager();
        }
        return cacheManager;
    }

    private TestTerracottaCacheManager()
    {
        // 1. Create a cache manager
        final URL url =
            TestTerracottaCacheManager.class.getResource("/ehcache-terracotta.xml");
        System.out.println(url);
        Configuration xmlConfig = new XmlConfiguration(url);
        cm = CacheManagerBuilder.newCacheManager(xmlConfig);
        cm.init();
        intializeCache();
    }

    private void intializeCache()
    {
        // 2. Get a cache called "cache1", declared in ehcache.xml
        cache = cm.getCache(cacheName == null ? DEFAULT_CACHE_NAME : cacheName,
            Object.class, Object.class);
        if (cache == null)
        {
            throw new NullPointerException();
        }
    }

    public void put(Object key, Object value)
    {
        cache.put(key, value);
    }

    public Object get(String key)
    {
        // 5. Print out the element
        Object ele = cache.get(key);
        return ele;
    }

    public boolean isKeyInCache(Object key)
    {
        return cache.containsKey(key);
    }

    public void closeCache()
    {
        // 7. shut down the cache manager
        cm.close();
    }

    public static void main(String[] args)
    {
        TestTerracottaCacheManager testCache = TestTerracottaCacheManager.getInstance();
        testCache.put("titi", "1");
        System.out.println(testCache.get("titi"));
        testCache.closeCache();
    }

    public String getCacheName()
    {
        return cacheName;
    }

    public void setCacheName(String cacheName)
    {
        this.cacheName = cacheName;
    }
}
I get an exception. Here is the stack trace:
14:18:38.978 [main] ERROR org.ehcache.core.EhcacheManager - Initialize failed.
Exception in thread "main" org.ehcache.StateTransitionException: Unable to validate cluster tier manager for id clustered
at org.ehcache.core.StatusTransitioner$Transition.failed(StatusTransitioner.java:235)
at org.ehcache.core.EhcacheManager.init(EhcacheManager.java:587)
at fr.test.cache.TestTerracottaCacheManager.<init>(TestTerracottaCacheManager.java:41)
at fr.test.cache.TestTerracottaCacheManager.getInstance(TestTerracottaCacheManager.java:28)
at fr.test.cache.TestTerracottaCacheManager.main(TestTerracottaCacheManager.java:81)
Caused by: org.ehcache.clustered.client.internal.ClusterTierManagerValidationException: Unable to validate cluster tier manager for id clusteredENS
at org.ehcache.clustered.client.internal.ClusterTierManagerClientEntityFactory.retrieve(ClusterTierManagerClientEntityFactory.java:196)
at org.ehcache.clustered.client.internal.service.DefaultClusteringService.autoCreateEntity(DefaultClusteringService.java:215)
at org.ehcache.clustered.client.internal.service.DefaultClusteringService.start(DefaultClusteringService.java:148)
at org.ehcache.core.internal.service.ServiceLocator.startAllServices(ServiceLocator.java:118)
at org.ehcache.core.EhcacheManager.init(EhcacheManager.java:559)
... 3 more
Caused by: org.ehcache.clustered.common.internal.exceptions.InvalidServerSideConfigurationException: Default resource not aligned. Client: primary-server-resource Server: null
at org.ehcache.clustered.common.internal.exceptions.InvalidServerSideConfigurationException.withClientStackTrace(InvalidServerSideConfigurationException.java:43)
at org.ehcache.clustered.common.internal.exceptions.InvalidServerSideConfigurationException.withClientStackTrace(InvalidServerSideConfigurationException.java:22)
at org.ehcache.clustered.common.internal.messages.ResponseCodec.decode(ResponseCodec.java:197)
at org.ehcache.clustered.common.internal.messages.EhcacheCodec.decodeResponse(EhcacheCodec.java:110)
at org.ehcache.clustered.common.internal.messages.EhcacheCodec.decodeResponse(EhcacheCodec.java:37)
at com.tc.object.EntityClientEndpointImpl$InvocationBuilderImpl$1.getWithTimeout(EntityClientEndpointImpl.java:193)
at com.tc.object.EntityClientEndpointImpl$InvocationBuilderImpl$1.getWithTimeout(EntityClientEndpointImpl.java:175)
at org.ehcache.clustered.client.internal.SimpleClusterTierManagerClientEntity.waitFor(SimpleClusterTierManagerClientEntity.java:184)
at org.ehcache.clustered.client.internal.SimpleClusterTierManagerClientEntity.invokeInternal(SimpleClusterTierManagerClientEntity.java:148)
at org.ehcache.clustered.client.internal.SimpleClusterTierManagerClientEntity.validate(SimpleClusterTierManagerClientEntity.java:120)
at org.ehcache.clustered.client.internal.ClusterTierManagerClientEntityFactory.retrieve(ClusterTierManagerClientEntityFactory.java:190)
... 7 more
Caused by: org.ehcache.clustered.common.internal.exceptions.InvalidServerSideConfigurationException: Default resource not aligned. Client: primary-server-resource Server: null
at org.ehcache.clustered.server.EhcacheStateServiceImpl.checkConfigurationCompatibility(EhcacheStateServiceImpl.java:207)
at org.ehcache.clustered.server.EhcacheStateServiceImpl.validate(EhcacheStateServiceImpl.java:194)
at org.ehcache.clustered.server.ClusterTierManagerActiveEntity.validate(ClusterTierManagerActiveEntity.java:253)
at org.ehcache.clustered.server.ClusterTierManagerActiveEntity.invokeLifeCycleOperation(ClusterTierManagerActiveEntity.java:203)
at org.ehcache.clustered.server.ClusterTierManagerActiveEntity.invoke(ClusterTierManagerActiveEntity.java:147)
at org.ehcache.clustered.server.ClusterTierManagerActiveEntity.invoke(ClusterTierManagerActiveEntity.java:57)
at com.tc.objectserver.entity.ManagedEntityImpl.performAction(ManagedEntityImpl.java:741)
at com.tc.objectserver.entity.ManagedEntityImpl.invoke(ManagedEntityImpl.java:488)
at com.tc.objectserver.entity.ManagedEntityImpl.lambda$processInvokeRequest$2(ManagedEntityImpl.java:319)
at com.tc.objectserver.entity.ManagedEntityImpl$SchedulingRunnable.run(ManagedEntityImpl.java:1048)
at com.tc.objectserver.entity.RequestProcessor$EntityRequest.invoke(RequestProcessor.java:170)
at com.tc.objectserver.entity.RequestProcessor$EntityRequest.run(RequestProcessor.java:161)
at com.tc.objectserver.entity.RequestProcessorHandler.handleEvent(RequestProcessorHandler.java:27)
at com.tc.objectserver.entity.RequestProcessorHandler.handleEvent(RequestProcessorHandler.java:23)
at com.tc.async.impl.StageQueueImpl$HandledContext.runWithHandler(StageQueueImpl.java:502)
at com.tc.async.impl.StageImpl$WorkerThread.run(StageImpl.java:192)
I think it's a problem in the XML files, but I'm not sure. Can someone help, please?
Thanks

What the exception tells you is that the configuration of the clustered bits of your cache manager and cache differs between what the cluster knows and what the client asks for.
The most likely explanation is that you ran your client code once with a different config, realised there was an issue or just wanted to change something, and then tried to run the client without destroying the cache manager on the cluster or restarting the server.
You simply need to restart your server to lose all clustered state, since you want a different setup.
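If restarting the server is not convenient, the clustered state can also be wiped from a client. Below is a minimal sketch, assuming the Ehcache 3.x clustered client builders (ClusteringServiceConfigurationBuilder) and PersistentCacheManager.destroy(); the URI matches the one from the question and the class name is just illustrative:
import java.net.URI;

import org.ehcache.PersistentCacheManager;
import org.ehcache.config.builders.CacheManagerBuilder;

import static org.ehcache.clustered.client.config.builders.ClusteringServiceConfigurationBuilder.cluster;

public class WipeClusteredState {
    public static void main(String[] args) throws Exception {
        // Build, but do not initialize, a cache manager pointing at the same cluster tier manager
        PersistentCacheManager cacheManager = CacheManagerBuilder.newCacheManagerBuilder()
            .with(cluster(URI.create("terracotta://localhost:9510/clustered")).autoCreate())
            .build(false);
        // destroy() removes the server-side state for "clustered", so the next run
        // with auto-create="true" starts from a clean slate
        cacheManager.destroy();
    }
}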

I've tried to reproduce your issue in my IDE, copying / pasting your 3 files.
I found an error in intializeCache():
cache = cm.getCache(cacheName == null ? DEFAULT_CACHE_NAME : cacheName,
    Object.class, Object.class);
triggered a:
Exception in thread "main" java.lang.IllegalArgumentException: Cache 'myTest' type is <java.lang.String, java.lang.String>, but you retrieved it with <java.lang.Object, java.lang.Object>
at org.ehcache.core.EhcacheManager.getCache(EhcacheManager.java:162)
at MyXmlClient.intializeCache(MyXmlClient.java:48)
So please make sure that your XML configuration matches your Java code: you used <String, String> in the XML, so use <String, String> in your Java code:
cache = cm.getCache(cacheName == null ? DEFAULT_CACHE_NAME : cacheName,
    String.class, String.class);
Everything else worked fine!
INFO --- [8148202b7ba8914] customer.logger.tsa : Connection successfully established to server at 127.0.0.1:9510
INFO --- [ main] org.ehcache.core.EhcacheManager : Cache 'myTest' created in EhcacheManager.
1
INFO --- [ main] org.ehcache.core.EhcacheManager : Cache 'myTest' removed from EhcacheManager.
INFO --- [ main] o.e.c.c.i.s.DefaultClusteringService : Closing connection to cluster terracotta://localhost:9510
The error you gave comes from a mismatch between the Terracotta server offheap resource used in your client and the Terracotta server offheap configuration; make sure they match! (Copying/pasting your example, they did!)
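For reference, here is a minimal programmatic sketch of that same pairing, assuming the Ehcache 3.3 clustered client builders (class name is illustrative): the name passed to defaultServerResource(...) must be one of the <ohr:resource> names declared in tc-config-terracotta.xml, which is exactly the check that failed with "Default resource not aligned".
import java.net.URI;

import org.ehcache.PersistentCacheManager;
import org.ehcache.config.builders.CacheManagerBuilder;

import static org.ehcache.clustered.client.config.builders.ClusteringServiceConfigurationBuilder.cluster;

public class MatchingOffheapResources {
    public static void main(String[] args) {
        PersistentCacheManager cm = CacheManagerBuilder.newCacheManagerBuilder()
            .with(cluster(URI.create("terracotta://localhost:9510/clustered"))
                .autoCreate()
                // must match an <ohr:resource> name from the server's tc-config
                .defaultServerResource("primary-server-resource"))
            .build(true);
        cm.close();
    }
}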

@AnthonyDahanne I am using the ehcache-clustered-3.8.1-kit to launch the server. I have an ehcache.xml; my Spring Boot application picks up my ehcache.xml automatically, so I am not explicitly writing a cacheManager.
<ehcache:config xmlns:xsi='http://www.w3.org/2001/XMLSchema-instance'
                xmlns:terracotta='http://www.ehcache.org/v3/clustered'
                xmlns:ehcache='http://www.ehcache.org/v3'
                xsi:schemaLocation="http://www.ehcache.org/v3 http://www.ehcache.org/schema/ehcache-core-3.8.xsd
                                    http://www.ehcache.org/v3/clustered http://www.ehcache.org/schema/ehcache-clustered-ext-3.8.xsd">
  <ehcache:service>
    <terracotta:cluster>
      <terracotta:connection url="terracotta://localhost:9410/clustered"/>
      <terracotta:server-side-config auto-create="true">
        <!--<terracotta:default-resource from="default-resource"/>-->
        <terracotta:shared-pool name="shared-pool-expense" unit="MB">100</terracotta:shared-pool>
      </terracotta:server-side-config>
    </terracotta:cluster>
  </ehcache:service>
  <ehcache:cache alias="areaOfCircleCache">
    <ehcache:key-type>java.lang.String</ehcache:key-type>
    <ehcache:value-type>com.db.entity.LogMessage</ehcache:value-type>
    <ehcache:resources>
      <!-- <ehcache:heap unit="entries">100</ehcache:heap>
           <ehcache:offheap unit="MB">10</ehcache:offheap> -->
      <terracotta:clustered-dedicated unit="MB">10</terracotta:clustered-dedicated>
    </ehcache:resources>
  </ehcache:cache>
</ehcache:config>

Related

Broadleaf Commerce Embedded Solr cannot run with root user

I downloaded a fresh 6.1 broadleaf-commerce and ran it successfully on my MacBook via java -javaagent:./admin/target/agents/spring-instrument.jar -jar admin/target/admin.jar. But on my CentOS 7 machine I run sudo java -javaagent:./admin/target/agents/spring-instrument.jar -jar admin/target/admin.jar and get the following error:
2020-10-12 13:20:10.838 INFO 2481 --- [ main] c.b.solr.autoconfigure.SolrServer : Syncing solr config file: jar:file:/home/mynewuser/seafood-broadleaf/admin/target/admin.jar!/BOOT-INF/lib/broadleaf-boot-starter-solr-2.2.1-GA.jar!/solr/standalone/solrhome/configsets/fulfillment_order/conf/solrconfig.xml to: /tmp/solr-7.7.2/solr-7.7.2/server/solr/configsets/fulfillment_order/conf/solrconfig.xml
*** [WARN] *** Your Max Processes Limit is currently 62383.
It should be set to 65000 to avoid operational disruption.
If you no longer wish to see this warning, set SOLR_ULIMIT_CHECKS to false in your profile or solr.in.sh
WARNING: Starting Solr as the root user is a security risk and not considered best practice. Exiting.
Please consult the Reference Guide. To override this check, start with argument '-force'
2020-10-12 13:20:11.021 ERROR 2481 --- [ main] c.b.solr.autoconfigure.SolrServer : Problem starting Solr
Here is the source code of the Solr configuration; I believe it is the place to change the configuration so that Solr runs with the argument -force programmatically.
package com.community.core.config;

import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.broadleafcommerce.core.search.service.SearchService;
import org.broadleafcommerce.core.search.service.solr.SolrConfiguration;
import org.broadleafcommerce.core.search.service.solr.SolrSearchServiceImpl;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.stereotype.Component;

/**
 * @author Phillip Verheyden (phillipuniverse)
 */
@Component
public class ApplicationSolrConfiguration {

    @Value("${solr.url.primary}")
    protected String primaryCatalogSolrUrl;

    @Value("${solr.url.reindex}")
    protected String reindexCatalogSolrUrl;

    @Value("${solr.url.admin}")
    protected String adminCatalogSolrUrl;

    @Bean
    public SolrClient primaryCatalogSolrClient() {
        return new HttpSolrClient.Builder(primaryCatalogSolrUrl).build();
    }

    @Bean
    public SolrClient reindexCatalogSolrClient() {
        return new HttpSolrClient.Builder(reindexCatalogSolrUrl).build();
    }

    @Bean
    public SolrClient adminCatalogSolrClient() {
        return new HttpSolrClient.Builder(adminCatalogSolrUrl).build();
    }

    @Bean
    public SolrConfiguration blCatalogSolrConfiguration() throws IllegalStateException {
        return new SolrConfiguration(primaryCatalogSolrClient(), reindexCatalogSolrClient(), adminCatalogSolrClient());
    }

    @Bean
    protected SearchService blSearchService() {
        return new SolrSearchServiceImpl();
    }
}
Let me preface this by saying you would be better off simply not starting the application as root. If you are in Docker, you can use the USER command to switch to a non-root user.
The Solr server startup in Broadleaf Community is done programmatically via the broadleaf-boot-starter-solr dependency. This is the wrapper around Solr that ties it to the Spring lifecycle. All of the real magic happens in the com.broadleafcommerce.solr.autoconfigure.SolrServer class.
In that class, you will see a startSolr() method. This method is what adds startup arguments to Solr.
In your case, you will need to mostly copy this method wholesale and use cmdLine.addArgument(...) to add additional arguments. Example:
class ForceStartupSolrServer extends SolrServer {

    public ForceStartupSolrServer(SolrProperties props) {
        super(props);
    }

    protected void startSolr() {
        if (!isRunning()) {
            if (!downloadSolrIfApplicable()) {
                throw new IllegalStateException("Could not download or expand Solr, see previous logs for more information");
            }
            stopSolr();
            synchConfig();
            {
                CommandLine cmdLine = new CommandLine(getSolrCommand());
                cmdLine.addArgument("start");
                cmdLine.addArgument("-p");
                cmdLine.addArgument(Integer.toString(props.getPort()));
                // START MODIFICATION
                cmdLine.addArgument("-force");
                // END MODIFICATION
                Executor executor = new DefaultExecutor();
                PumpStreamHandler streamHandler = new PumpStreamHandler(System.out);
                streamHandler.setStopTimeout(1000);
                executor.setStreamHandler(streamHandler);
                try {
                    executor.execute(cmdLine);
                    created = true;
                    checkCoreStatus();
                } catch (IOException e) {
                    LOG.error("Problem starting Solr", e);
                }
            }
        }
    }
}
Then create an @Configuration class to override the blAutoSolrServer bean created by SolrAutoConfiguration (note the specific package requirement for org.broadleafoverrides.config):
package org.broadleafoverrides.config;

import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

@Configuration
public class OverrideConfiguration {

    @Bean
    public ForceStartupSolrServer blAutoSolrServer(SolrProperties props) {
        return new ForceStartupSolrServer(props);
    }
}

EhCache, on-heap tier: No serializer found

I need an on-heap cache, so I'm trying to work with Ehcache v3.5.2.
I have the following test:
public class TestEhCache {

    public static class MyObj {
        String message;

        public MyObj(String message) {
            this.message = message;
        }
    }

    @Test
    public void testDebugLogs() {
        CacheManager cacheManager;
        cacheManager = CacheManagerBuilder.newCacheManagerBuilder().build();
        cacheManager.init();
        Cache<String, MyObj> myCache = cacheManager.createCache("myCache",
            CacheConfigurationBuilder.newCacheConfigurationBuilder(String.class, MyObj.class, ResourcePoolsBuilder.heap(3))
                .build());
    }
}
As a result I see the following warning:
2018-04-26 19:56:26,237 [main] DEBUG org.ehcache.impl.internal.spi.serialization.DefaultSerializationProvider - Serializer for <java.lang.String> : org.ehcache.impl.serialization.StringSerializer#365185bd
2018-04-26 19:56:26,238 [main] DEBUG org.ehcache.core.EhcacheManager - Could not create serializers for myCache
org.ehcache.spi.serialization.UnsupportedTypeException: No serializer found for type 'it.TestEhcache$MyObj'
at org.ehcache.impl.internal.spi.serialization.DefaultSerializationProvider.getSerializerClassFor(DefaultSerializationProvider.java:136)
at org.ehcache.impl.internal.spi.serialization.DefaultSerializationProvider.createSerializer(DefaultSerializationProvider.java:98)
at org.ehcache.impl.internal.spi.serialization.DefaultSerializationProvider.createValueSerializer(DefaultSerializationProvider.java:90)
at org.ehcache.core.EhcacheManager.getStore(EhcacheManager.java:477)
at org.ehcache.core.EhcacheManager.createNewEhcache(EhcacheManager.java:316)
at org.ehcache.core.EhcacheManager.createCache(EhcacheManager.java:265)
at org.ehcache.core.EhcacheManager.createCache(EhcacheManager.java:243)
at it.TestEhcache.testDebugLogs(TestEhcache.java:29)
2018-04-26 19:56:26,243 [main] DEBUG org.ehcache.impl.internal.spi.copy.DefaultCopyProvider - Copier for <java.lang.String> : org.ehcache.impl.copy.IdentityCopier#150c158
2018-04-26 19:56:26,244 [main] DEBUG org.ehcache.impl.internal.spi.copy.DefaultCopyProvider - Copier for <it.TestEhcache$MyObj> : org.ehcache.impl.copy.IdentityCopier#4524411f
2018-04-26 19:56:26,292 [main] DEBUG class org.ehcache.core.Ehcache-myCache - Initialize successful.
2018-04-26 19:56:26,292 [main] INFO org.ehcache.core.EhcacheManager - Cache 'myCache' created in EhcacheManager.
How can I suppress the "No serializer found for type" warning? As I understood, a serializer is not required for the on-heap tier (see Louis Jacomet's reply here).
Decrease the log level - this is printed at DEBUG so it is not technically a warning.
And indeed your on-heap cache will work fine.
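If you want the message gone entirely, one option is to raise the level of the Ehcache loggers. This is a minimal sketch, assuming Logback is the SLF4J backend in your project (not confirmed by the question); the class name is illustrative and the logger name org.ehcache covers both DefaultSerializationProvider and EhcacheManager:
import ch.qos.logback.classic.Level;
import ch.qos.logback.classic.Logger;
import org.slf4j.LoggerFactory;

public class QuietEhcacheDebugLogs {
    public static void main(String[] args) {
        // Cast the SLF4J logger to the Logback implementation to change its level at runtime
        Logger ehcacheLogger = (Logger) LoggerFactory.getLogger("org.ehcache");
        ehcacheLogger.setLevel(Level.INFO); // DEBUG output such as the serializer lookup is no longer printed
    }
}
The same effect can of course be achieved declaratively in your logging configuration by setting the org.ehcache logger to INFO.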

Error starting org.neo4j.kernel.impl.factory.CommunityFacadeFactory

I have a simple Java program trying to connect to the local instance of Neo4j to create a sample database to play with, but I keep getting:
Exception: Error starting org.neo4j.kernel.impl.factory.CommunityFacadeFactory, /home/mleonard/mbig/neodb/testeroo
Trace:[Ljava.lang.StackTraceElement;@7ce7d377
The stack trace is useless and the error is also not overly helpful.
The server is up and running
Starting Neo4j Server console-mode...
2015-08-11 15:28:06.466-0400 INFO Setting startup timeout to 120000ms
2015-08-11 15:28:23.311-0400 INFO Successfully started database
2015-08-11 15:28:25.920-0400 INFO Starting HTTP on port 7474 (24 threads available)
2015-08-11 15:28:26.972-0400 INFO Enabling HTTPS on port 7473
2015-08-11 15:28:28.247-0400 INFO Mounting static content at /webadmin
2015-08-11 15:28:28.704-0400 INFO Mounting static content at /browser
2015-08-11 15:28:30.321-0400 ERROR The class org.neo4j.server.rest.web.CollectUserAgentFilter is not assignable to the class com.sun.jersey.spi.container.ContainerRequestFilter. This class is ignored.
2015-08-11 15:28:31.677-0400 ERROR The class org.neo4j.server.rest.web.CollectUserAgentFilter is not assignable to the class com.sun.jersey.spi.container.ContainerRequestFilter. This class is ignored.
2015-08-11 15:28:32.165-0400 ERROR The class org.neo4j.server.rest.web.CollectUserAgentFilter is not assignable to the class com.sun.jersey.spi.container.ContainerRequestFilter. This class is ignored.
2015-08-11 15:28:33.031-0400 INFO Remote interface ready and available at http://localhost:7474/
And I can see that the database is created, as the files and folders for the new database appear.
import java.io.File;

import org.neo4j.graphdb.GraphDatabaseService;
import org.neo4j.graphdb.RelationshipType;
import org.neo4j.graphdb.factory.GraphDatabaseFactory;

public class Main {

    private static enum RelTypes implements RelationshipType
    {
        WORKS,
        FRIENDS,
        NEMISIS
    }

    public static void main(String[] args)
    {
        System.out.println("Starting ARM4J");
        GraphDatabaseService db = null;
        try
        {
            db = new GraphDatabaseFactory().newEmbeddedDatabaseBuilder(new File("/home/mleonard/mbig/neodb/testeroo"))
                .loadPropertiesFromFile("/home/mleonard/mbig/neo4j-community-2.3.0-M02/conf/neo4j.properties")
                .newGraphDatabase();
            /*
            try( Transaction tx = db.beginTx() )
            {
                Node matty = db.createNode();
                matty.setProperty("name", "Matthew");
                matty.setProperty("age", "31");
                matty.setProperty("sex", "male");

                Node jay = db.createNode();
                jay.setProperty("name", "Jay");
                jay.setProperty("age", "35");
                jay.setProperty("sex", "male");

                org.neo4j.graphdb.Relationship r = matty.createRelationshipTo(jay, RelTypes.WORKS);
                r.setProperty("years", "2");

                tx.success();
            }
            */
        }
        catch(Exception x)
        {
            System.out.println("Exception: " + x.getMessage());
            System.out.println("Trace:" + x.getStackTrace().toString());
        }
        finally
        {
            if( db != null )
            {
                db.shutdown();
            }
        }
        System.out.println("Stopping ARM4J");
    }
}

Connection to Matrikon OPC Simulation Server via Utgard

I am using the Matrikon OPC Server for Simulation and Testing, instead of TOPServer, along with the tutorial HowToStartWithUtgard. I am not able to connect to the server. This is the error that I get:
15:02:18.452 [main] DEBUG o.j.dcom.transport.JIComTransport - Socket closed... Socket[unconnected] host XXX.XXX.XX.X, port 135
15:02:18.453 [main] WARN org.jinterop.dcom.core.JIComServer - Got the class not registered exception , will attempt setting entries based on status flags...
15:02:18.468 [main] INFO org.openscada.opc.lib.da.Server - Failed to connect to server
org.jinterop.dcom.common.JIException: Class not registered. If you are using a DLL/OCX , please make sure it has "DllSurrogate" flag set. Faq A(6) in readme.html. [0x80040154]
at org.jinterop.dcom.core.JIComServer.init(Unknown Source) ~[org.openscada.jinterop.core_2.0.8.201303051454.jar:na]
at org.jinterop.dcom.core.JIComServer.initialise(Unknown Source) ~[org.openscada.jinterop.core_2.0.8.201303051454.jar:na]
at org.jinterop.dcom.core.JIComServer.<init>(Unknown Source) ~[org.openscada.jinterop.core_2.0.8.201303051454.jar:na]
at org.openscada.opc.lib.da.Server.connect(Server.java:117) ~[org.openscada.opc.lib_1.0.0.201303051455.jar:na]
at com.matrikonopc.utgard.tutorial.UtgardReadTutorial.main(UtgardReadTutorial.java:31) [bin/:na]
Caused by: org.jinterop.dcom.common.JIRuntimeException: Class not registered. If you are using a DLL/OCX , please make sure it has "DllSurrogate" flag set. Faq A(6) in readme.html. [0x80040154]
at org.jinterop.dcom.core.JIRemActivation.read(Unknown Source) ~[org.openscada.jinterop.core_2.0.8.201303051454.jar:na]
at ndr.NdrObject.decode(Unknown Source) ~[org.openscada.jinterop.deps_1.0.0.201303051454.jar:na]
at rpc.ConnectionOrientedEndpoint.call(Unknown Source) ~[org.openscada.jinterop.deps_1.0.0.201303051454.jar:na]
at rpc.Stub.call(Unknown Source) ~[org.openscada.jinterop.deps_1.0.0.201303051454.jar:na]
... 5 common frames omitted
15:02:18.469 [main] INFO org.openscada.opc.lib.da.Server - Destroying DCOM session...
15:02:18.470 [main] INFO org.openscada.opc.lib.da.Server - Destroying DCOM session... forked
80040154: Unknown error (80040154)
15:02:18.499 [OPCSessionDestructor] DEBUG org.openscada.opc.lib.da.Server - Starting destruction of DCOM session
15:02:18.500 [OPCSessionDestructor] INFO org.jinterop.dcom.core.JISession - About to destroy 0 sessesion which are linked to this session: 1325311425
15:02:18.500 [OPCSessionDestructor] INFO o.j.dcom.core.JIComOxidRuntime - destroySessionOIDs for session: 1325311425
15:02:18.500 [OPCSessionDestructor] INFO org.openscada.opc.lib.da.Server - Destructed DCOM session
15:02:18.501 [OPCSessionDestructor] INFO org.openscada.opc.lib.da.Server - Session destruction took 27 ms
I do not know where I should register the Class and what Class it refers to.
It is referring to the clsid you're attempting to use -- it is not in the registry. Can you double check that you're using the correct one for Matrikon OPC Simulation Server?
Working demo, tested on Windows 10 and Java 8.
User must have administrator rights on Windows.
Errors that might occur:
00000005: Login error (does the user have administrator rights!?)
8001FFFF: Firewall, RPC dynamic ports are not open (see below)
80040154: Double check CLSID in registry, below HKEY_CLASSES_ROOT
Firewall rules
netsh advfirewall firewall add rule^
name="DCOM-dynamic"^
dir=in^
action=allow^
protocol=TCP^
localport=RPC^
remoteport=49152-65535
rem the next one does not seem to be needed
netsh advfirewall firewall add rule name="DCOM" dir=in action=allow protocol=TCP localport=135
Java code
package demo.opc;

import java.util.concurrent.Executors;
import java.util.logging.Level;
import java.util.logging.Logger;

import org.openscada.opc.lib.common.ConnectionInformation;
import org.openscada.opc.lib.da.AccessBase;
import org.openscada.opc.lib.da.DataCallback;
import org.openscada.opc.lib.da.Item;
import org.openscada.opc.lib.da.ItemState;
import org.openscada.opc.lib.da.Server;
import org.openscada.opc.lib.da.SyncAccess;

public class UtgardReaderDemo {

    /**
     * Main application, arguments are provided as system properties, e.g.<br>
     * java -Dhost="localhost" -Duser="admin" -Dpassword="secret" -jar demo.opc.jar<br>
     * Tested with a Windows user having administrator rights<br>
     * @param args unused
     * @throws Exception in case of unexpected error
     */
    public static void main(String[] args) throws Exception {
        Logger.getLogger("org.jinterop").setLevel(Level.ALL); // Quiet => Level.OFF
        final String host = System.getProperty("host", "localhost");
        final String user = System.getProperty("user", System.getProperty("user.name"));
        final String password = System.getProperty("password");
        // Powershell: Get-ItemPropertyValue 'Registry::HKCR\Matrikon.OPC.Simulation.1\CLSID' '(default)'
        final String clsId = System.getProperty("clsId", "F8582CF2-88FB-11D0-B850-00C0F0104305");
        final String itemId = System.getProperty("itemId", "Saw-toothed Waves.Int2");

        final ConnectionInformation ci = new ConnectionInformation(user, password);
        ci.setHost(host);
        ci.setClsid(clsId);
        final Server server = new Server(ci, Executors.newSingleThreadScheduledExecutor());
        server.connect();

        final AccessBase access = new SyncAccess(server, 1000);
        access.addItem(itemId, new DataCallback() {
            public void changed(final Item item, final ItemState state) {
                System.out.println(state);
            }
        });
        access.bind();
        Thread.sleep(10_000L);
        access.unbind();
    }
}
build.gradle
plugins {
    id 'java-library'
    id 'eclipse'
}

repositories {
    mavenCentral()
}

dependencies {
    implementation 'org.bouncycastle:bcprov-jdk15on:1.60'
    implementation 'org.openscada.utgard:org.openscada.opc.lib:1.5.0'
}

jar {
    manifest {
        attributes(
            'Class-Path': configurations.runtimeClasspath.collect { 'lib/' + it.getName() }.join(' '),
            'Main-Class': 'demo.opc.UtgardReaderDemo'
        )
    }
}

assemble {
    dependsOn 'dependenciesCopy'
}

task dependenciesCopy(type: Copy) {
    group 'dependencies'
    from sourceSets.main.compileClasspath
    into "$libsDir/lib"
}

LEAK: ByteBuf.release() was not called before it's garbage-collected. Spring Reactor TcpServer

I am using reactor-core [1.1.0.RELEASE], reactor-net [1.1.0.RELEASE] (which uses netty-all [4.0.18.Final]), reactor-spring-context [1.1.0.RELEASE] and Spring Reactor TcpServer [Spring 4.0.3.RELEASE].
I have created a simple REST API in Netty for a health check: /health. I followed the gs-reactor-thumbnailer code.
Please see the code below:
import io.netty.handler.codec.http.DefaultFullHttpResponse;
import io.netty.handler.codec.http.FullHttpRequest;
import io.netty.handler.codec.http.FullHttpResponse;
import io.netty.handler.codec.http.HttpHeaders;
import io.netty.handler.codec.http.HttpMethod;
import io.netty.handler.codec.http.HttpResponseStatus;
import io.netty.handler.codec.http.HttpVersion;

import org.springframework.stereotype.Service;

import reactor.function.Consumer;
import reactor.net.NetChannel;

@Service
public class HealthCheckNettyRestApi {

    public Consumer<FullHttpRequest> getResponse(NetChannel<FullHttpRequest, FullHttpResponse> channel, int portNumber) {
        return req -> {
            if (req.getMethod() != HttpMethod.GET) {
                channel.send(badRequest(req.getMethod()
                    + " not supported for this URI"));
            } else {
                DefaultFullHttpResponse resp = new DefaultFullHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.OK);
                resp.content().writeBytes("Hello World".getBytes());
                resp.headers().set(HttpHeaders.Names.CONTENT_TYPE, "text/plain");
                resp.headers().set(HttpHeaders.Names.CONTENT_LENGTH, resp.content().readableBytes());
                //resp.release();
                channel.send((resp));
            }
        };
    }
}
In my Spring Boot application I am wiring it as:
@Bean
public ServerSocketOptions serverSocketOptions() {
    return new NettyServerSocketOptions().
        pipelineConfigurer(pipeline -> pipeline.addLast(new HttpServerCodec()).
            addLast(new HttpObjectAggregator(16 * 1024 * 1024)));
}

@Autowired
private HealthCheckNettyRestApi healthCheck;

@Value("${netty.port:5555}")
private Integer nettyPort;

@Bean
public NetServer<FullHttpRequest, FullHttpResponse> restApi(Environment env, ServerSocketOptions opts) throws InterruptedException {
    NetServer<FullHttpRequest, FullHttpResponse> server =
        new TcpServerSpec<FullHttpRequest, FullHttpResponse>(NettyTcpServer.class)
            .env(env)
            .dispatcher("sync")
            .options(opts)
            .listen(nettyPort)
            .consume(ch -> {
                Stream<FullHttpRequest> in = ch.in();
                log.info("netty server is humming.....");
                in.filter((FullHttpRequest req) -> (req.getUri().matches(NettyRestConstants.HEALTH_CHECK)))
                    .when(Throwable.class, NettyHttpSupport.errorHandler(ch))
                    .consume(healthCheck.getResponse(ch, nettyPort));
            }).get();
    server.start().await(); // this is working as Tomcat is also deployed due to Spring JPA & web dependencies
    return server;
}
When I run a benchmark using wrk:
wrk -t6 -c100 -d30s --latency 'http://localhost:8087/health'
I get the following stack trace:
2015-04-22 17:23:21.072] - 16497 ERROR [reactor-tcp-io-22] --- i.n.u.ResourceLeakDetector: LEAK: ByteBuf.release() was not called before it's garbage-collected. Enable advanced leak reporting to find out where the leak occurred. To enable advanced leak reporting, specify the JVM option '-Dio.netty.leakDetectionLevel=advanced' or call ResourceLeakDetector.setLevel()
2015-04-22 23:09:26.354 ERROR 4308 --- [actor-tcp-io-13] io.netty.util.ResourceLeakDetector : LEAK: ByteBuf.release() was not called before it's garbage-collected.
Recent access records: 0
Created at:
io.netty.buffer.CompositeByteBuf.<init>(CompositeByteBuf.java:59)
io.netty.buffer.Unpooled.compositeBuffer(Unpooled.java:355)
io.netty.handler.codec.http.HttpObjectAggregator.decode(HttpObjectAggregator.java:144)
io.netty.handler.codec.http.HttpObjectAggregator.decode(HttpObjectAggregator.java:49)
io.netty.handler.codec.MessageToMessageDecoder.channelRead(MessageToMessageDecoder.java:89)
io.netty.channel.DefaultChannelHandlerContext.invokeChannelRead(DefaultChannelHandlerContext.java:341)
io.netty.channel.DefaultChannelHandlerContext.fireChannelRead(DefaultChannelHandlerContext.java:327)
io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:155)
io.netty.channel.CombinedChannelDuplexHandler.channelRead(CombinedChannelDuplexHandler.java:148)
io.netty.channel.DefaultChannelHandlerContext.invokeChannelRead(DefaultChannelHandlerContext.java:341)
io.netty.channel.DefaultChannelHandlerContext.fireChannelRead(DefaultChannelHandlerContext.java:327)
io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:785)
io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:116)
io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:494)
io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:461)
io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:378)
io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:350)
io.netty.util.concurrent.SingleThreadEventExecutor$2.run(SingleThreadEventExecutor.java:116)
java.lang.Thread.run(Thread.java:745)
2015-04-22 23:09:55.217 INFO 4308 --- [actor-tcp-io-13] r.n.netty.NettyNetChannelInboundHandler : [id: 0x260faf6d, /127.0.0.1:50275 => /127.0.0.1:8087] Connection reset by peer
2015-04-22 23:09:55.219 ERROR 4308 --- [actor-tcp-io-13] reactor.core.Reactor : Connection reset by peer
java.io.IOException: Connection reset by peer
at sun.nio.ch.FileDispatcherImpl.read0(Native Method)
at sun.nio.ch.SocketDispatcher.read(SocketDispatcher.java:39)
at sun.nio.ch.IOUtil.readIntoNativeBuffer(IOUtil.java:223)
at sun.nio.ch.IOUtil.read(IOUtil.java:192)
at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:380)
at io.netty.buffer.UnpooledUnsafeDirectByteBuf.setBytes(UnpooledUnsafeDirectByteBuf.java:446)
at io.netty.buffer.AbstractByteBuf.writeBytes(AbstractByteBuf.java:871)
at io.netty.channel.socket.nio.NioSocketChannel.doReadBytes(NioSocketChannel.java:224)
at io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:108)
at io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:494)
at io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:461)
at io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:378)
at io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:350)
at io.netty.util.concurrent.SingleThreadEventExecutor$2.run(SingleThreadEventExecutor.java:116)
at java.lang.Thread.run(Thread.java:745)
My analysis: I think that since I am forwarding the DefaultFullHttpResponse to the Spring implementation, the Spring APIs should take care of calling the release() method. BTW, I also tried calling release() from my implementation, but I am still getting the same error.
Could someone tell me what is wrong with the implementation?
