Riak yokozuna (Solr) search stopped working after join

Are there additional steps you need to take after joining new Riak nodes to a cluster to set up yokozuna/Solr?
Solr was working on one node. When I joined two additional nodes, Solr seems to have stopped. Solr is not running on any of the nodes, including the original node that was working before the cluster was created.
Update: I had the original node leave the cluster and restarted it to test it on its own. It is still throwing the same error.
Update: contents of log/riak/solr.log
2014-11-05 19:34:13,581 [WARN] #CoreAdminHandler.java:495 Creating a core with existing name is not allowed
2014-11-05 19:34:13,582 [ERROR] #SolrException.java:109 org.apache.solr.common.SolrException: Core with name 'normalized_people' already exists.
at org.apache.solr.handler.admin.CoreAdminHandler.handleCreateAction(CoreAdminHandler.java:496)
at org.apache.solr.handler.admin.CoreAdminHandler.handleRequestBody(CoreAdminHandler.java:152)
at org.apache.solr.handler.RequestHandlerBase.handleRequest(RequestHandlerBase.java:135)
at org.apache.solr.servlet.SolrDispatchFilter.handleAdminRequest(SolrDispatchFilter.java:732)
at org.apache.solr.servlet.SolrDispatchFilter.doFilter(SolrDispatchFilter.java:268)
at org.apache.solr.servlet.SolrDispatchFilter.doFilter(SolrDispatchFilter.java:217)
at org.eclipse.jetty.servlet.ServletHandler$CachedChain.doFilter(ServletHandler.java:1419)
at org.eclipse.jetty.servlet.ServletHandler.doHandle(ServletHandler.java:455)
at org.eclipse.jetty.server.handler.ScopedHandler.handle(ScopedHandler.java:137)
at org.eclipse.jetty.security.SecurityHandler.handle(SecurityHandler.java:557)
at org.eclipse.jetty.server.session.SessionHandler.doHandle(SessionHandler.java:231)
at org.eclipse.jetty.server.handler.ContextHandler.doHandle(ContextHandler.java:1075)
at org.eclipse.jetty.servlet.ServletHandler.doScope(ServletHandler.java:384)
at org.eclipse.jetty.server.session.SessionHandler.doScope(SessionHandler.java:193)
at org.eclipse.jetty.server.handler.ContextHandler.doScope(ContextHandler.java:1009)
at org.eclipse.jetty.server.handler.ScopedHandler.handle(ScopedHandler.java:135)
at org.eclipse.jetty.server.handler.ContextHandlerCollection.handle(ContextHandlerCollection.java:255)
at org.eclipse.jetty.server.handler.HandlerCollection.handle(HandlerCollection.java:154)
at org.eclipse.jetty.server.handler.HandlerWrapper.handle(HandlerWrapper.java:116)
at org.eclipse.jetty.server.Server.handle(Server.java:368)
at org.eclipse.jetty.server.AbstractHttpConnection.handleRequest(AbstractHttpConnection.java:489)
at org.eclipse.jetty.server.BlockingHttpConnection.handleRequest(BlockingHttpConnection.java:53)
at org.eclipse.jetty.server.AbstractHttpConnection.headerComplete(AbstractHttpConnection.java:942)
at org.eclipse.jetty.server.AbstractHttpConnection$RequestHandler.headerComplete(AbstractHttpConnection.java:1004)
at org.eclipse.jetty.http.HttpParser.parseNext(HttpParser.java:640)
at org.eclipse.jetty.http.HttpParser.parseAvailable(HttpParser.java:235)
at org.eclipse.jetty.server.BlockingHttpConnection.handle(BlockingHttpConnection.java:72)
at org.eclipse.jetty.server.bio.SocketConnector$ConnectorEndPoint.run(SocketConnector.java:264)
at org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:608)
at org.eclipse.jetty.util.thread.QueuedThreadPool$3.run(QueuedThreadPool.java:543)
at java.lang.Thread.run(Thread.java:745)
2014-11-05 19:34:13,582 [ERROR] #SolrException.java:120 null:org.apache.solr.common.SolrException: Core with name 'normalized_people' already exists.
at org.apache.solr.handler.admin.CoreAdminHandler.handleCreateAction(CoreAdminHandler.java:496)
at org.apache.solr.handler.admin.CoreAdminHandler.handleRequestBody(CoreAdminHandler.java:152)
at org.apache.solr.handler.RequestHandlerBase.handleRequest(RequestHandlerBase.java:135)
at org.apache.solr.servlet.SolrDispatchFilter.handleAdminRequest(SolrDispatchFilter.java:732)
at org.apache.solr.servlet.SolrDispatchFilter.doFilter(SolrDispatchFilter.java:268)
at org.apache.solr.servlet.SolrDispatchFilter.doFilter(SolrDispatchFilter.java:217)
at org.eclipse.jetty.servlet.ServletHandler$CachedChain.doFilter(ServletHandler.java:1419)
at org.eclipse.jetty.servlet.ServletHandler.doHandle(ServletHandler.java:455)
at org.eclipse.jetty.server.handler.ScopedHandler.handle(ScopedHandler.java:137)
at org.eclipse.jetty.security.SecurityHandler.handle(SecurityHandler.java:557)
at org.eclipse.jetty.server.session.SessionHandler.doHandle(SessionHandler.java:231)
at org.eclipse.jetty.server.handler.ContextHandler.doHandle(ContextHandler.java:1075)
at org.eclipse.jetty.servlet.ServletHandler.doScope(ServletHandler.java:384)
at org.eclipse.jetty.server.session.SessionHandler.doScope(SessionHandler.java:193)
at org.eclipse.jetty.server.handler.ContextHandler.doScope(ContextHandler.java:1009)
at org.eclipse.jetty.server.handler.ScopedHandler.handle(ScopedHandler.java:135)
at org.eclipse.jetty.server.handler.ContextHandlerCollection.handle(ContextHandlerCollection.java:255)
at org.eclipse.jetty.server.handler.HandlerCollection.handle(HandlerCollection.java:154)
at org.eclipse.jetty.server.handler.HandlerWrapper.handle(HandlerWrapper.java:116)
at org.eclipse.jetty.server.Server.handle(Server.java:368)
at org.eclipse.jetty.server.AbstractHttpConnection.handleRequest(AbstractHttpConnection.java:489)
at org.eclipse.jetty.server.BlockingHttpConnection.handleRequest(BlockingHttpConnection.java:53)
at org.eclipse.jetty.server.AbstractHttpConnection.headerComplete(AbstractHttpConnection.java:942)
at org.eclipse.jetty.server.AbstractHttpConnection$RequestHandler.headerComplete(AbstractHttpConnection.java:1004)
at org.eclipse.jetty.http.HttpParser.parseNext(HttpParser.java:640)
at org.eclipse.jetty.http.HttpParser.parseAvailable(HttpParser.java:235)
at org.eclipse.jetty.server.BlockingHttpConnection.handle(BlockingHttpConnection.java:72)
at org.eclipse.jetty.server.bio.SocketConnector$ConnectorEndPoint.run(SocketConnector.java:264)
at org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:608)
at org.eclipse.jetty.util.thread.QueuedThreadPool$3.run(QueuedThreadPool.java:543)
at java.lang.Thread.run(Thread.java:745)

No additional steps are necessary. Please check the log files, especially /var/log/riak/solr.log, for clues as to what the error could be.
Based on your description, the reproduction steps should be:
1. Start one Riak node with search enabled and create a search index (normalized_people).
2. Start other Riak nodes with search enabled and join them to the cluster.
3. Check for the error (a per-node check is sketched below).
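Given the "Core with name 'normalized_people' already exists" error in the log above, a quick way to see what each node's Solr knows is to ask Riak's HTTP search API whether the index is visible. A minimal sketch, assuming Riak's default HTTP host and port:

import java.net.HttpURLConnection;
import java.net.URL;

public class CheckIndex {
  public static void main(String[] args) throws Exception {
    // GET /search/index/<name> returns 200 if the index exists on this
    // node and 404 if it does not. Host and port are Riak's defaults.
    URL url = new URL("http://127.0.0.1:8098/search/index/normalized_people");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestMethod("GET");
    System.out.println("HTTP " + conn.getResponseCode());
  }
}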

Related

Unnest the nested PCollection using BeamSQL

I am trying to use Beam SQL to unnest a nested PCollection. Assume a PCollection of Employees and their details, where the details are a nested collection. If I use Beam SQL like "SELECT PCOLLECTION.details FROM PCOLLECTION", I get the nested details as an array collection in a separate PCollection. However, when I try to select a specific column from the nested details collection, I get an error that the column name cannot be found. I then tried Beam SQL similar to BigQuery SQL, "SELECT X.address FROM PCOLLECTION, UNNEST(details) AS X", and got a NullPointerException. I am using Apache Beam version 2.12.0.
I would appreciate it if someone could help with this.
Below is sample data for the nested details value (details has email and phone columns, so there can be n detail entries per row; here there are two):
WARNING: printValue:Row:[[Row:[lourdurajan#gmail.com, 9840618047], Row:[lourdurajan#sanmina.com, 9840618047]]]
Here is the Java stack trace for the second select statement:
SELECT `X`.`email`
FROM `beam`.`PCOLLECTION` AS `PCOLLECTION`,
UNNEST(`PCOLLECTION`.`details`) AS `X`
May 08, 2019 11:23:30 AM org.apache.beam.sdk.extensions.sql.impl.BeamQueryPlanner convertToBeamRel
INFO: SQLPlan>
LogicalProject(email=[$3])
  LogicalCorrelate(correlation=[$cor0], joinType=[inner], requiredColumns=[{2}])
    BeamIOSourceRel(table=[[beam, PCOLLECTION]])
    Uncollect
      LogicalProject(details=[$cor0.details_2])
        LogicalValues(tuples=[[{ 0 }]])
May 08, 2019 11:23:30 AM org.apache.beam.sdk.extensions.sql.impl.BeamQueryPlanner convertToBeamRel
INFO: BEAMPlan>
BeamCalcRel(expr#0..4=[{inputs}], email=[$t3])
  BeamUnnestRel(unnestIndex=[2])
    BeamIOSourceRel(table=[[beam, PCOLLECTION]])
[WARNING]
java.lang.NullPointerException
at org.apache.beam.sdk.extensions.sql.impl.utils.CalciteUtils.toSchema(CalciteUtils.java:171)
at org.apache.beam.sdk.extensions.sql.impl.rel.BeamUnnestRel$Transform.expand(BeamUnnestRel.java:93)
at org.apache.beam.sdk.extensions.sql.impl.rel.BeamUnnestRel$Transform.expand(BeamUnnestRel.java:87)
at org.apache.beam.sdk.Pipeline.applyInternal(Pipeline.java:537)
at org.apache.beam.sdk.Pipeline.applyTransform(Pipeline.java:488)
at org.apache.beam.sdk.extensions.sql.impl.rel.BeamSqlRelUtils.toPCollection(BeamSqlRelUtils.java:66)
at org.apache.beam.sdk.extensions.sql.impl.rel.BeamSqlRelUtils.lambda$buildPCollectionList$0(BeamSqlRelUtils.java:47)
at java.util.stream.ReferencePipeline$3$1.accept(ReferencePipeline.java:193)
at java.util.Iterator.forEachRemaining(Iterator.java:116)
at java.util.Spliterators$IteratorSpliterator.forEachRemaining(Spliterators.java:1801)
at java.util.stream.AbstractPipeline.copyInto(AbstractPipeline.java:481)
at java.util.stream.AbstractPipeline.wrapAndCopyInto(AbstractPipeline.java:471)
at java.util.stream.ReduceOps$ReduceOp.evaluateSequential(ReduceOps.java:708)
at java.util.stream.AbstractPipeline.evaluate(AbstractPipeline.java:234)
at java.util.stream.ReferencePipeline.collect(ReferencePipeline.java:499)
at org.apache.beam.sdk.extensions.sql.impl.rel.BeamSqlRelUtils.buildPCollectionList(BeamSqlRelUtils.java:48)
at org.apache.beam.sdk.extensions.sql.impl.rel.BeamSqlRelUtils.toPCollection(BeamSqlRelUtils.java:64)
at org.apache.beam.sdk.extensions.sql.impl.rel.BeamSqlRelUtils.toPCollection(BeamSqlRelUtils.java:36)
at org.apache.beam.sdk.extensions.sql.SqlTransform.expand(SqlTransform.java:111)
at org.apache.beam.sdk.extensions.sql.SqlTransform.expand(SqlTransform.java:79)
at org.apache.beam.sdk.Pipeline.applyInternal(Pipeline.java:537)
at org.apache.beam.sdk.Pipeline.applyTransform(Pipeline.java:488)
at org.apache.beam.sdk.values.PCollection.apply(PCollection.java:370)
at com.sanmina.BeamSQLUnnest.main(BeamSQLUnnest.java:217)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:498)
at org.codehaus.mojo.exec.ExecJavaMojo$1.run(ExecJavaMojo.java:282)
at java.lang.Thread.run(Thread.java:748)
You can achieve this using BigQueryIO.
String query = "SELECT `X`.`email` "
    + "FROM `beam`.`PCOLLECTION` AS `PCOLLECTION`, "
    + "UNNEST(`PCOLLECTION`.`details`) AS `X`";
BigQueryIO.readTableRows().fromQuery(query).usingStandardSql();
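For context, a minimal sketch of wiring that read into a pipeline (query is the string built above; the step name, args handling, and output are assumptions, not part of the answer):

import com.google.api.services.bigquery.model.TableRow;
import org.apache.beam.sdk.Pipeline;
import org.apache.beam.sdk.io.gcp.bigquery.BigQueryIO;
import org.apache.beam.sdk.options.PipelineOptionsFactory;
import org.apache.beam.sdk.values.PCollection;

Pipeline p = Pipeline.create(PipelineOptionsFactory.fromArgs(args).create());
// fromQuery() executes the UNNEST inside BigQuery itself, so the rows
// arrive in Beam already flattened.
PCollection<TableRow> emails = p.apply("ReadEmails",
    BigQueryIO.readTableRows().fromQuery(query).usingStandardSql());
p.run().waitUntilFinish();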

Avro "not open" exception when writing generic records using Apache Beam

I am using AvroIO.<MyCustomType>writeCustomTypeToGenericRecords() for writing generic records to GCS inside a streaming Dataflow job. For the first few minutes all seems to be working fine; however, after around 10 minutes, the job starts throwing the following error:
java.lang.RuntimeException: org.apache.beam.sdk.util.UserCodeException: org.apache.avro.AvroRuntimeException: not open
com.google.cloud.dataflow.worker.GroupAlsoByWindowsParDoFn$1.output(GroupAlsoByWindowsParDoFn.java:183)
com.google.cloud.dataflow.worker.GroupAlsoByWindowFnRunner$1.outputWindowedValue(GroupAlsoByWindowFnRunner.java:102)
org.apache.beam.runners.core.ReduceFnRunner.lambda$onTrigger$1(ReduceFnRunner.java:1057)
org.apache.beam.runners.core.ReduceFnContextFactory$OnTriggerContextImpl.output(ReduceFnContextFactory.java:438)
org.apache.beam.runners.core.SystemReduceFn.onTrigger(SystemReduceFn.java:125)
org.apache.beam.runners.core.ReduceFnRunner.onTrigger(ReduceFnRunner.java:1060)
org.apache.beam.runners.core.ReduceFnRunner.onTimers(ReduceFnRunner.java:768)
com.google.cloud.dataflow.worker.StreamingGroupAlsoByWindowViaWindowSetFn.processElement(StreamingGroupAlsoByWindowViaWindowSetFn.java:95)
com.google.cloud.dataflow.worker.StreamingGroupAlsoByWindowViaWindowSetFn.processElement(StreamingGroupAlsoByWindowViaWindowSetFn.java:42)
com.google.cloud.dataflow.worker.GroupAlsoByWindowFnRunner.invokeProcessElement(GroupAlsoByWindowFnRunner.java:115)
com.google.cloud.dataflow.worker.GroupAlsoByWindowFnRunner.processElement(GroupAlsoByWindowFnRunner.java:73)
org.apache.beam.runners.core.LateDataDroppingDoFnRunner.processElement(LateDataDroppingDoFnRunner.java:80)
com.google.cloud.dataflow.worker.GroupAlsoByWindowsParDoFn.processElement(GroupAlsoByWindowsParDoFn.java:133)
com.google.cloud.dataflow.worker.util.common.worker.ParDoOperation.process(ParDoOperation.java:43)
com.google.cloud.dataflow.worker.util.common.worker.OutputReceiver.process(OutputReceiver.java:48)
com.google.cloud.dataflow.worker.util.common.worker.ReadOperation.runReadLoop(ReadOperation.java:200)
com.google.cloud.dataflow.worker.util.common.worker.ReadOperation.start(ReadOperation.java:158)
com.google.cloud.dataflow.worker.util.common.worker.MapTaskExecutor.execute(MapTaskExecutor.java:75)
com.google.cloud.dataflow.worker.StreamingDataflowWorker.process(StreamingDataflowWorker.java:1227)
com.google.cloud.dataflow.worker.StreamingDataflowWorker.access$1000(StreamingDataflowWorker.java:136)
com.google.cloud.dataflow.worker.StreamingDataflowWorker$6.run(StreamingDataflowWorker.java:966)
java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
java.lang.Thread.run(Thread.java:745)
Caused by: org.apache.beam.sdk.util.UserCodeException: org.apache.avro.AvroRuntimeException: not open
org.apache.beam.sdk.util.UserCodeException.wrap(UserCodeException.java:34)
org.apache.beam.sdk.io.WriteFiles$WriteShardsIntoTempFilesFn$DoFnInvoker.invokeProcessElement(Unknown Source)
org.apache.beam.runners.core.SimpleDoFnRunner.invokeProcessElement(SimpleDoFnRunner.java:275)
org.apache.beam.runners.core.SimpleDoFnRunner.processElement(SimpleDoFnRunner.java:237)
com.google.cloud.dataflow.worker.StreamingSideInputDoFnRunner.processElement(StreamingSideInputDoFnRunner.java:72)
com.google.cloud.dataflow.worker.SimpleParDoFn.processElement(SimpleParDoFn.java:324)
com.google.cloud.dataflow.worker.util.common.worker.ParDoOperation.process(ParDoOperation.java:43)
com.google.cloud.dataflow.worker.util.common.worker.OutputReceiver.process(OutputReceiver.java:48)
com.google.cloud.dataflow.worker.GroupAlsoByWindowsParDoFn$1.output(GroupAlsoByWindowsParDoFn.java:181)
com.google.cloud.dataflow.worker.GroupAlsoByWindowFnRunner$1.outputWindowedValue(GroupAlsoByWindowFnRunner.java:102)
org.apache.beam.runners.core.ReduceFnRunner.lambda$onTrigger$1(ReduceFnRunner.java:1057)
org.apache.beam.runners.core.ReduceFnContextFactory$OnTriggerContextImpl.output(ReduceFnContextFactory.java:438)
org.apache.beam.runners.core.SystemReduceFn.onTrigger(SystemReduceFn.java:125)
org.apache.beam.runners.core.ReduceFnRunner.onTrigger(ReduceFnRunner.java:1060)
org.apache.beam.runners.core.ReduceFnRunner.onTimers(ReduceFnRunner.java:768)
com.google.cloud.dataflow.worker.StreamingGroupAlsoByWindowViaWindowSetFn.processElement(StreamingGroupAlsoByWindowViaWindowSetFn.java:95)
com.google.cloud.dataflow.worker.StreamingGroupAlsoByWindowViaWindowSetFn.processElement(StreamingGroupAlsoByWindowViaWindowSetFn.java:42)
com.google.cloud.dataflow.worker.GroupAlsoByWindowFnRunner.invokeProcessElement(GroupAlsoByWindowFnRunner.java:115)
com.google.cloud.dataflow.worker.GroupAlsoByWindowFnRunner.processElement(GroupAlsoByWindowFnRunner.java:73)
org.apache.beam.runners.core.LateDataDroppingDoFnRunner.processElement(LateDataDroppingDoFnRunner.java:80)
com.google.cloud.dataflow.worker.GroupAlsoByWindowsParDoFn.processElement(GroupAlsoByWindowsParDoFn.java:133)
com.google.cloud.dataflow.worker.util.common.worker.ParDoOperation.process(ParDoOperation.java:43)
com.google.cloud.dataflow.worker.util.common.worker.OutputReceiver.process(OutputReceiver.java:48)
com.google.cloud.dataflow.worker.util.common.worker.ReadOperation.runReadLoop(ReadOperation.java:200)
com.google.cloud.dataflow.worker.util.common.worker.ReadOperation.start(ReadOperation.java:158)
com.google.cloud.dataflow.worker.util.common.worker.MapTaskExecutor.execute(MapTaskExecutor.java:75)
com.google.cloud.dataflow.worker.StreamingDataflowWorker.process(StreamingDataflowWorker.java:1227)
com.google.cloud.dataflow.worker.StreamingDataflowWorker.access$1000(StreamingDataflowWorker.java:136)
com.google.cloud.dataflow.worker.StreamingDataflowWorker$6.run(StreamingDataflowWorker.java:966)
java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
java.lang.Thread.run(Thread.java:745)
Caused by: org.apache.avro.AvroRuntimeException: not open
org.apache.avro.file.DataFileWriter.assertOpen(DataFileWriter.java:82)
org.apache.avro.file.DataFileWriter.append(DataFileWriter.java:299)
org.apache.beam.sdk.io.AvroSink$AvroWriter.write(AvroSink.java:123)
org.apache.beam.sdk.io.WriteFiles.writeOrClose(WriteFiles.java:550)
org.apache.beam.sdk.io.WriteFiles.access$1000(WriteFiles.java:112)
org.apache.beam.sdk.io.WriteFiles$WriteShardsIntoTempFilesFn.processElement(WriteFiles.java:718)
The Dataflow job continues to run fine, though. To give some background about the streaming job: it pulls messages from Pub/Sub, creates a fixed window of 5 minutes with a trigger of 10,000 messages (whichever comes first), processes the messages, and finally writes to a GCP bucket, where each specific type of message goes to a specific folder based on its type, using .to(new AvroEventDynamicDestinations(avroBaseDir, schemaView)).
UPDATE 1: Looking at the timestamps of this error, it comes up at an exact interval of 10 seconds, so 6 per minute. The windowing described above is sketched below for reference.
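A minimal sketch of that windowing (the element type and the exact trigger shape are assumptions, since the question does not show the code; messages is a hypothetical PCollection read from Pub/Sub):

import org.apache.beam.sdk.transforms.windowing.AfterPane;
import org.apache.beam.sdk.transforms.windowing.AfterWatermark;
import org.apache.beam.sdk.transforms.windowing.FixedWindows;
import org.apache.beam.sdk.transforms.windowing.Window;
import org.apache.beam.sdk.values.PCollection;
import org.joda.time.Duration;

PCollection<String> windowed = messages.apply(
    Window.<String>into(FixedWindows.of(Duration.standardMinutes(5)))
        // Fire early once 10,000 elements arrive, otherwise at window end:
        // "whichever comes first".
        .triggering(AfterWatermark.pastEndOfWindow()
            .withEarlyFirings(AfterPane.elementCountAtLeast(10_000)))
        .withAllowedLateness(Duration.ZERO)
        .discardingFiredPanes());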
I had exactly the same exception. My problem came from a wrong schema: a null schema, to be exact (it was not found by the schema registry).
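As a sketch of the kind of fail-fast check that helps here (the registry lookup is hypothetical; only Schema.Parser is real Avro API), parsing the writer schema up front surfaces a null or malformed schema before the sink ever opens:

import org.apache.avro.Schema;

String schemaJson = lookupSchema("my-subject"); // hypothetical registry call
if (schemaJson == null) {
  // Fail at pipeline construction time instead of "not open" at write time.
  throw new IllegalStateException("No schema registered for my-subject");
}
Schema schema = new Schema.Parser().parse(schemaJson); // throws on malformed JSON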

How to use MQTT with Tomcat

I'd like to use MQTT in Tomcat 8 in order to publish messages via MQTT. I use the Paho library for MQTT v3.1.1. When I try to instantiate the client using
String clientId = MqttClient.generateClientId();
MqttClient client = new MqttClient("tcp://localhost:1883", clientId);
I get an MqttException(0).
Below you'll find the details of the error message and the stacktrace:
reason 0
msg MqttException
loc MqttException
cause null
excep MqttException (0)
MqttException (0)
at org.eclipse.paho.client.mqttv3.persist.MqttDefaultFilePersistence.open(MqttDefaultFilePersistence.java:80)
at org.eclipse.paho.client.mqttv3.MqttAsyncClient.<init>(MqttAsyncClient.java:304)
at org.eclipse.paho.client.mqttv3.MqttAsyncClient.<init>(MqttAsyncClient.java:185)
at org.eclipse.paho.client.mqttv3.MqttClient.<init>(MqttClient.java:226)
at org.eclipse.paho.client.mqttv3.MqttClient.<init>(MqttClient.java:138)
at de.rz.homeautomation.service.HomeAutomationService.publishSensorData(HomeAutomationService.java:807)
at de.rz.homeautomation.service.HomeAutomationService.postSensorData(HomeAutomationService.java:857)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:497)
at org.glassfish.jersey.server.model.internal.ResourceMethodInvocationHandlerFactory$1.invoke(ResourceMethodInvocationHandlerFactory.java:81)
at org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher$1.run(AbstractJavaResourceMethodDispatcher.java:151)
at org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher.invoke(AbstractJavaResourceMethodDispatcher.java:171)
at org.glassfish.jersey.server.model.internal.JavaResourceMethodDispatcherProvider$ResponseOutInvoker.doDispatch(JavaResourceMethodDispatcherProvider.java:152)
at org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher.dispatch(AbstractJavaResourceMethodDispatcher.java:104)
at org.glassfish.jersey.server.model.ResourceMethodInvoker.invoke(ResourceMethodInvoker.java:387)
at org.glassfish.jersey.server.model.ResourceMethodInvoker.apply(ResourceMethodInvoker.java:331)
at org.glassfish.jersey.server.model.ResourceMethodInvoker.apply(ResourceMethodInvoker.java:103)
at org.glassfish.jersey.server.ServerRuntime$1.run(ServerRuntime.java:271)
at org.glassfish.jersey.internal.Errors$1.call(Errors.java:271)
at org.glassfish.jersey.internal.Errors$1.call(Errors.java:267)
at org.glassfish.jersey.internal.Errors.process(Errors.java:315)
at org.glassfish.jersey.internal.Errors.process(Errors.java:297)
at org.glassfish.jersey.internal.Errors.process(Errors.java:267)
at org.glassfish.jersey.process.internal.RequestScope.runInScope(RequestScope.java:297)
at org.glassfish.jersey.server.ServerRuntime.process(ServerRuntime.java:254)
at org.glassfish.jersey.server.ApplicationHandler.handle(ApplicationHandler.java:1030)
at org.glassfish.jersey.servlet.WebComponent.service(WebComponent.java:373)
at org.glassfish.jersey.servlet.ServletContainer.service(ServletContainer.java:381)
at org.glassfish.jersey.servlet.ServletContainer.service(ServletContainer.java:344)
at org.glassfish.jersey.servlet.ServletContainer.service(ServletContainer.java:221)
at org.apache.catalina.core.ApplicationFilterChain.internalDoFilter(ApplicationFilterChain.java:291)
at org.apache.catalina.core.ApplicationFilterChain.doFilter(ApplicationFilterChain.java:206)
at org.apache.tomcat.websocket.server.WsFilter.doFilter(WsFilter.java:52)
at org.apache.catalina.core.ApplicationFilterChain.internalDoFilter(ApplicationFilterChain.java:239)
at org.apache.catalina.core.ApplicationFilterChain.doFilter(ApplicationFilterChain.java:206)
at org.apache.catalina.core.StandardWrapperValve.invoke(StandardWrapperValve.java:219)
at org.apache.catalina.core.StandardContextValve.invoke(StandardContextValve.java:106)
at org.apache.catalina.authenticator.AuthenticatorBase.invoke(AuthenticatorBase.java:506)
at org.apache.catalina.core.StandardHostValve.invoke(StandardHostValve.java:142)
at org.apache.catalina.valves.ErrorReportValve.invoke(ErrorReportValve.java:79)
at org.apache.catalina.valves.AbstractAccessLogValve.invoke(AbstractAccessLogValve.java:610)
at org.apache.catalina.core.StandardEngineValve.invoke(StandardEngineValve.java:88)
at org.apache.catalina.connector.CoyoteAdapter.service(CoyoteAdapter.java:537)
at org.apache.coyote.http11.AbstractHttp11Processor.process(AbstractHttp11Processor.java:1081)
at org.apache.coyote.AbstractProtocol$AbstractConnectionHandler.process(AbstractProtocol.java:658)
at org.apache.coyote.http11.Http11NioProtocol$Http11ConnectionHandler.process(Http11NioProtocol.java:222)
at org.apache.tomcat.util.net.NioEndpoint$SocketProcessor.doRun(NioEndpoint.java:1580)
at org.apache.tomcat.util.net.NioEndpoint$SocketProcessor.run(NioEndpoint.java:1537)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
at org.apache.tomcat.util.threads.TaskThread$WrappingRunnable.run(TaskThread.java:61)
at java.lang.Thread.run(Thread.java:745)
The same code works fine in a command-line program, so I guess I have to do some kind of configuration of MQTT and/or Tomcat. I also don't know whether I need additional libraries. I just copied the Paho library into the WEB-INF/lib folder.
Right now I am only guessing at what the problem could be.
Can somebody explain to me how I have to configure Tomcat, or where I have to move the library?
Thanks a lot,
Rudi
The exception is being thrown when the MQTT client tries to open its persistence file; I will assume it is probably a permissions error for the default location and the user running Tomcat.
You probably need to set the path by passing an MqttDefaultFilePersistence object to the MqttClient() constructor after the client ID, or pass in a MemoryPersistence to remove the need to store to disk at all (this can potentially lose messages if Tomcat crashes). Both options are sketched below.
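A minimal sketch of both options (the broker URI and directory path are illustrative):

import org.eclipse.paho.client.mqttv3.MqttClient;
import org.eclipse.paho.client.mqttv3.persist.MemoryPersistence;
import org.eclipse.paho.client.mqttv3.persist.MqttDefaultFilePersistence;

// Option 1: in-memory persistence. No disk access, so no permission
// issues, but in-flight QoS 1/2 messages are lost if the JVM dies.
MqttClient client = new MqttClient("tcp://localhost:1883",
    MqttClient.generateClientId(), new MemoryPersistence());

// Option 2: file persistence pointed at a directory the Tomcat user can write to.
MqttClient fileBacked = new MqttClient("tcp://localhost:1883",
    MqttClient.generateClientId(),
    new MqttDefaultFilePersistence("/var/lib/tomcat8/mqtt-persistence"));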

Neo4j 2.1.6: Fixing corrupted relationships and nodes

I realize the db is old and this is a long shot...
I've got an older project that has been running on Neo4j 2.1.6 with no issues. Yesterday I started getting [{Neo.DatabaseError.Statement.ExecutionFailure }] errors when I try to make updates to the graph using Cypher via the transaction endpoint.
I finally tracked down the problem to a couple of relationships attached to a couple of nodes that seem to be corrupted/invalid. Trying to delete them on the console just returns 'null' and trying to delete them via the transaction endpoint leads to:
{"code":"Neo.DatabaseError.Statement.ExecutionFailure","message":null,"stackTrace":"java.lang.NullPointerException
org.neo4j.kernel.impl.nioneo.xa.RelationshipDeleter.updateNodesForDeletedRelationship(RelationshipDeleter.java:187)
org.neo4j.kernel.impl.nioneo.xa.RelationshipDeleter.relDelete(RelationshipDeleter.java:67)
org.neo4j.kernel.impl.nioneo.xa.NeoStoreTransactionContext.relationshipDelete(NeoStoreTransactionContext.java:85)
org.neo4j.kernel.impl.nioneo.xa.NeoStoreTransaction.relDelete(NeoStoreTransaction.java:818)
org.neo4j.kernel.impl.persistence.PersistenceManager.relDelete(PersistenceManager.java:140)
org.neo4j.kernel.impl.core.NodeManager.deleteRelationship(NodeManager.java:780)
org.neo4j.kernel.impl.api.state.OldTxStateBridgeImpl.deleteRelationship(OldTxStateBridgeImpl.java:164)
org.neo4j.kernel.impl.api.state.TxStateImpl.relationshipDoDelete(TxStateImpl.java:439)
org.neo4j.kernel.impl.api.StateHandlingStatementOperations$1.visit(StateHandlingStatementOperations.java:138)
org.neo4j.kernel.impl.api.store.CacheLayer.visit(CacheLayer.java:476)
org.neo4j.kernel.impl.api.StateHandlingStatementOperations.relationshipDelete(StateHandlingStatementOperations.java:133)
org.neo4j.kernel.impl.api.ConstraintEnforcingEntityOperations.relationshipDelete(ConstraintEnforcingEntityOperations.java:159)
org.neo4j.kernel.impl.api.LockingStatementOperations.relationshipDelete(LockingStatementOperations.java:231)
org.neo4j.kernel.impl.api.OperationsFacade.relationshipDelete(OperationsFacade.java:528)
org.neo4j.cypher.internal.spi.v2_1.TransactionBoundQueryContext$RelationshipOperations.delete(TransactionBoundQueryContext.scala:180)
org.neo4j.cypher.internal.spi.v2_1.TransactionBoundQueryContext$RelationshipOperations.delete(TransactionBoundQueryContext.scala:178)
org.neo4j.cypher.internal.compiler.v2_1.spi.DelegatingOperations.delete(DelegatingQueryContext.scala:110)
org.neo4j.cypher.internal.compiler.v2_1.spi.ExceptionTranslatingQueryContext$ExceptionTranslatingOperations.org$neo4j$cypher$internal$compiler$v2_1$spi$ExceptionTranslatingQueryContext$ExceptionTranslatingOperations$$super$delete(ExceptionTranslatingQueryContext.scala:118)
org.neo4j.cypher.internal.compiler.v2_1.spi.ExceptionTranslatingQueryContext$ExceptionTranslatingOperations$$anonfun$delete$1.apply$mcV$sp(ExceptionTranslatingQueryContext.scala:118)
org.neo4j.cypher.internal.compiler.v2_1.spi.ExceptionTranslatingQueryContext$ExceptionTranslatingOperations$$anonfun$delete$1.apply(ExceptionTranslatingQueryContext.scala:118)
org.neo4j.cypher.internal.compiler.v2_1.spi.ExceptionTranslatingQueryContext$ExceptionTranslatingOperations$$anonfun$delete$1.apply(ExceptionTranslatingQueryContext.scala:118)
org.neo4j.cypher.internal.compiler.v2_1.spi.ExceptionTranslatingQueryContext.org$neo4j$cypher$internal$compiler$v2_1$spi$ExceptionTranslatingQueryContext$$translateException(ExceptionTranslatingQueryContext.scala:152)
org.neo4j.cypher.internal.compiler.v2_1.spi.ExceptionTranslatingQueryContext$ExceptionTranslatingOperations.delete(ExceptionTranslatingQueryContext.scala:118)
org.neo4j.cypher.internal.compiler.v2_1.spi.UpdateCountingQueryContext$CountingOps.delete(UpdateCountingQueryContext.scala:120)
org.neo4j.cypher.internal.compiler.v2_1.mutation.DeleteEntityAction.org$neo4j$cypher$internal$compiler$v2_1$mutation$DeleteEntityAction$$delete(DeleteEntityAction.scala:51)
org.neo4j.cypher.internal.compiler.v2_1.mutation.DeleteEntityAction.exec(DeleteEntityAction.scala:37)
org.neo4j.cypher.internal.compiler.v2_1.pipes.ExecuteUpdateCommandsPipe.org$neo4j$cypher$internal$compiler$v2_1$pipes$ExecuteUpdateCommandsPipe$$exec(ExecuteUpdateCommandsPipe.scala:57)
org.neo4j.cypher.internal.compiler.v2_1.pipes.ExecuteUpdateCommandsPi$$$$1019fdff8b266d7d9d5647386930b3d8$$$$ands$1$$anonfun$apply$2.apply(ExecuteUpdateCommandsPipe.scala:46)
org.neo4j.cypher.internal.compiler.v2_1.pipes.ExecuteUpdateCommandsPi$$$$1019fdff8b266d7d9d5647386930b3d8$$$$ands$1$$anonfun$apply$2.apply(ExecuteUpdateCommandsPipe.scala:46)
scala.collection.Iterator$$anon$13.hasNext(Iterator.scala:371)
scala.collection.Iterator$$anon$13.hasNext(Iterator.scala:371)
org.neo4j.cypher.internal.compiler.v2_1.pipes.EmptyResultPipe.internalCreateResults(EmptyResultPipe.scala:29)
org.neo4j.cypher.internal.compiler.v2_1.pipes.PipeWithSource.createResults(Pipe.scala:105)
org.neo4j.cypher.internal.compiler.v2_1.executionplan.ExecutionPlanBuilder$$anonfun$getExecutionPlanFunction$1$$anonfun$apply$2.apply(ExecutionPlanBuilder.scala:120)
org.neo4j.cypher.internal.compiler.v2_1.executionplan.ExecutionPlanBuilder$$anonfun$getExecutionPlanFunction$1$$anonfun$apply$2.apply(ExecutionPlanBuilder.scala:119)
org.neo4j.cypher.internal.compiler.v2_1.executionplan.ExecutionWorkflowBuilder.runWithQueryState(ExecutionPlanBuilder.scala:168)
org.neo4j.cypher.internal.compiler.v2_1.executionplan.ExecutionPlanBuilder$$anonfun$getExecutionPlanFunction$1.apply(ExecutionPlanBuilder.scala:118)
org.neo4j.cypher.internal.compiler.v2_1.executionplan.ExecutionPlanBuilder$$anonfun$getExecutionPlanFunction$1.apply(ExecutionPlanBuilder.scala:103)
org.neo4j.cypher.internal.compiler.v2_1.executionplan.ExecutionPlanBuilder$$anon$1.execute(ExecutionPlanBuilder.scala:68)
org.neo4j.cypher.internal.compiler.v2_1.executionplan.ExecutionPlanBuilder$$anon$1.execute(ExecutionPlanBuilder.scala:67)
org.neo4j.cypher.internal.ExecutionPlanWrapperForV2_1.execute(CypherCompiler.scala:159)
org.neo4j.cypher.ExecutionEngine.execute(ExecutionEngine.scala:76)
org.neo4j.cypher.ExecutionEngine.execute(ExecutionEngine.scala:71)
org.neo4j.cypher.javacompat.ExecutionEngine.execute(ExecutionEngine.java:84)
org.neo4j.server.rest.transactional.TransactionHandle.executeStatements(TransactionHandle.java:277)
org.neo4j.server.rest.transactional.TransactionHandle.commit(TransactionHandle.java:139)
org.neo4j.server.rest.web.TransactionalService$2.write(TransactionalService.java:194)
com.sun.jersey.core.impl.provider.entity.StreamingOutputProvider.writeTo(StreamingOutputProvider.java:71)
com.sun.jersey.core.impl.provider.entity.StreamingOutputProvider.writeTo(StreamingOutputProvider.java:57)
com.sun.jersey.spi.container.ContainerResponse.write(ContainerResponse.java:306)
com.sun.jersey.server.impl.application.WebApplicationImpl._handleRequest(WebApplicationImpl.java:1437)
com.sun.jersey.server.impl.application.WebApplicationImpl.handleRequest(WebApplicationImpl.java:1349)
com.sun.jersey.server.impl.application.WebApplicationImpl.handleRequest(WebApplicationImpl.java:1339)
com.sun.jersey.spi.container.servlet.WebComponent.service(WebComponent.java:416)
com.sun.jersey.spi.container.servlet.ServletContainer.service(ServletContainer.java:537)
com.sun.jersey.spi.container.servlet.ServletContainer.service(ServletContainer.java:699)
javax.servlet.http.HttpServlet.service(HttpServlet.java:848)
org.eclipse.jetty.servlet.ServletHolder.handle(ServletHolder.java:698)
org.eclipse.jetty.servlet.ServletHandler$CachedChain.doFilter(ServletHandler.java:1506)
org.neo4j.server.guard.GuardingRequestFilter.doFilter(GuardingRequestFilter.java:68)
org.eclipse.jetty.servlet.ServletHandler$CachedChain.doFilter(ServletHandler.java:1477)
ch.qos.logback.access.servlet.TeeFilter.doFilter(TeeFilter.java:55)
org.eclipse.jetty.servlet.ServletHandler$CachedChain.doFilter(ServletHandler.java:1477)
org.neo4j.server.guard.GuardingRequestFilter.doFilter(GuardingRequestFilter.java:68)
org.eclipse.jetty.servlet.ServletHandler$CachedChain.doFilter(ServletHandler.java:1477)
org.eclipse.jetty.servlet.ServletHandler.doHandle(ServletHandler.java:503)
org.eclipse.jetty.server.session.SessionHandler.doHandle(SessionHandler.java:211)
org.eclipse.jetty.server.handler.ContextHandler.doHandle(ContextHandler.java:1096)
org.eclipse.jetty.servlet.ServletHandler.doScope(ServletHandler.java:432)
org.eclipse.jetty.server.session.SessionHandler.doScope(SessionHandler.java:175)
org.eclipse.jetty.server.handler.ContextHandler.doScope(ContextHandler.java:1030)
org.eclipse.jetty.server.handler.ScopedHandler.handle(ScopedHandler.java:136)
org.eclipse.jetty.server.handler.HandlerList.handle(HandlerList.java:52)
org.eclipse.jetty.server.handler.HandlerWrapper.handle(HandlerWrapper.java:97)
org.eclipse.jetty.server.handler.RequestLogHandler.handle(RequestLogHandler.java:92)
org.eclipse.jetty.server.handler.HandlerWrapper.handle(HandlerWrapper.java:97)
org.eclipse.jetty.server.Server.handle(Server.java:445)
org.eclipse.jetty.server.HttpChannel.handle(HttpChannel.java:268)
org.eclipse.jetty.server.HttpConnection.onFillable(HttpConnection.java:229)
org.eclipse.jetty.io.AbstractConnection$ReadCallback.run(AbstractConnection.java:358)
org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:601)
org.eclipse.jetty.util.thread.QueuedThreadPool$3.run(QueuedThreadPool.java:532)
java.lang.Thread.run(Thread.java:745)\n"
Is there any way to nuke these relationships? It looks like the first one was corrupted two days ago, and I'd prefer not to lose two days of data if at all possible.
Just in case anybody else finds their way here: Michael Hunger wrote a tool that copies a database and skips anything that is broken, which is exactly what I needed. You can find it at https://github.com/jexp/store-utils/tree/21.
Turns out all of my issues were due to one relationship attached to a NULL node. I ran the tool, copied over the fixed-up database, and everything is working perfectly again.

/db/data/batch endpoint running out of memory

I am currently sending JSON batches (mass-creating nodes and relationships) to my Neo4j instance at http://host:7474/db/data/batch. These batches have grown in size, and now I am getting the error shown below.
Seemingly this error occurs when building the response to the request, which is just too big and blows up an array?
Is there a way around this that still allows the use of the JSON batch endpoint? The JSON is created by a downstream process, so moving to some other mechanism (e.g. CSV loading) is not possible due to the necessary changes in other processes.
It is also not possible to do multiple smaller batches because of the many relationships between the nodes being created: later nodes and relationships refer to node IDs from earlier in the batch, so if I break the batch up, I'll refer to node IDs which don't exist in the current transaction.
2015-05-27 13:15:16.425+0000 INFO [API] Remote interface ready and available at [http://localhost:7474/]
09:18:38.822 [qtp20290798-38] WARN o.e.jetty.servlet.ServletHandler -
javax.servlet.ServletException: java.lang.OutOfMemoryError: Requested array size exceeds VM limit
at com.sun.jersey.spi.container.servlet.WebComponent.service(WebComponent.java:420) ~[jersey-servlet-1.18.1.jar:1.18.1]
at com.sun.jersey.spi.container.servlet.ServletContainer.service(ServletContainer.java:540) ~[jersey-servlet-1.18.1.jar:1.18.1]
at com.sun.jersey.spi.container.servlet.ServletContainer.service(ServletContainer.java:715) ~[jersey-servlet-1.18.1.jar:1.18.1]
at javax.servlet.http.HttpServlet.service(HttpServlet.java:790) ~[javax.servlet-api-3.1.0.jar:3.1.0]
at org.eclipse.jetty.servlet.ServletHolder.handle(ServletHolder.java:800) ~[jetty-servlet-9.2.4.v20141103.jar:9.2.4.v20141103]
at org.eclipse.jetty.servlet.ServletHandler$CachedChain.doFilter(ServletHandler.java:1669) ~[jetty-servlet-9.2.4.v20141103.jar:9.2.4.v20141103]
at org.neo4j.server.rest.dbms.AuthorizationFilter.doFilter(AuthorizationFilter.java:120) ~[neo4j-server-2.2.1.jar:2.2.1]
at org.eclipse.jetty.servlet.ServletHandler$CachedChain.doFilter(ServletHandler.java:1652) ~[jetty-servlet-9.2.4.v20141103.jar:9.2.4.v20141103]
at org.eclipse.jetty.servlet.ServletHandler.doHandle(ServletHandler.java:585) [jetty-servlet-9.2.4.v20141103.jar:9.2.4.v20141103]
at org.eclipse.jetty.server.session.SessionHandler.doHandle(SessionHandler.java:221) [jetty-server-9.2.4.v20141103.jar:9.2.4.v20141103]
at org.eclipse.jetty.server.handler.ContextHandler.doHandle(ContextHandler.java:1125) [jetty-server-9.2.4.v20141103.jar:9.2.4.v20141103]
at org.eclipse.jetty.servlet.ServletHandler.doScope(ServletHandler.java:515) [jetty-servlet-9.2.4.v20141103.jar:9.2.4.v20141103]
at org.eclipse.jetty.server.session.SessionHandler.doScope(SessionHandler.java:185) [jetty-server-9.2.4.v20141103.jar:9.2.4.v20141103]
at org.eclipse.jetty.server.handler.ContextHandler.doScope(ContextHandler.java:1059) [jetty-server-9.2.4.v20141103.jar:9.2.4.v20141103]
at org.eclipse.jetty.server.handler.ScopedHandler.handle(ScopedHandler.java:141) [jetty-server-9.2.4.v20141103.jar:9.2.4.v20141103]
at org.eclipse.jetty.server.handler.HandlerList.handle(HandlerList.java:52) [jetty-server-9.2.4.v20141103.jar:9.2.4.v20141103]
at org.eclipse.jetty.server.handler.HandlerWrapper.handle(HandlerWrapper.java:97) [jetty-server-9.2.4.v20141103.jar:9.2.4.v20141103]
at org.eclipse.jetty.server.Server.handle(Server.java:497) [jetty-server-9.2.4.v20141103.jar:9.2.4.v20141103]
at org.eclipse.jetty.server.HttpChannel.handle(HttpChannel.java:310) [jetty-server-9.2.4.v20141103.jar:9.2.4.v20141103]
at org.eclipse.jetty.server.HttpConnection.onFillable(HttpConnection.java:248) [jetty-server-9.2.4.v20141103.jar:9.2.4.v20141103]
at org.eclipse.jetty.io.AbstractConnection$2.run(AbstractConnection.java:540) [jetty-io-9.2.4.v20141103.jar:9.2.4.v20141103]
at org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:620) [jetty-util-9.2.4.v20141103.jar:9.2.4.v20141103]
at org.eclipse.jetty.util.thread.QueuedThreadPool$3.run(QueuedThreadPool.java:540) [jetty-util-9.2.4.v20141103.jar:9.2.4.v20141103]
at java.lang.Thread.run(Thread.java:745) [na:1.7.0_75]
Caused by: java.lang.OutOfMemoryError: Requested array size exceeds VM limit
at java.util.Arrays.copyOf(Arrays.java:2367) ~[na:1.7.0_75]
...
Limit batches to about 10k nodes created per request, and use -H X-Stream:true as a header; then the server doesn't build up gigantic JSON response strings in memory. A sketch of such a request follows.
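A minimal sketch of a batch POST with that header (the URL and payload are illustrative; real payloads come from the upstream process, capped at ~10k operations):

import java.io.OutputStream;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;

public class BatchPost {
  public static void main(String[] args) throws Exception {
    // A tiny batch payload in the legacy batch-endpoint format.
    String json = "[{\"method\":\"POST\",\"to\":\"/node\",\"id\":0,"
        + "\"body\":{\"name\":\"example\"}}]";
    URL url = new URL("http://localhost:7474/db/data/batch");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestMethod("POST");
    conn.setRequestProperty("Content-Type", "application/json");
    // Ask the server to stream the response instead of buffering it all
    // in memory, which is what triggers the array-size OutOfMemoryError.
    conn.setRequestProperty("X-Stream", "true");
    conn.setDoOutput(true);
    try (OutputStream out = conn.getOutputStream()) {
      out.write(json.getBytes(StandardCharsets.UTF_8));
    }
    System.out.println("HTTP " + conn.getResponseCode());
  }
}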
