Connection error while connecting to WAMP server from NetBeans 8.1

I can't get any output from this program. Do you know why? I am using NetBeans 8.1 and WAMP server, and I created a database named assignments online. Can you also tell me whether I am using the right driver?
package managmentSystem;
import javax.swing.*;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;
import java.sql.SQLException;
/**
*
* @author DiyaBangesh
*/
public class ManagmentSystem {
//PreparedStatement pst = null;
//Statement st= null;
/**
* @param args the command line arguments
*/
public static void main(String args[]) {
try{
ResultSet rs;
Connection conn;
Class.forName("com.mysql.jdbc.Driver");
conn = DriverManager.getConnection("jdbc:mysql://localhost/assignment","root","");
JOptionPane.showMessageDialog(null,"Connection to database is established");
//return conn;
Statement st = conn.createStatement();
String sql ="SELECT * FROM userdetails";
rs = st.executeQuery(sql);
while(rs.next()){
String nam = rs.getString("name");
String uid = rs.getString("user_id");
String pwd = rs.getString("password");
String eid = rs.getString("email_id");
String con = rs.getString("contact");
String ut = rs.getString("usertype");
System.out.println(nam + " " + uid + " " + pwd + " "+ eid + " " + con + " " + ut );
}
conn.close();
}
catch (Exception sqlEx){
System.out.println(sqlEx);
}
}
}

mysql-connector-java-bin.jar is the one needed, and you have already used it. Make sure that you used the correct database name and the correct password (replace 'yourpassword' below with your database password), and make sure that the server is running.
Follow the example below.
public class ManagmentSystem {
Connection con;
Statement s;
PreparedStatement ps;
ResultSet rs;
ManagmentSystem()
{
try
{ Class.forName("com.mysql.jdbc.Driver");
con=DriverManager.getConnection("jdbc:mysql://localhost/assignment?user=root&password=yourpassword");
}
catch(SQLException s)
{
System.out.println("Error in DB Connection");
s.printStackTrace();
}
catch(ClassNotFoundException c)
{
System.out.println("Driver not Found");
}
}
}
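If the connection still fails, the stack trace usually says exactly why (access denied, unknown database, connection refused). As a minimal sketch, assuming the same assignment database, root user, and empty password as in your code, a try-with-resources version closes everything even on error:
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;

public class ConnectionCheck {
    public static void main(String[] args) {
        // With a JDBC 4+ connector jar on the classpath, Class.forName is optional.
        String url = "jdbc:mysql://localhost/assignment";
        try (Connection conn = DriverManager.getConnection(url, "root", "");
             Statement st = conn.createStatement();
             ResultSet rs = st.executeQuery("SELECT * FROM userdetails")) {
            while (rs.next()) {
                System.out.println(rs.getString("name") + " " + rs.getString("user_id"));
            }
        } catch (SQLException e) {
            // The SQLState and message identify the failure (bad credentials,
            // unknown database, server down, missing driver, ...).
            e.printStackTrace();
        }
    }
}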


Reading data from a Kinesis stream unsuccessfully

I am working with Amazon Kinesis Data Streams. My Kinesis stream consists of only one shard.
I am trying to read data (records) from the stream after writing some data (records) to the same stream. My records are simple JSONs.
I can see the reads and the writes in the Amazon console.
When I try to print the content of a record with "record.getData()", I get this output, followed by an error:
java.nio.HeapByteBuffer[pos=4 lim=4 cap=4]
20:35:59.118 [RecordProcessor-0000] WARN com.kinesisconsumer.AmazonKinesisApplicationSampleRecordProcessor - Caught throwable while processing record UserRecord [subSequenceNumber=0, explicitHashKey=null, aggregated=false, getSequenceNumber()=49593662497507120518174908605360552573875197411355262978, getData()=java.nio.HeapByteBuffer[pos=4 lim=4 cap=4], getPartitionKey()=12345]
java.lang.StringIndexOutOfBoundsException: String index out of range: -9
at java.lang.String.substring(String.java:1931)
at com.kinesisconsumer.AmazonKinesisApplicationSampleRecordProcessor.processSingleRecord(AmazonKinesisApplicationSampleRecordProcessor.java:112)
at com.kinesisconsumer.AmazonKinesisApplicationSampleRecordProcessor.processRecordsWithRetries(AmazonKinesisApplicationSampleRecordProcessor.java:75)
at com.kinesisconsumer.AmazonKinesisApplicationSampleRecordProcessor.processRecords(AmazonKinesisApplicationSampleRecordProcessor.java:53)
at com.amazonaws.services.kinesis.clientlibrary.lib.worker.V1ToV2RecordProcessorAdapter.processRecords(V1ToV2RecordProcessorAdapter.java:42)
at com.amazonaws.services.kinesis.clientlibrary.lib.worker.ProcessTask.callProcessRecords(ProcessTask.java:221)
at com.amazonaws.services.kinesis.clientlibrary.lib.worker.ProcessTask.call(ProcessTask.java:176)
at com.amazonaws.services.kinesis.clientlibrary.lib.worker.MetricsCollectingTaskDecorator.call(MetricsCollectingTaskDecorator.java:49)
at com.amazonaws.services.kinesis.clientlibrary.lib.worker.MetricsCollectingTaskDecorator.call(MetricsCollectingTaskDecorator.java:24)
at java.util.concurrent.FutureTask.run(FutureTask.java:266)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
at java.lang.Thread.run(Thread.java:748)
Here is my code:
public class AmazonKinesisApplicationRecordProcessorFactory implements IRecordProcessorFactory {
/**
* {@inheritDoc}
*/
@Override
public IRecordProcessor createProcessor() {
return new AmazonKinesisApplicationSampleRecordProcessor();
}
}
public final class AmazonKinesisApplicationSample {
public static final String SAMPLE_APPLICATION_STREAM_NAME = "LimorKinesis";
private static final String SAMPLE_APPLICATION_NAME = "SampleKinesisApplication";
// Initial position in the stream when the application starts up for the first time.
// Position can be one of LATEST (most recent data) or TRIM_HORIZON (oldest available data)
private static final InitialPositionInStream SAMPLE_APPLICATION_INITIAL_POSITION_IN_STREAM =
InitialPositionInStream.LATEST;
private static ProfileCredentialsProvider credentialsProvider;
private static void init() {
// Ensure the JVM will refresh the cached IP values of AWS resources (e.g. service endpoints).
java.security.Security.setProperty("networkaddress.cache.ttl", "60");
/*
* The ProfileCredentialsProvider will return your [default]
* credential profile by reading from the credentials file located at
* (~/.aws/credentials).
*/
credentialsProvider = new ProfileCredentialsProvider();
try {
credentialsProvider.getCredentials();
} catch (Exception e) {
throw new AmazonClientException("Cannot load the credentials from the credential profiles file. "
+ "Please make sure that your credentials file is at the correct "
+ "location (~/.aws/credentials), and is in valid format.", e);
}
}
public static void main(String[] args) throws Exception {
init();
if (args.length == 1 && "delete-resources".equals(args[0])) {
deleteResources();
return;
}
String workerId = InetAddress.getLocalHost().getCanonicalHostName() + ":" + UUID.randomUUID();
KinesisClientLibConfiguration kinesisClientLibConfiguration =
new KinesisClientLibConfiguration(SAMPLE_APPLICATION_NAME,
SAMPLE_APPLICATION_STREAM_NAME,
credentialsProvider,
workerId);
kinesisClientLibConfiguration.withInitialPositionInStream(SAMPLE_APPLICATION_INITIAL_POSITION_IN_STREAM);
kinesisClientLibConfiguration.withRegionName("us-west-2");//todo : added region west-2
IRecordProcessorFactory recordProcessorFactory = new AmazonKinesisApplicationRecordProcessorFactory();
Worker worker = new Worker(recordProcessorFactory, kinesisClientLibConfiguration);
System.out.printf("Running %s to process stream %s as worker %s...\n",
SAMPLE_APPLICATION_NAME,
SAMPLE_APPLICATION_STREAM_NAME,
workerId);
int exitCode = 0;
try {
worker.run();
} catch (Throwable t) {
System.err.println("Caught throwable while processing data.");
t.printStackTrace();
exitCode = 1;
}
System.exit(exitCode);
}
public static void deleteResources() {
// Delete the stream
AmazonKinesis kinesis = AmazonKinesisClientBuilder.standard()
.withCredentials(credentialsProvider)
.withRegion("us-west-2")
.build();
System.out.printf("Deleting the Amazon Kinesis stream used by the sample. Stream Name = %s.\n",
SAMPLE_APPLICATION_STREAM_NAME);
try {
kinesis.deleteStream(SAMPLE_APPLICATION_STREAM_NAME);
} catch (ResourceNotFoundException ex) {
// The stream doesn't exist.
}
// Delete the table
AmazonDynamoDB dynamoDB = AmazonDynamoDBClientBuilder.standard()
.withCredentials(credentialsProvider)
.withRegion("us-west-2")
.build();
System.out.printf("Deleting the Amazon DynamoDB table used by the Amazon Kinesis Client Library. Table Name = %s.\n",
SAMPLE_APPLICATION_NAME);
try {
dynamoDB.deleteTable(SAMPLE_APPLICATION_NAME);
} catch (com.amazonaws.services.dynamodbv2.model.ResourceNotFoundException ex) {
// The table doesn't exist.
}
}
}
public class AmazonKinesisApplicationSampleRecordProcessor implements IRecordProcessor {
private static final Log LOG = LogFactory.getLog(AmazonKinesisApplicationSampleRecordProcessor.class);
private String kinesisShardId;
// Backoff and retry settings
private static final long BACKOFF_TIME_IN_MILLIS = 3000L;
private static final int NUM_RETRIES = 10;
// Checkpoint about once a minute
private static final long CHECKPOINT_INTERVAL_MILLIS = 60000L;
private long nextCheckpointTimeInMillis;
private final CharsetDecoder decoder = Charset.forName("UTF-8").newDecoder();
/**
* {@inheritDoc}
*/
@Override
public void initialize(String shardId) {
LOG.info("Initializing record processor for shard: " + shardId);
this.kinesisShardId = shardId;
}
/**
* {@inheritDoc}
*/
@Override
public void processRecords(List<Record> records, IRecordProcessorCheckpointer checkpointer) {
LOG.info("Processing " + records.size() + " records from " + kinesisShardId);
// Process records and perform all exception handling.
processRecordsWithRetries(records);
// Checkpoint once every checkpoint interval.
if (System.currentTimeMillis() > nextCheckpointTimeInMillis) {
checkpoint(checkpointer);
nextCheckpointTimeInMillis = System.currentTimeMillis() + CHECKPOINT_INTERVAL_MILLIS;
}
}
/**
* Process records performing retries as needed. Skip "poison pill" records.
*
* @param records Data records to be processed.
*/
private void processRecordsWithRetries(List<Record> records) {
for (Record record : records) {
boolean processedSuccessfully = false;
for (int i = 0; i < NUM_RETRIES; i++) {
try {
//
// Logic to process record goes here.
//
processSingleRecord(record);
processedSuccessfully = true;
break;
} catch (Throwable t) {
LOG.warn("Caught throwable while processing record " + record, t);
}
// backoff if we encounter an exception.
try {
Thread.sleep(BACKOFF_TIME_IN_MILLIS);
} catch (InterruptedException e) {
LOG.debug("Interrupted sleep", e);
}
}
if (!processedSuccessfully) {
LOG.error("Couldn't process record " + record + ". Skipping the record.");
}
}
}
/**
* Process a single record.
*
* @param record The record to be processed.
*/
private void processSingleRecord(Record record) {
System.out.println(record.getData());
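// Note: println on a ByteBuffer prints its toString(), e.g. "java.nio.HeapByteBuffer[pos=4 lim=4 cap=4]" as seen above, not the payload bytes.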
String data = null;
try {
// For this app, we interpret the payload as UTF-8 chars.
data = decoder.decode(record.getData()).toString();
// Assume this record came from AmazonKinesisSample and log its age.
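// Note: if the buffer has already been consumed (the log above shows pos == lim), decode() returns an empty
// string, and substring("testData-".length()) then throws "String index out of range: -9" (0 - 9 = -9),
// which is the uncaught exception in the stack trace.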
long recordCreateTime = new Long(data.substring("testData-".length()));
long ageOfRecordInMillis = System.currentTimeMillis() - recordCreateTime;
LOG.info(record.getSequenceNumber() + ", " + record.getPartitionKey() + ", " + data + ", Created "
+ ageOfRecordInMillis + " milliseconds ago.");
} catch (NumberFormatException e) {
LOG.info("Record does not match sample record format. Ignoring record with data; " + data);
} catch (CharacterCodingException e) {
LOG.error("Malformed data: " + data, e);
}
}
/**
* {@inheritDoc}
*/
@Override
public void shutdown(IRecordProcessorCheckpointer checkpointer, ShutdownReason reason) {
LOG.info("Shutting down record processor for shard: " + kinesisShardId);
// Important to checkpoint after reaching end of shard, so we can start processing data from child shards.
if (reason == ShutdownReason.TERMINATE) {
checkpoint(checkpointer);
}
}
/** Checkpoint with retries.
* @param checkpointer
*/
private void checkpoint(IRecordProcessorCheckpointer checkpointer) {
LOG.info("Checkpointing shard " + kinesisShardId);
for (int i = 0; i < NUM_RETRIES; i++) {
try {
checkpointer.checkpoint();
break;
} catch (ShutdownException se) {
// Ignore checkpoint if the processor instance has been shutdown (fail over).
LOG.info("Caught shutdown exception, skipping checkpoint.", se);
break;
} catch (ThrottlingException e) {
// Backoff and re-attempt checkpoint upon transient failures
if (i >= (NUM_RETRIES - 1)) {
LOG.error("Checkpoint failed after " + (i + 1) + "attempts.", e);
break;
} else {
LOG.info("Transient issue when checkpointing - attempt " + (i + 1) + " of "
+ NUM_RETRIES, e);
}
} catch (InvalidStateException e) {
// This indicates an issue with the DynamoDB table (check for table, provisioned IOPS).
LOG.error("Cannot save checkpoint to the DynamoDB table used by the Amazon Kinesis Client Library.", e);
break;
}
try {
Thread.sleep(BACKOFF_TIME_IN_MILLIS);
} catch (InterruptedException e) {
LOG.debug("Interrupted sleep", e);
}
}
}
}
public class AmazonKinesisRecordProducerSample {
private static AmazonKinesis kinesis;
private static void init() throws Exception {
/*
* The ProfileCredentialsProvider will return your [default]
* credential profile by reading from the credentials file located at
* (~/.aws/credentials).
*/
ProfileCredentialsProvider credentialsProvider = new ProfileCredentialsProvider();
try {
credentialsProvider.getCredentials();
} catch (Exception e) {
throw new AmazonClientException(
"Cannot load the credentials from the credential profiles file. " +
"Please make sure that your credentials file is at the correct " +
"location (~/.aws/credentials), and is in valid format.",
e);
}
kinesis = AmazonKinesisClientBuilder.standard()
.withCredentials(credentialsProvider)
.withRegion("us-west-2")
.build();
}
public static void main(String[] args) throws Exception {
init();
final String myStreamName = AmazonKinesisApplicationSample.SAMPLE_APPLICATION_STREAM_NAME;
final Integer myStreamSize = 1;
// Describe the stream and check if it exists.
DescribeStreamRequest describeStreamRequest = new DescribeStreamRequest().withStreamName(myStreamName);
try {
StreamDescription streamDescription = kinesis.describeStream(describeStreamRequest).getStreamDescription();
System.out.printf("Stream %s has a status of %s.\n", myStreamName, streamDescription.getStreamStatus());
if ("DELETING".equals(streamDescription.getStreamStatus())) {
System.out.println("Stream is being deleted. This sample will now exit.");
System.exit(0);
}
// Wait for the stream to become active if it is not yet ACTIVE.
if (!"ACTIVE".equals(streamDescription.getStreamStatus())) {
waitForStreamToBecomeAvailable(myStreamName);
}
} catch (ResourceNotFoundException ex) {
System.out.printf("Stream %s does not exist. Creating it now.\n", myStreamName);
// Create a stream. The number of shards determines the provisioned throughput.
CreateStreamRequest createStreamRequest = new CreateStreamRequest();
createStreamRequest.setStreamName(myStreamName);
createStreamRequest.setShardCount(myStreamSize);
kinesis.createStream(createStreamRequest);
// The stream is now being created. Wait for it to become active.
waitForStreamToBecomeAvailable(myStreamName);
}
// List all of my streams.
ListStreamsRequest listStreamsRequest = new ListStreamsRequest();
listStreamsRequest.setLimit(10);
ListStreamsResult listStreamsResult = kinesis.listStreams(listStreamsRequest);
List<String> streamNames = listStreamsResult.getStreamNames();
while (listStreamsResult.isHasMoreStreams()) {
if (streamNames.size() > 0) {
listStreamsRequest.setExclusiveStartStreamName(streamNames.get(streamNames.size() - 1));
}
listStreamsResult = kinesis.listStreams(listStreamsRequest);
streamNames.addAll(listStreamsResult.getStreamNames());
}
// Print all of my streams.
System.out.println("List of my streams: ");
for (int i = 0; i < streamNames.size(); i++) {
System.out.println("\t- " + streamNames.get(i));
}
System.out.printf("Putting records in stream : %s until this application is stopped...\n", myStreamName);
System.out.println("Press CTRL-C to stop.");
// Write records to the stream until this program is aborted.
while (true) {
long createTime = System.currentTimeMillis();
PutRecordRequest putRecordRequest = new PutRecordRequest();
putRecordRequest.setStreamName(myStreamName);
putRecordRequest.setData(ByteBuffer.wrap(String.format("testData-%d", createTime).getBytes()));
putRecordRequest.setPartitionKey(String.format("partitionKey-%d", createTime));
PutRecordResult putRecordResult = kinesis.putRecord(putRecordRequest);
System.out.printf("Successfully put record, partition key : %s, ShardID : %s, SequenceNumber : %s.\n",
putRecordRequest.getPartitionKey(),
putRecordResult.getShardId(),
putRecordResult.getSequenceNumber());
}
}
private static void waitForStreamToBecomeAvailable(String myStreamName) throws InterruptedException {
System.out.printf("Waiting for %s to become ACTIVE...\n", myStreamName);
long startTime = System.currentTimeMillis();
long endTime = startTime + TimeUnit.MINUTES.toMillis(10);
while (System.currentTimeMillis() < endTime) {
Thread.sleep(TimeUnit.SECONDS.toMillis(20));
try {
DescribeStreamRequest describeStreamRequest = new DescribeStreamRequest();
describeStreamRequest.setStreamName(myStreamName);
// ask for no more than 10 shards at a time -- this is an optional parameter
describeStreamRequest.setLimit(10);
DescribeStreamResult describeStreamResponse = kinesis.describeStream(describeStreamRequest);
String streamStatus = describeStreamResponse.getStreamDescription().getStreamStatus();
System.out.printf("\t- current state: %s\n", streamStatus);
if ("ACTIVE".equals(streamStatus)) {
return;
}
} catch (ResourceNotFoundException ex) {
// ResourceNotFound means the stream doesn't exist yet,
// so ignore this error and just keep polling.
} catch (AmazonServiceException ase) {
throw ase;
}
}
throw new RuntimeException(String.format("Stream %s never became active", myStreamName));
}
}
I used the sample code from this link :
https://github.com/aws/aws-sdk-java/tree/master/src/samples/AmazonKinesis
Try changing the Application Name and then retrying; the KCL stores its leases and checkpoints in a DynamoDB table named after the application, so a fresh name makes the worker start over from the configured initial position. Most such problems are resolved by this simple change (see the sketch below).
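A minimal sketch of that change, reusing the constructor call from your code (the new name here is an arbitrary assumption; any name not used before works):
KinesisClientLibConfiguration kinesisClientLibConfiguration =
        new KinesisClientLibConfiguration(
                "SampleKinesisApplication-v2", // hypothetical fresh application name
                SAMPLE_APPLICATION_STREAM_NAME,
                credentialsProvider,
                workerId);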
Or try the full alternative code below.
AmazonKinesisApplicationSample.java :-
package KinesiSampleApplication.www.intellyzen.com;
/*
* Copyright 2012-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
import java.net.InetAddress;
import java.util.UUID;
import com.amazonaws.AmazonClientException;
import com.amazonaws.auth.AWSCredentials;
import com.amazonaws.auth.profile.ProfileCredentialsProvider;
import com.amazonaws.services.dynamodbv2.AmazonDynamoDB;
import com.amazonaws.services.dynamodbv2.AmazonDynamoDBClientBuilder;
import com.amazonaws.services.kinesis.AmazonKinesis;
import com.amazonaws.services.kinesis.AmazonKinesisClientBuilder;
import com.amazonaws.services.kinesis.clientlibrary.interfaces.IRecordProcessorFactory;
import com.amazonaws.services.kinesis.clientlibrary.lib.worker.InitialPositionInStream;
import com.amazonaws.services.kinesis.clientlibrary.lib.worker.KinesisClientLibConfiguration;
import com.amazonaws.services.kinesis.clientlibrary.lib.worker.Worker;
import com.amazonaws.services.kinesis.model.ResourceNotFoundException;
/**
* Sample Amazon Kinesis Application.
*/
public final class AmazonKinesisApplicationSample {
/*
* Before running the code:
* Fill in your AWS access credentials in the provided credentials
* file template, and be sure to move the file to the default location
* (~/.aws/credentials) where the sample code will load the
* credentials from.
* https://console.aws.amazon.com/iam/home?#security_credential
*
* WARNING:
* To avoid accidental leakage of your credentials, DO NOT keep
* the credentials file in your source directory.
*/
public static final String SAMPLE_APPLICATION_STREAM_NAME = "IOTREST-API";
private static final String SAMPLE_APPLICATION_NAME = "SampleKinesisApplicationadsfdsa11 ";
// Initial position in the stream when the application starts up for the first time.
// Position can be one of LATEST (most recent data) or TRIM_HORIZON (oldest available data)
private static final InitialPositionInStream SAMPLE_APPLICATION_INITIAL_POSITION_IN_STREAM =
InitialPositionInStream.LATEST;
private static ProfileCredentialsProvider credentialsProvider;
private static void init() {
// Ensure the JVM will refresh the cached IP values of AWS resources (e.g. service endpoints).
java.security.Security.setProperty("networkaddress.cache.ttl", "60");
/*
* The ProfileCredentialsProvider will return your [default]
* credential profile by reading from the credentials file located at
* (~/.aws/credentials).
*/
credentialsProvider = new ProfileCredentialsProvider();
try {
credentialsProvider.getCredentials();
} catch (Exception e) {
throw new AmazonClientException("Cannot load the credentials from the credential profiles file. "
+ "Please make sure that your credentials file is at the correct "
+ "location (~/.aws/credentials), and is in valid format.", e);
}
}
public static void main(String[] args) throws Exception {
init();
if (args.length == 1 && "delete-resources".equals(args[0])) {
deleteResources();
return;
}
String workerId = InetAddress.getLocalHost().getCanonicalHostName() + ":" + UUID.randomUUID();
KinesisClientLibConfiguration kinesisClientLibConfiguration =
new KinesisClientLibConfiguration(SAMPLE_APPLICATION_NAME,
SAMPLE_APPLICATION_STREAM_NAME,
credentialsProvider,
workerId);
kinesisClientLibConfiguration.withInitialPositionInStream(SAMPLE_APPLICATION_INITIAL_POSITION_IN_STREAM);
IRecordProcessorFactory recordProcessorFactory = new AmazonKinesisApplicationRecordProcessorFactory();
Worker worker = new Worker(recordProcessorFactory, kinesisClientLibConfiguration);
System.out.printf("Running %s to process stream %s as worker %s...\n",
SAMPLE_APPLICATION_NAME,
SAMPLE_APPLICATION_STREAM_NAME,
workerId);
int exitCode = 0;
try {
worker.run();
} catch (Throwable t) {
System.err.println("Caught throwable while processing data.");
t.printStackTrace();
exitCode = 1;
}
System.exit(exitCode);
}
public static void deleteResources() {
// Delete the stream
AmazonKinesis kinesis = AmazonKinesisClientBuilder.standard()
.withCredentials(credentialsProvider)
.withRegion("us-east-1")
.build();
System.out.printf("Deleting the Amazon Kinesis stream used by the sample. Stream Name = %s.\n",
SAMPLE_APPLICATION_STREAM_NAME);
try {
kinesis.deleteStream(SAMPLE_APPLICATION_STREAM_NAME);
} catch (ResourceNotFoundException ex) {
// The stream doesn't exist.
}
// Delete the table
AmazonDynamoDB dynamoDB = AmazonDynamoDBClientBuilder.standard()
.withCredentials(credentialsProvider)
.withRegion("us-east-1")
.build();
System.out.printf("Deleting the Amazon DynamoDB table used by the Amazon Kinesis Client Library. Table Name = %s.\n",
SAMPLE_APPLICATION_NAME);
try {
dynamoDB.deleteTable(SAMPLE_APPLICATION_NAME);
} catch (com.amazonaws.services.dynamodbv2.model.ResourceNotFoundException ex) {
// The table doesn't exist.
}
}
}
AmazonKinesisApplicationSampleRecordProcessor.java:-
package KinesiSampleApplication.www.intellyzen.com;
/*
* Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
import java.nio.charset.CharacterCodingException;
import java.nio.charset.Charset;
import java.nio.charset.CharsetDecoder;
import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import com.amazonaws.services.kinesis.clientlibrary.exceptions.InvalidStateException;
import com.amazonaws.services.kinesis.clientlibrary.exceptions.ShutdownException;
import com.amazonaws.services.kinesis.clientlibrary.exceptions.ThrottlingException;
import com.amazonaws.services.kinesis.clientlibrary.interfaces.IRecordProcessor;
import com.amazonaws.services.kinesis.clientlibrary.interfaces.IRecordProcessorCheckpointer;
import com.amazonaws.services.kinesis.model.Record;
import software.amazon.kinesis.lifecycle.ShutdownReason;
/**
* Processes records and checkpoints progress.
*/
public class AmazonKinesisApplicationSampleRecordProcessor implements IRecordProcessor {
private static final Log LOG = LogFactory.getLog(AmazonKinesisApplicationSampleRecordProcessor.class);
private String kinesisShardId;
// Backoff and retry settings
private static final long BACKOFF_TIME_IN_MILLIS = 3000L;
private static final int NUM_RETRIES = 10;
// Checkpoint about once a minute
private static final long CHECKPOINT_INTERVAL_MILLIS = 60000L;
private long nextCheckpointTimeInMillis;
private final CharsetDecoder decoder = Charset.forName("UTF-8").newDecoder();
/**
* {@inheritDoc}
*/
public void initialize(String shardId) {
LOG.info("Initializing record processor for shard: " + shardId);
this.kinesisShardId = shardId;
}
/**
* {@inheritDoc}
*/
public void processRecords(List<Record> records, IRecordProcessorCheckpointer checkpointer) {
LOG.info("Processing " + records.size() + " records from " + kinesisShardId);
// Process records and perform all exception handling.
processRecordsWithRetries(records);
// Checkpoint once every checkpoint interval.
if (System.currentTimeMillis() > nextCheckpointTimeInMillis) {
checkpoint(checkpointer);
nextCheckpointTimeInMillis = System.currentTimeMillis() + CHECKPOINT_INTERVAL_MILLIS;
}
}
/**
* Process records performing retries as needed. Skip "poison pill" records.
*
* @param records Data records to be processed.
*/
private void processRecordsWithRetries(List<Record> records) {
for (Record record : records) {
boolean processedSuccessfully = false;
for (int i = 0; i < NUM_RETRIES; i++) {
try {
//
// Logic to process record goes here.
//
processSingleRecord(record);
processedSuccessfully = true;
break;
} catch (Throwable t) {
LOG.warn("Caught throwable while processing record " + record, t);
}
// backoff if we encounter an exception.
try {
Thread.sleep(BACKOFF_TIME_IN_MILLIS);
} catch (InterruptedException e) {
LOG.debug("Interrupted sleep", e);
}
}
if (!processedSuccessfully) {
LOG.error("Couldn't process record " + record + ". Skipping the record.");
}
}
}
/**
* Process a single record.
*
* @param record The record to be processed.
*/
private void processSingleRecord(Record record) {
// TODO Add your own record processing logic here
String data = null;
try {
// For this app, we interpret the payload as UTF-8 chars.
data = decoder.decode(record.getData()).toString();
System.out.println(data);
System.out.println("\n");
// Assume this record came from AmazonKinesisSample and log its age.
long recordCreateTime = new Long(data.substring("testData-".length()));
long ageOfRecordInMillis = System.currentTimeMillis() - recordCreateTime;
LOG.info(record.getSequenceNumber() + ", " + record.getPartitionKey() + ", " + data + ", Created "
+ ageOfRecordInMillis + " milliseconds ago.");
} catch (NumberFormatException e) {
LOG.info("Record does not match sample record format. Ignoring record with data; " + data);}
catch (CharacterCodingException e) {
LOG.error("Malformed data: " + data, e);
}
}
/**
* {@inheritDoc}
*/
public void shutdown(IRecordProcessorCheckpointer checkpointer, ShutdownReason reason) {
LOG.info("Shutting down record processor for shard: " + kinesisShardId);
// Important to checkpoint after reaching end of shard, so we can start processing data from child shards.
if (reason == ShutdownReason.LEASE_LOST) {
checkpoint(checkpointer);
}
}
/** Checkpoint with retries.
* @param checkpointer
*/
private void checkpoint(IRecordProcessorCheckpointer checkpointer) {
LOG.info("Checkpointing shard " + kinesisShardId);
for (int i = 0; i < NUM_RETRIES; i++) {
try {
checkpointer.checkpoint();
break;
} catch (ShutdownException se) {
// Ignore checkpoint if the processor instance has been shutdown (fail over).
LOG.info("Caught shutdown exception, skipping checkpoint.", se);
break;
} catch (ThrottlingException e) {
// Backoff and re-attempt checkpoint upon transient failures
if (i >= (NUM_RETRIES - 1)) {
LOG.error("Checkpoint failed after " + (i + 1) + "attempts.", e);
break;
} else {
LOG.info("Transient issue when checkpointing - attempt " + (i + 1) + " of "
+ NUM_RETRIES, e);
}
} catch (InvalidStateException e) {
// This indicates an issue with the DynamoDB table (check for table, provisioned IOPS).
LOG.error("Cannot save checkpoint to the DynamoDB table used by the Amazon Kinesis Client Library.", e);
break;
}
try {
Thread.sleep(BACKOFF_TIME_IN_MILLIS);
} catch (InterruptedException e) {
LOG.debug("Interrupted sleep", e);
}
}
}
public void shutdown(IRecordProcessorCheckpointer checkpointer,
com.amazonaws.services.kinesis.clientlibrary.lib.worker.ShutdownReason reason) {
// TODO Auto-generated method stub
}
}
AmazonKinesisApplicationRecordProcessorFactory.java:-
package KinesiSampleApplication.www.intellyzen.com;
/*
* Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
import com.amazonaws.services.kinesis.clientlibrary.interfaces.IRecordProcessor;
import com.amazonaws.services.kinesis.clientlibrary.interfaces.IRecordProcessorFactory;
/**
* Used to create new record processors.
*/
public class AmazonKinesisApplicationRecordProcessorFactory implements IRecordProcessorFactory {
/**
* {@inheritDoc}
*/
public IRecordProcessor createProcessor() {
return new AmazonKinesisApplicationSampleRecordProcessor();
}
}
pom.xml:-
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<groupId>KinesiSampleApplication</groupId>
<artifactId>www.intellyzen.com</artifactId>
<version>0.0.1-SNAPSHOT</version>
<packaging>jar</packaging>
<name>www.intellyzen.com</name>
<url>http://maven.apache.org</url>
<properties>
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
</properties>
<dependencies>
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
<version>3.8.1</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>software.amazon.kinesis</groupId>
<artifactId>amazon-kinesis-client</artifactId>
<version>2.2.0</version>
</dependency>
<!-- https://mvnrepository.com/artifact/com.amazonaws/aws-java-sdk -->
<dependency>
<groupId>com.amazonaws</groupId>
<artifactId>aws-java-sdk-kinesis</artifactId>
<version>1.11.551</version>
</dependency>
<!-- https://mvnrepository.com/artifact/com.amazonaws/amazon-kinesis-client -->
<dependency>
<groupId>com.amazonaws</groupId>
<artifactId>amazon-kinesis-client</artifactId>
<version>1.10.0</version>
</dependency>
<!-- https://mvnrepository.com/artifact/com.fasterxml.jackson.dataformat/jackson-dataformat-cbor -->
<dependency>
<groupId>com.fasterxml.jackson.dataformat</groupId>
<artifactId>jackson-dataformat-cbor</artifactId>
<version>2.9.8</version>
</dependency>
</dependencies>
</project>
Your use case -- "I am trying to read data (records) from the stream"
You can find all AWS Java V2 examples here: https://github.com/awsdocs/aws-doc-sdk-examples/tree/master/javav2/example_code/kinesis
Here is a solution using the AWS Kinesis Java V2 API:
package com.example.kinesis;
//snippet-start:[kinesis.java2.getrecord.import]
import software.amazon.awssdk.core.SdkBytes;
import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.kinesis.KinesisClient;
import software.amazon.awssdk.services.kinesis.model.*;
import java.util.ArrayList;
import java.util.List;
//snippet-end:[kinesis.java2.getrecord.import]
/**
* Demonstrates how to read data from a Kinesis Data Stream. Before running this Java code example, populate a Data Stream
* by running the StockTradesWriter example. That example populates a Data Stream that you can then use for this example.
*/
public class GetRecords {
public static void main(String[] args) {
// snippet-start:[kinesis.java2.getrecord.main]
Region region = Region.US_EAST_1;
KinesisClient kinesisClient = KinesisClient.builder()
.region(region)
.build();
getStockTrades(kinesisClient);
}
private static void getStockTrades(KinesisClient kinesisClient) {
String shardIterator;
String lastShardId = null;
// Retrieve the Shards from a Stream
DescribeStreamRequest describeStreamRequest = DescribeStreamRequest.builder()
.streamName("StockTrade")
.build();
List<Shard> shards = new ArrayList<>();
DescribeStreamResponse streamRes;
do {
// describeStreamRequest.exclusiveStartShardId(lastShardId);
streamRes = kinesisClient.describeStream(describeStreamRequest);
shards.addAll(streamRes.streamDescription().shards());
if (shards.size() > 0) {
lastShardId = shards.get(shards.size() - 1).shardId();
}
} while (streamRes.streamDescription().hasMoreShards());
GetShardIteratorRequest itReq = GetShardIteratorRequest.builder()
.streamName("StockTrade")
.shardIteratorType("TRIM_HORIZON")
.shardId(shards.get(0).shardId())
.build();
GetShardIteratorResponse shardIteratorResult = kinesisClient.getShardIterator(itReq);
shardIterator = shardIteratorResult.shardIterator();
// Continuously read data records from shard.
List<Record> records;
while (true) {
// Create new GetRecordsRequest with existing shardIterator.
// Set maximum records to return to 1000.
GetRecordsRequest recordsRequest = GetRecordsRequest.builder()
.shardIterator(shardIterator)
.limit(1000)
.build();
GetRecordsResponse result = kinesisClient.getRecords(recordsRequest);
// Put result into record list. Result may be empty.
records = result.records();
// Print records
for (Record record : records) {
SdkBytes byteBuffer = record.data();
System.out.println(String.format("Seq No: %s - %s", record.sequenceNumber(),
new String(byteBuffer.asByteArray())));
}
try {
Thread.sleep(1000);
} catch (InterruptedException exception) {
throw new RuntimeException(exception);
}
shardIterator = result.nextShardIterator();
}
// snippet-end:[kinesis.java2.getrecord.main]
}
}
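As for the original symptom: System.out.println(record.getData()) prints the ByteBuffer object, not its contents. A minimal sketch for decoding a KCL v1 Record payload as UTF-8 text (add import java.nio.charset.StandardCharsets; duplicate() reads the bytes without disturbing the buffer's position):
String payload = StandardCharsets.UTF_8.decode(record.getData().duplicate()).toString();
System.out.println(payload); // the JSON text, instead of java.nio.HeapByteBuffer[...]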

Selenium grid error : Session [(null externalkey)] not available and is not among the last 1000 terminated sessions

We are executing multiple test cases on a Selenium grid whose hub is connected to 2 node machines, but every time I run the grid I get this error:
Error creating a webdriver. Exception message:
Session [(null externalkey)] not available and is not among the last 1000 terminated sessions.
Active sessions are[]
Command duration or timeout: 0 milliseconds
Code:
private static List<WebDriver> m_listOfWebDrivers = Collections.synchronizedList(new ArrayList<WebDriver>());
private static ThreadLocal<WebDriver> m_driverForThread = new ThreadLocal<WebDriver>() {
@Override
protected WebDriver initialValue() {
WebDriver driver = null;
try {
driver = loadDesktopDriver();
} catch (Exception e) {
e.printStackTrace();
}
Log.info("Initializing Webdriver");
m_listOfWebDrivers.add(driver);
return driver;
}
};
protected static WebDriver loadDesktopDriver() throws Exception {
WebDriver driver = null;
Log.debug("Get Driver for Browser : " + m_browser);
try {
if (!m_runOnBrowserStack && null == m_browser) {
throw new IllegalArgumentException("Browser value should be provided for test");
}
driver = getNewDriver(m_browser, "", "", m_context);
driver.manage().timeouts().implicitlyWait(50000, TimeUnit.MILLISECONDS); /* this was added later, still didn't work */
} catch (Exception e) {
Log.fatal("Error creating a webdriver. Exception message : " + e.getMessage());
throw e;
}
return driver;
}
public static WebDriver getNewDriver(String browserName, String browserVersion, String platform,
ITestContext context)
throws IOException, ComboBoxElementException, TextBoxElementException, ElementException, PageException {
LoggingPreferences logPrefs = new LoggingPreferences();
logPrefs.enable(LogType.BROWSER, Level.ALL);
/**
* These capabilities will need to be reassigned according to the
* browser that is going to be launched. This is required only when
* running on the Grid, but it is kept the same for normal execution
* to avoid code redundancy.
*/
DesiredCapabilities desiredCapabilities = null;
if (m_runOnBrowserStack) {
desiredCapabilities = new DesiredCapabilities();
JSONObject envs = (JSONObject) m_bsConfig.get("environment");
String bsEnvironment = context.getCurrentXmlTest().getParameter("bsEnvironment");
String testName = context.getCurrentXmlTest().getName();
Log.info("Environmnet details for Test [" + testName + "] is : " + bsEnvironment);
if (null == bsEnvironment)
throw new PageException(
"Environment name does not present in XML or not passed from CLI : " + bsEnvironment);
Map<String, String> envCapabilities = (Map<String, String>) envs.get(bsEnvironment);
if (null == envCapabilities)
throw new PageException("Environment name does not present in Config file : " + bsEnvironment);
Iterator<Entry<String, String>> it = envCapabilities.entrySet().iterator();
while (it.hasNext()) {
Map.Entry<String, ?> pair = (Map.Entry<String, ?>) it.next();
desiredCapabilities.setCapability(pair.getKey().toString(), pair.getValue().toString());
}
Map<String, String> commonCapabilities = (Map<String, String>) m_bsConfig.get("capabilities");
it = commonCapabilities.entrySet().iterator();
while (it.hasNext()) {
Map.Entry<String, ?> pair = (Map.Entry<String, ?>) it.next();
if (desiredCapabilities.getCapability(pair.getKey().toString()) == null) {
desiredCapabilities.setCapability(pair.getKey().toString(), pair.getValue().toString());
}
}
browserName = (String) desiredCapabilities.getCapability("browser");
}
DriverSupportedBrowsers driverType = DriverSupportedBrowsers.valueOf(browserName.toUpperCase());
if (null != m_gridUrl && !m_gridUrl.isEmpty()) {
m_gridUrl += "/wd/hub";
}
switch (driverType) {
case CHROME:
String chromeDriverPath = driverHome + File.separator + FrameworkConstants.chromeDriverExeName;
if (m_runOnBrowserStack || CommonHelper.isFileExists(chromeDriverPath)) {
if (!m_runOnBrowserStack)
desiredCapabilities = DesiredCapabilities.chrome();
desiredCapabilities.setCapability(CapabilityType.LOGGING_PREFS, logPrefs);
ChromeOptions options = new ChromeOptions();
options.addArguments("--start-maximized");
options.addArguments("--disable-extensions");
// To start browser in private mode
// options.addArguments("incognito");
desiredCapabilities.setCapability(ChromeOptions.CAPABILITY, options);
System.setProperty("webdriver.chrome.driver", chromeDriverPath);
if (null != m_gridUrl && !m_gridUrl.isEmpty()) {// for runs on
// grid
return new RemoteWebDriver(new URL(m_gridUrl), desiredCapabilities);
} else
return new RemoteWebDriver(service.getUrl(), desiredCapabilities); /* The code generally fails here with the error above */
} else {
throw new FileNotFoundException(
"Chrome Driver path : " + chromeDriverPath + "\n\"" + FrameworkConstants.chromeDriverExeName
+ "\" not found in driver home path declared in System Environment Variable \""
+ driverHome + "\"");
}
#Parameters({ "bsEnvironment" })
#BeforeMethod(alwaysRun = true)
public void initialize(ITestContext context, Method method, #Optional String bsEnvironment)
throws IOException {
String customer = context.getCurrentXmlTest().getParameter("customer");
String testName = context.getCurrentXmlTest().getName();
setTestName(testName);
String methodName = "";
m_testMethod = method.getName();
methodName = testName + "_" + method.getName() + "_" + customer;
CommonHelper.renameRetryLog(m_logDir, methodName);
Log.setLog(m_logDir, methodName);
m_context = context;
if (m_runOnBrowserStack) {
if (null == context.getCurrentXmlTest().getParameter("bsEnvironment")
|| context.getCurrentXmlTest().getParameter("bsEnvironment").isEmpty()) {
Log.info("Adding bsEnvironment parameter for run on BrowserStack");
context.getCurrentXmlTest().addParameter("bsEnvironment", bsEnvironment);
}
}
try {
WebDriver driver = getDriverInstanceForThread();
if (null == driver)
throw new PageException("Driver is null. Initialization problem!!");
// Logging browser name and version parameters, driver and thread
// instances
String browserName = null;
String browserVersion = null;
try {
Capabilities webDriverCapablities = ((RemoteWebDriver) driver).getCapabilities();
browserName = webDriverCapablities.getBrowserName();
browserVersion = webDriverCapablities.getVersion();
} catch (ClassCastException e) {
Log.error("Unable to cast driver to RemoteWebdriver");
browserName = m_browser;
browserVersion = "NA";
}
Log.info("\n ****************** START OF TEST CASE " + method.getName() + " " + customer + ":"
+ browserName + ":" + browserVersion + "\t THREAD:" + Thread.currentThread().getId()
+ "\t WEBDRIVER:" + driver + " ****************** \n");
// driver.manage().timeouts().pageLoadTimeout(CommonConstants.PAGE_LOAD_WAIT_SEC,
// TimeUnit.SECONDS);
if (null != m_gridUrl) {
// Log the remote node ip address where the test is running
Log.info("Remote Node IP: " + CommonHelper.getIPOfRemoteNode(driver));
}
} catch (Exception e) {
e.printStackTrace();
Log.warn(e.getMessage());
}
The command used to start the node, with timeout and browserTimeout set:
java -Dwebdriver.chrome.driver=D:/imp/iTAF_Driver_Home/chromedriver.exe -jar selenium-server-standalone-3.3.1.jar -port 5554 -role node -hub http://10.18.15.168:5550/grid/register -timeout 86400 -browserTimeout 86000
This error recurs on every run.

Jenkins - Groovy script not ending after return statement

I'm trying to run a scheduled Groovy script on Jenkins but I've run into some trouble: it won't finish running although it reaches the "return" statement.
I'm using in-house dependencies and classes, and I've found the line of code that, if omitted, lets the script return successfully. But unfortunately I can't omit this line :(
Do you have any idea what could cause a Jenkins build step to stay stuck?
I've noticed that the "culprit" line of code, internally runs the following:
this.executorService.scheduleWithFixedDelay(this.eventsPublisher, 3L, 3L, TimeUnit.SECONDS);
Is it possible that playing with the Executor, messes around with the Jenkins build steps?
I'd love some help,
Thanks a lot :)
UPDATE:
Code:
import java.sql.DriverManager
import java.sql.ResultSet
import java.text.DateFormat
import java.text.SimpleDateFormat
import hudson.model.*
def verticaConn = null
def verticaStmt = null
def mongoConnection = null
try {
println("start script: vertica_to_kafka")
// get params
def verticaHostName = System.getenv("verticaHostName") //dev=192.168.247.11:5433 prod=192.168.251.120:5433
def verticaDbName = System.getenv("verticaDbName")
def verticaTBName = System.getenv("verticaTBName")
def bootstrapServers = System.getenv("bootstrapServers")
def limitNum = System.getenv("limitNum").toInteger()
def startTime = System.getenv("startTime")
MyKafkaStringProducer producer = new MyKafkaStringProducer();
producer.init()
MyEventDao eventDao = new MyEventDao();
eventDao.setStringProducer(producer);
Class.forName("com.vertica.jdbc.Driver")
String verticaConnectionString = "jdbc:vertica://${verticaHostName}/${verticaDbName}"
Properties verticaProp = new Properties();
verticaProp.put("user", "user");
verticaProp.put("password", "password");
verticaProp.put("ConnectionLoadBalance", 1);
verticaConn = DriverManager.getConnection(verticaConnectionString, verticaProp);
verticaStmt = verticaConn.createStatement()
// vertica execution timestamp
long currentTS = System.currentTimeMillis()
DateFormat formatter = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
String startTS = "1970-01-01 00:00:00";
String command= "select * from ${verticaTBName} where ts >'${startTS}' "
if (limitNum > 0) command += "limit ${limitNum}"
println("querying vertica")
verticaStmt.execute(command)
ResultSet results = verticaStmt.getResultSet()
println("start to send data to kafka")
SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd hh:mm:ss");
while(results.next()){
long id = results.getLong("id");
String domain = results.getString("domain");
String text = results.getString("text");
Date ts = dateFormat.parse(results.getString("ts"));
MyEntity myEntity = new MyEntity(id, domain, text, ts);
eventDao.saveEntity(myEntity);
}
} catch (Exception e){
e.printStackTrace()
} finally {
println("going to release resources");
if (verticaStmt != null){
try{
verticaStmt.close()
println("vertica statement closed successfully!");
} catch (Exception e) {
//println("error in close the vertica statement {}", e.getMessage());
}
}
if (verticaConn != null){
try{
verticaConn.close()
println("vertica connection closed successfully!");
} catch (Exception e) {
//println("error in close the vertica connection {}", e.getMessage());
}
}
if (mongoConnection != null){
try {
mongoConnection.getMongo().close();
println("mongo connection closed successfully!");
} catch (Exception e) {
//println("error in close the mongo connection {}", e.getMessage());
}
}
println("end script: vertica_to_kafka")
}
return
System.exit(0)
And in MyKafkaStringProducer I found the following:
public synchronized void init() {
if(this.active) {
this.initKafkaProducer();
this.executorService.scheduleWithFixedDelay(this.eventsPublisher, 3L, 3L, TimeUnit.SECONDS);
}
}
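A likely cause, judging from the init() snippet above: Executors create non-daemon threads by default, and a ScheduledExecutorService with a live scheduleWithFixedDelay task keeps the JVM alive after the script returns, so a forked Groovy build step never finishes. A minimal sketch of the two usual fixes (the class and thread names here are hypothetical): give the executor a daemon ThreadFactory, or shut it down explicitly when the script is done.
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class DaemonSchedulerSketch {
    public static void main(String[] args) throws InterruptedException {
        // A ThreadFactory that marks threads as daemon: daemon threads
        // do not keep the JVM alive once the main script returns.
        ScheduledExecutorService executorService =
                Executors.newSingleThreadScheduledExecutor(runnable -> {
                    Thread t = new Thread(runnable, "events-publisher");
                    t.setDaemon(true);
                    return t;
                });
        executorService.scheduleWithFixedDelay(
                () -> System.out.println("publishing events"), 3L, 3L, TimeUnit.SECONDS);
        Thread.sleep(10_000L); // let the task fire a few times
        // The alternative fix: shut the executor down explicitly when done.
        executorService.shutdown();
    }
}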

How to read and write an XML file in the same location?

I have prepared XML files with some content and want to load them while playing on an iOS device, but I also want to change the loaded data and serialize it into the same file again.
In the Unity Editor (Windows) it works perfectly, but when I test it on an iOS device it seems that I can read from StreamingAssets using the WWW class, but I can't write into it.
I have also found that I can read and write under the path given by Application.persistentDataPath. But that location is somewhere on the device where I can't pre-place my XML file, and users have access to that folder, so that isn't a good solution, is it?
Here is the code I use to load and save the data.
using UnityEngine;
using UnityEngine.UI;
using System.Collections;
using System.Collections.Generic;
using System.Xml.Serialization;
using System.IO;
using System.Xml;
public class testxml : MonoBehaviour {
public Text result;
public InputField firstPart, secondPart;
public Toggle toggle;
private List<int> listToSave;
// Use this for initialization
void Start () {
listToSave = new List<int>();
}
public void Save()
{
Serialize();
}
public void Load()
{
StartCoroutine(Deserialize());
}
private void Serialize()
{
string path = GetPath();
try
{
Debug.Log("trying to save");
var serializer = new XmlSerializer(typeof(List<int>));
using (var fs = new FileStream(path, FileMode.OpenOrCreate))
{
serializer.Serialize(fs, listToSave);
}
}
catch (XmlException e)
{
result.text = "error";
Debug.LogError(path + " with " + (toggle.isOn ? "persistent data path" : "data path"));
Debug.LogError("xml exc while des file : " + e.Message);
}
catch (System.Exception e)
{
result.text = "error";
Debug.LogError("exc while des file : " + e.Message);
Debug.LogError(path + " with " + (toggle.isOn ? "persistent data path" : "data path"));
System.Exception exc = e.InnerException;
int i = 0;
while (exc != null)
{
Debug.Log("inner " + i + ": " + exc.Message);
i++;
exc = exc.InnerException;
}
}
}
private IEnumerator Deserialize()
{
Debug.Log("trying to load");
string path = GetPath();
var www = new WWW(path);
yield return www;
if (www.isDone && string.IsNullOrEmpty(www.error))
{
try
{
var serializer = new XmlSerializer(typeof(List<int>));
MemoryStream ms = new MemoryStream(www.bytes);
listToSave = serializer.Deserialize(ms) as List<int>;
ms.Close();
result.text += "Done\n";
foreach (var i in listToSave)
result.text += i + "\n";
}
catch (XmlException e)
{
result.text = "error";
Debug.LogError(path + " with " + (toggle.isOn?"persistent data path":"data path"));
Debug.LogError("xml exc while des file : " + e.Message);
}
catch (System.Exception e)
{
result.text = "error";
Debug.LogError("exc while des file : " + e.Message);
Debug.LogError(path + " with " + (toggle.isOn ? "persistent data path" : "data path"));
System.Exception exc = e.InnerException;
int i = 0;
while(exc!=null)
{
Debug.Log("inner "+i+": " + exc.Message);
i++;
exc = exc.InnerException;
}
}
yield break;
}
else
{
Debug.LogError("www exc while des file " + www.error);
Debug.LogError(path + " with " + (toggle.isOn ? "persistent data path" : "data path"));
yield break;
}
}
private string GetPath()
{
string path = firstPart.text;
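// NOTE: WWW expects a URI, so for local files on iOS the path must start with "file://";
// presumably firstPart supplies that prefix (assumption). FileStream, by contrast, needs a plain path.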
if (toggle.isOn)
{
path += Application.persistentDataPath;
}
else
path += Application.dataPath;
path += secondPart.text;
return path;
}
}
"I want to put my xml file in this folder, and then read it. It's like default info for game"
Easy: just put it in your assets. Go like this...
public TextAsset myXMLFile;
In the Inspector, drag the file there. You're done.
"but then I also want to change that file and save"
Fair enough. What you have to do is
(1) make a path p = Application.persistentDataPath + "/values.txt"
(2) program launches.
(3) check if "p" exists. if yes, read it and go to (6)
(4) IF NOT, read the textasset and save that to "p"
(5) go to point (3)
(6) you're done.
It's the only way to do it; this is indeed the normal procedure in Unity, you do it in every Unity app. There's no other way! A minimal sketch follows.
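A sketch of steps (1)-(6), assuming a TextAsset field named defaultXml holds the default file (field, class, and file names here are hypothetical):
using System.IO;
using UnityEngine;

public class DefaultDataLoader : MonoBehaviour {
    public TextAsset defaultXml; // drag the default XML file here in the Inspector

    public string LoadXml() {
        // (1) build the writable path
        string p = Path.Combine(Application.persistentDataPath, "values.xml");
        // (3)/(4) first launch: copy the default data out of the build
        if (!File.Exists(p)) {
            File.WriteAllBytes(p, defaultXml.bytes);
        }
        // (6) read (and later rewrite) the file at p
        return File.ReadAllText(p);
    }
}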

sshexec ant task: environment variables

I'm using SSHExec ant task to connect to a remote host and I depend on the environment variables that are set on the remote host in order to be able to successfully execute some commands.
<sshexec host="somehost"
username="${username}"
password="${password}"
command="set"/>
Using the task, the env. variables that are output are not the same as the ones I get when I log in using an SSH client.
How can I make the env. variables of the remote host available for the session?
Actually there is something you can do about the fact it doesn't start a shell. Use the following:
<sshexec command="/bin/bash -l yourScript.sh" .../>
Using /bin/bash -l will start a login shell and then execute your script within that shell. It would be exactly as if you had a version of sshexec that properly started a login shell. It has to be a script. If you want to run a single executable command, you can do this:
<sshexec command="/bin/bash -l -c 'echo $CATALINA_HOME'" .../>
I've found out that the current SSHExec task implementation uses JSch's ChannelExec (remote execution of commands) instead of a ChannelShell (remote shell) as the connection channel.
That means that, apparently, per JSch's current implementation, a ChannelExec doesn't load env. variables.
I'm still not sure whether this is a limitation of the protocol or of the API.
The conclusion is that, for now, there's no solution to the problem unless you implement your own Ant task.
A working draft of how it would be:
package org.apache.tools.ant.taskdefs.optional.ssh;
import java.io.BufferedReader;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.PipedInputStream;
import java.io.PipedOutputStream;
import java.io.StringReader;
import org.apache.tools.ant.BuildException;
import org.apache.tools.ant.Project;
import org.apache.tools.ant.types.Resource;
import org.apache.tools.ant.types.resources.FileResource;
import org.apache.tools.ant.util.FileUtils;
import org.apache.tools.ant.util.KeepAliveOutputStream;
import org.apache.tools.ant.util.TeeOutputStream;
import com.jcraft.jsch.Channel;
import com.jcraft.jsch.ChannelExec;
import com.jcraft.jsch.JSchException;
import com.jcraft.jsch.Session;
/**
* Executes a command on a remote machine via ssh.
* @since Ant 1.6 (created February 2, 2003)
*/
public class SSHExecShellSupport extends SSHBase {
private static final String COMMAND_SEPARATOR = System.getProperty("line.separator");
private static final int BUFFER_SIZE = 8192;
private static final int RETRY_INTERVAL = 500;
/** the command to execute via ssh */
private String command = null;
/** units are milliseconds, default is 0=infinite */
private long maxwait = 0;
/** for waiting for the command to finish */
private Thread thread = null;
private String outputProperty = null; // like <exec>
private File outputFile = null; // like <exec>
private boolean append = false; // like <exec>
private Resource commandResource = null;
private boolean isShellMode;
private long maxTimeWithoutAnyData = 1000*10;
private static final String TIMEOUT_MESSAGE =
"Timeout period exceeded, connection dropped.";
public long getMaxTimeWithoutAnyData() {
return maxTimeWithoutAnyData;
}
public void setMaxTimeWithoutAnyData(long maxTimeWithoutAnyData) {
this.maxTimeWithoutAnyData = maxTimeWithoutAnyData;
}
public boolean isShellMode() {
return isShellMode;
}
public void setShellMode(boolean isShellMode) {
this.isShellMode = isShellMode;
}
/**
* Constructor for SSHExecTask.
*/
public SSHExecShellSupport() {
super();
}
/**
* Sets the command to execute on the remote host.
*
* @param command The new command value
*/
public void setCommand(String command) {
this.command = command;
}
/**
* Sets a commandResource from a file
* @param f the value to use.
* @since Ant 1.7.1
*/
public void setCommandResource(String f) {
this.commandResource = new FileResource(new File(f));
}
/**
* The connection can be dropped after a specified number of
* milliseconds. This is sometimes useful when a connection may be
* flaky. Default is 0, which means "wait forever".
*
* @param timeout The new timeout value in seconds
*/
public void setTimeout(long timeout) {
maxwait = timeout;
}
/**
* If used, stores the output of the command to the given file.
*
* @param output The file to write to.
*/
public void setOutput(File output) {
outputFile = output;
}
/**
* Determines if the output is appended to the file given in
* <code>setOutput</code>. Default is false, that is, overwrite
* the file.
*
* @param append True to append to an existing file, false to overwrite.
*/
public void setAppend(boolean append) {
this.append = append;
}
/**
* If set, the output of the command will be stored in the given property.
*
* @param property The name of the property in which the command output
* will be stored.
*/
public void setOutputproperty(String property) {
outputProperty = property;
}
/**
* Execute the command on the remote host.
*
* @exception BuildException Most likely a network error or bad parameter.
*/
public void execute() throws BuildException {
if (getHost() == null) {
throw new BuildException("Host is required.");
}
if (getUserInfo().getName() == null) {
throw new BuildException("Username is required.");
}
if (getUserInfo().getKeyfile() == null
&& getUserInfo().getPassword() == null) {
throw new BuildException("Password or Keyfile is required.");
}
if (command == null && commandResource == null) {
throw new BuildException("Command or commandResource is required.");
}
if(isShellMode){
shellMode();
} else {
commandMode();
}
}
private void shellMode() {
final Object lock = new Object();
Session session = null;
try {
session = openSession();
final Channel channel=session.openChannel("shell");
final PipedOutputStream pipedOS = new PipedOutputStream();
PipedInputStream pipedIS = new PipedInputStream(pipedOS);
final Thread commandProducerThread = new Thread("CommandsProducerThread"){
public void run() {
BufferedReader br = null;
try {
br = new BufferedReader(new InputStreamReader(commandResource.getInputStream()));
String singleCmd;
synchronized (lock) {
lock.wait(); // waits for the reception of the very first data (before commands are issued)
while ((singleCmd = br.readLine()) != null) {
singleCmd += COMMAND_SEPARATOR;
log("cmd : " + singleCmd, Project.MSG_INFO);
pipedOS.write(singleCmd.getBytes());
lock.notify();
try {
lock.wait();
} catch (InterruptedException e) {
log(e, Project.MSG_VERBOSE);
break;
}
}
log("Finished producing commands", Project.MSG_VERBOSE);
}
} catch (IOException e) {
log(e, Project.MSG_VERBOSE);
} catch (InterruptedException e) {
log(e, Project.MSG_VERBOSE);
} finally {
FileUtils.close(br);
}
}
};
ByteArrayOutputStream out = new ByteArrayOutputStream();
final TeeOutputStream tee = new TeeOutputStream(out, new KeepAliveOutputStream(System.out));
channel.setOutputStream(tee);
channel.setExtOutputStream(tee);
channel.setInputStream(pipedIS);
channel.connect();
// waits for the channel to finish receiving the data response, then asks the producer to issue one more command
thread = new Thread("DataReceiverThread") {
public void run() {
long lastTimeConsumedData = System.currentTimeMillis(); // initializes the watch
try {
InputStream in = channel.getInputStream();
byte[] tmp = new byte[1024];
while (true) {
if (thread == null) { // cleared by the main thread when maxwait (the whole-task timeout) expires
break;
}
while (in.available() > 0) {
int i = in.read(tmp, 0, 1024);
lastTimeConsumedData = System.currentTimeMillis();
if (i < 0){
break;
}
tee.write(tmp, 0, i);
}
if (channel.isClosed()) {
log("exit-status: " + channel.getExitStatus(), Project.MSG_INFO);
log("channel.isEOF(): " + channel.isEOF(), Project.MSG_VERBOSE);
log("channel.isConnected(): " + channel.isConnected(), Project.MSG_VERBOSE);
throw new BuildException("Connection lost."); // NOTE: it also can happen that if one of the command are "exit" the channel will be closed!
}
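// Handshake with the producer: once no data has arrived for maxTimeWithoutAnyData ms,
// assume the remote shell is done responding, wake the producer so it writes the next
// command, and wait briefly for it.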
synchronized (lock) {
long elapsedTimeWithoutData = (System.currentTimeMillis() - lastTimeConsumedData);
if (elapsedTimeWithoutData > maxTimeWithoutAnyData) {
log(elapsedTimeWithoutData / 1000 + " secs elapsed without any data reception. Notifying command producer.", Project.MSG_VERBOSE);
lock.notify(); // command producer is waiting for this
try {
lock.wait(500); // wait until we have new commands.
Thread.yield();
log("Continuing consumer loop. commandProducerThread.isAlive()?" + commandProducerThread.isAlive(), Project.MSG_VERBOSE);
if(!commandProducerThread.isAlive()){
log("No more commands to be issued and it's been too long without data reception. Exiting consumer.", Project.MSG_VERBOSE);
break;
}
} catch (InterruptedException e) {
log(e, Project.MSG_VERBOSE);
break;
}
lastTimeConsumedData = System.currentTimeMillis(); // resets watch
}
}
}
} catch (IOException e) {
throw new BuildException(e);
}
}
};
thread.start();
commandProducerThread.start();
thread.join(maxwait);
if (thread.isAlive()) {
// ran out of time
thread = null;
if (getFailonerror()) {
throw new BuildException(TIMEOUT_MESSAGE);
} else {
log(TIMEOUT_MESSAGE, Project.MSG_ERR);
}
} else {
//success
if (outputFile != null) {
writeToFile(out.toString(), append, outputFile);
}
// this is the wrong test if the remote OS is OpenVMS,
// but there doesn't seem to be a way to detect it.
log("Exit status (not reliable): " + channel.getExitStatus(), Project.MSG_INFO);
// int ec = channel.getExitStatus(); FIXME
// if (ec != 0) {
// String msg = "Remote command failed with exit status " + ec;
// if (getFailonerror()) {
// throw new BuildException(msg);
// } else {
// log(msg, Project.MSG_ERR);
// }
// }
}
} catch (Exception e){
throw new BuildException(e);
} finally {
if (session != null && session.isConnected()) {
session.disconnect();
}
}
}
private void commandMode() {
Session session = null;
try {
session = openSession();
/* called once */
if (command != null) {
log("cmd : " + command, Project.MSG_INFO);
ByteArrayOutputStream out = executeCommand(session, command);
if (outputProperty != null) {
//#bugzilla 43437
getProject().setNewProperty(outputProperty, command + " : " + out);
}
} else { // read command resource and execute for each command
try {
BufferedReader br = new BufferedReader(
new InputStreamReader(commandResource.getInputStream()));
String cmd;
String output = "";
while ((cmd = br.readLine()) != null) {
log("cmd : " + cmd, Project.MSG_INFO);
ByteArrayOutputStream out = executeCommand(session, cmd);
output += cmd + " : " + out + "\n";
}
if (outputProperty != null) {
//#bugzilla 43437
getProject().setNewProperty(outputProperty, output);
}
FileUtils.close(br);
} catch (IOException e) {
throw new BuildException(e);
}
}
} catch (JSchException e) {
throw new BuildException(e);
} finally {
if (session != null && session.isConnected()) {
session.disconnect();
}
}
}
private ByteArrayOutputStream executeCommand(Session session, String cmd)
throws BuildException {
ByteArrayOutputStream out = new ByteArrayOutputStream();
TeeOutputStream tee = new TeeOutputStream(out, new KeepAliveOutputStream(System.out));
try {
final ChannelExec channel;
session.setTimeout((int) maxwait);
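// maxwait doubles as the JSch session (socket) timeout, in milliseconds; the default of 0 means no timeout.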
/* execute the command */
channel = (ChannelExec) session.openChannel("exec");
channel.setCommand(cmd);
channel.setOutputStream(tee);
channel.setExtOutputStream(tee);
channel.connect();
// wait for it to finish
thread =
new Thread() {
public void run() {
while (!channel.isClosed()) {
if (thread == null) {
return;
}
try {
sleep(RETRY_INTERVAL);
} catch (Exception e) {
// ignored
}
}
}
};
thread.start();
thread.join(maxwait);
if (thread.isAlive()) {
// ran out of time
thread = null;
if (getFailonerror()) {
throw new BuildException(TIMEOUT_MESSAGE);
} else {
log(TIMEOUT_MESSAGE, Project.MSG_ERR);
}
} else {
//success
if (outputFile != null) {
writeToFile(out.toString(), append, outputFile);
}
// this is the wrong test if the remote OS is OpenVMS,
// but there doesn't seem to be a way to detect it.
int ec = channel.getExitStatus();
if (ec != 0) {
String msg = "Remote command failed with exit status " + ec;
if (getFailonerror()) {
throw new BuildException(msg);
} else {
log(msg, Project.MSG_ERR);
}
}
}
} catch (BuildException e) {
throw e;
} catch (JSchException e) {
if (e.getMessage().indexOf("session is down") >= 0) {
if (getFailonerror()) {
throw new BuildException(TIMEOUT_MESSAGE, e);
} else {
log(TIMEOUT_MESSAGE, Project.MSG_ERR);
}
} else {
if (getFailonerror()) {
throw new BuildException(e);
} else {
log("Caught exception: " + e.getMessage(),
Project.MSG_ERR);
}
}
} catch (Exception e) {
if (getFailonerror()) {
throw new BuildException(e);
} else {
log("Caught exception: " + e.getMessage(), Project.MSG_ERR);
}
}
return out;
}
/**
* Writes a string to a file. If the destination file exists, it may be
* overwritten depending on the "append" value.
*
* @param from string to write
* @param append if true, append to existing file, else overwrite
* @param to file to write to
* @exception IOException if writing to the file fails
*/
private void writeToFile(String from, boolean append, File to)
throws IOException {
FileWriter out = null;
try {
out = new FileWriter(to.getAbsolutePath(), append);
StringReader in = new StringReader(from);
char[] buffer = new char[BUFFER_SIZE];
int bytesRead;
while (true) {
bytesRead = in.read(buffer);
if (bytesRead == -1) {
break;
}
out.write(buffer, 0, bytesRead);
}
out.flush();
} finally {
if (out != null) {
out.close();
}
}
}
}
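If it helps, here is a minimal sketch of how the task above might be wired into a build file once it is compiled and on Ant's classpath. The task name, package, and classpath entries are assumptions for illustration only; the attribute names (shellmode, maxtimewithoutanydata, commandresource, outputproperty) follow Ant's setter-to-attribute introspection, and host/username/password/trust/timeout come from the SSHExec base class.
<!-- Hypothetical taskdef: adjust classname and classpath to your build layout. -->
<taskdef name="sshexec-shell"
         classname="org.example.SSHExecShellSupport"
         classpath="build/classes:lib/jsch.jar"/>

<!-- Runs the commands in commands.txt through a single interactive shell session. -->
<sshexec-shell host="somehost"
               username="${username}"
               password="${password}"
               trust="true"
               shellmode="true"
               maxtimewithoutanydata="700"
               timeout="60000"
               commandresource="commands.txt"
               outputproperty="shell.output"/>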
Another simple workaround is to source the user's .bash_profile before running your commands:
<sshexec host="somehost"
username="${username}"
password="${password}"
command="source ~/.bash_profile && set"/>
Great post, chubbsondubs. I needed to set the Oracle SID and then execute a PL/SQL script that does not exit properly, hence the echo exit piped into sqlplus.
<sshexec host="${db.ipaddr}"
verbose="true"
trust="true"
username="${scp.oracle.userid}"
password="${scp.oracle.password}"
command="echo exit | /bin/bash -l -c 'export ORACLE_SID=${db.name} ; sqlplus ${db.dbo.userid}/${db.dbo.password} #./INSTALL_REVPORT/CreateDatabase/gengrant.sql'"
/>