EntityManager em = getEntityManager();
EntityTransaction etx = em.getTransaction();
etx.begin();
Query query = em.createNamedQuery("login_procedure").setParameter("param1","user").setParameter("param2", "pw");
Integer result = 23;
try {
System.out.println("query = " + query.getSingleResult());
} catch (Exception e) {
result = null;
e.printStackTrace();
}
etx.commit();
em.close();
Executing this code, I get:
[EL Warning]: 2011-02-10 17:32:16.846--UnitOfWork(1267140342)--Exception [EclipseLink-4002] (Eclipse Persistence Services - 1.2.0.v20091016-r5565): org.eclipse.persistence.exceptions.DatabaseException
Internal Exception: org.firebirdsql.jdbc.FBSQLException: GDS Exception. 335544569. Dynamic SQL Error
SQL error code = -104
Token unknown - line 1, column 36
=
Error Code: 335544569
Call: EXECUTE PROCEDURE LOGIN_PROCEDURE(USER_NAME = ?, USER_PASSWORD = ?) bind => [user, pw]
Query: DataReadQuery(name="login_procedure" )
The -104 SQL error usually indicates a SQL syntax error.
Everything is processed without any error until query.getSingleResult() is called. Calling query.getResultList() doesn't change anything. I've tried several 1.x and 2.x EclipseLink versions. The Firebird DB version is 2.1.
The JPA2 declaration is:
@Entity
@NamedStoredProcedureQuery(
    name = "login_procedure",
    resultClass = void.class,
    procedureName = "LOGIN_PROCEDURE",
    returnsResultSet = false,
    parameters = {
        @StoredProcedureParameter(queryParameter = "param1", name = "USER_NAME", direction = Direction.IN, type = String.class),
        @StoredProcedureParameter(queryParameter = "param2", name = "USER_PASSWORD", direction = Direction.IN, type = String.class)
    }
)
@Table(name = "USERS")
public class Login implements Serializable {
    @Id
    private Long id;
}
UPDATE:
After tinkering a little bit more, I believe there might be an error in the EclipseLink implementation, as EXECUTE PROCEDURE LOGIN_PROCEDURE(USER_NAME = ?, USER_PASSWORD = ?) isn't valid Firebird 2.1 syntax for calling procedures.
By specifying name = "USER_NAME" you are making EclipseLink use the USER_NAME = ? syntax instead of just passing the parameter positionally. Try removing the name definition.
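For illustration, the parameter declarations would then look roughly like this (an untested sketch based on the annotation from the question, with only the name attributes removed):
@NamedStoredProcedureQuery(
    name = "login_procedure",
    resultClass = void.class,
    procedureName = "LOGIN_PROCEDURE",
    returnsResultSet = false,
    parameters = {
        // Without a name, EclipseLink should bind the values positionally
        // rather than generating the USER_NAME = ? style call.
        @StoredProcedureParameter(queryParameter = "param1", direction = Direction.IN, type = String.class),
        @StoredProcedureParameter(queryParameter = "param2", direction = Direction.IN, type = String.class)
    }
)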
Inspired by this post, I've found a solution/workaround:
public class JPATest {
final Session session;
JPATest() {
final String DATABASE_USERNAME = "SYSDBA";
final String DATABASE_PASSWORD = "masterkey";
final String DATABASE_URL = "jdbc:firebirdsql:dbServer/3050:e:/my/db.fdb";
final String DATABASE_DRIVER = "org.firebirdsql.jdbc.FBDriver";
final DatabaseLogin login = new DatabaseLogin();
login.setUserName(DATABASE_USERNAME);
login.setPassword(DATABASE_PASSWORD);
login.setConnectionString(DATABASE_URL);
login.setDriverClassName(DATABASE_DRIVER);
login.setDatasourcePlatform(new FirebirdPlatform());
login.bindAllParameters();
final Project project = new Project(login);
session = project.createDatabaseSession();
session.setLogLevel(SessionLog.FINE);
((DatabaseSession) session).login();
}
public static void main(String[] args) {
final JPATest jpaTest = new JPATest();
jpaTest.run();
}
protected void run() {
testProcCursor();
}
/*
* Run Proc with scalar input and cursor output
*/
@SuppressWarnings("unchecked")
private void testProcCursor() {
final StoredProcedureCall call = new StoredProcedureCall();
call.setProcedureName("LOGIN");
call.addUnamedArgument("USER_NAME"); // .addNamedArgument doesn't work
call.addUnamedArgument("USER_PASSWORD");
final DataReadQuery query = new DataReadQuery();
query.setCall(call);
query.addArgument("USER_NAME");
query.addArgument("USER_PASSWORD");
final List<String> queryArgs = new ArrayList<String>();
queryArgs.add("onlinetester");
queryArgs.add("test");
final List outList = (List) session.executeQuery(query, queryArgs);
final ListIterator<DatabaseRecord> listIterator = ((List<DatabaseRecord>) outList).listIterator();
while (listIterator.hasNext()) {
final DatabaseRecord databaseRecord = listIterator.next();
System.out.println("Value -->" + databaseRecord.getValues());
}
}
}
Apparently named parameters aren't supported in my specific configuration, but using unnamed parameters in the annotations didn't solve the problem either. However, using unnamed parameters through the native API, as shown above, solved the problem for me.
I have a PCollection<String>, say "X", that I need to dump into a BigQuery table.
The table destination and its schema are in a PCollection<TableRow>, say "Y".
How do I accomplish this in the simplest manner?
I tried extracting the table and schema from "Y" and saving them in static global variables (tableName and schema, respectively). But, oddly, BigQueryIO.writeTableRows() always sees the variable tableName as null, although it does get the schema. I logged the values of both variables and they are set.
Here is my pipeline code:
static String tableName;
static TableSchema schema;
PCollection<String> read = p.apply("Read from input file",
TextIO.read().from(options.getInputFile()));
PCollection<TableRow> tableRows = p.apply(
BigQueryIO.read().fromQuery(NestedValueProvider.of(
options.getfilename(),
new SerializableFunction<String, String>() {
@Override
public String apply(String filename) {
return "SELECT table,schema FROM `BigqueryTest.configuration` WHERE file='" + filename +"'";
}
})).usingStandardSql().withoutValidation());
final PCollectionView<List<String>> dataView = read.apply(View.asList());
tableRows.apply("Convert data read from file to TableRow",
ParDo.of(new DoFn<TableRow,TableRow>(){
@ProcessElement
public void processElement(ProcessContext c) {
tableName = c.element().get("table").toString();
String[] schemas = c.element().get("schema").toString().split(",");
List<TableFieldSchema> fields = new ArrayList<>();
for(int i=0;i<schemas.length;i++) {
fields.add(new TableFieldSchema()
.setName(schemas[i].split(":")[0]).setType(schemas[i].split(":")[1]));
}
schema = new TableSchema().setFields(fields);
//My code to convert data to TableRow format.
}}).withSideInputs(dataView));
tableRows.apply("write to BigQuery",
BigQueryIO.writeTableRows()
.withSchema(schema)
.to("ProjectID:DatasetID."+tableName)
.withWriteDisposition(BigQueryIO.Write.WriteDisposition.WRITE_TRUNCATE)
.withCreateDisposition(BigQueryIO.Write.CreateDisposition.CREATE_IF_NEEDED));
Everything else works fine; only the BigQueryIO.write operation fails, and I get the error that the TableId is null.
I also tried using a SerializableFunction and returning the value from there, but I still get null.
Here is the code that I tried for it:
tableRows.apply("write to BigQuery",
BigQueryIO.writeTableRows()
.withSchema(schema)
.to(new GetTable(tableName))
.withWriteDisposition(BigQueryIO.Write.WriteDisposition.WRITE_TRUNCATE)
.withCreateDisposition(BigQueryIO.Write.CreateDisposition.CREATE_IF_NEEDED));
public static class GetTable implements SerializableFunction<String,String> {
String table;
public GetTable(String table) {
this.table = table;
}
@Override
public String apply(String arg0) {
return "ProjectId:DatasetId."+table;
}
}
I also tried using DynamicDestinations but I get an error saying schema is not provided. Honestly I'm new to the concept of DynamicDestinations and I'm not sure that I'm doing it correctly.
Here is the code that I tried for it:
tableRows2.apply(BigQueryIO.writeTableRows()
.to(new DynamicDestinations<TableRow, TableRow>() {
private static final long serialVersionUID = 1L;
@Override
public TableDestination getTable(TableRow dest) {
List<TableRow> list = sideInput(bqDataView); //bqDataView contains table and schema
String table = list.get(0).get("table").toString();
String tableSpec = "ProjectId:DatasetId."+table;
String tableDescription = "";
return new TableDestination(tableSpec, tableDescription);
}
public String getSideInputs(PCollectionView<List<TableRow>> bqDataView) {
return null;
}
@Override
public TableSchema getSchema(TableRow destination) {
return schema; //schema is getting added from the global variable
}
@Override
public TableRow getDestination(ValueInSingleWindow<TableRow> element) {
return null;
}
}.getSideInputs(bqDataView)));
Please let me know what I'm doing wrong and which path I should take.
Thank You.
Part of the reason you're having trouble is the two stages of pipeline execution. First, the pipeline is constructed on your machine; this is when all of the applications of PTransforms occur. In your first example, this is when the following lines are executed:
BigQueryIO.writeTableRows()
.withSchema(schema)
.to("ProjectID:DatasetID."+tableName)
The code within a ParDo, however, runs when your pipeline executes, and it does so on many machines. So the following code runs much later than pipeline construction:
@ProcessElement
public void processElement(ProcessContext c) {
tableName = c.element().get("table").toString();
...
schema = new TableSchema().setFields(fields);
...
}
This means that neither the tableName nor the schema field will be set when the BigQueryIO sink is created.
Your idea to use DynamicDestinations is correct, but you need to move the code that actually generates the schema and the destination into that class, rather than relying on global variables that aren't available on all of the machines.
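To make that concrete, here is a rough, untested sketch of what that could look like, assuming the configuration row (table name plus the "name:type,name:type" schema string) is made available to the workers as a singleton side input. The names configRows, rowsToWrite and configView are placeholders, not from the original pipeline:
// Placeholder names: configRows holds the single configuration TableRow read
// from BigQuery; rowsToWrite holds the converted data rows to be written.
final PCollectionView<TableRow> configView = configRows.apply(View.<TableRow>asSingleton());
rowsToWrite.apply("write to BigQuery",
    BigQueryIO.writeTableRows()
        .to(new DynamicDestinations<TableRow, String>() {
            @Override
            public List<PCollectionView<?>> getSideInputs() {
                // Ship the configuration row to the workers.
                return ImmutableList.<PCollectionView<?>>of(configView);
            }
            @Override
            public String getDestination(ValueInSingleWindow<TableRow> element) {
                // Use the fully qualified table spec as the destination key.
                TableRow config = sideInput(configView);
                return "ProjectId:DatasetId." + config.get("table");
            }
            @Override
            public TableDestination getTable(String tableSpec) {
                return new TableDestination(tableSpec, "");
            }
            @Override
            public TableSchema getSchema(String tableSpec) {
                // Rebuild the schema from the "name:type,name:type" string.
                TableRow config = sideInput(configView);
                List<TableFieldSchema> fields = new ArrayList<>();
                for (String field : config.get("schema").toString().split(",")) {
                    String[] parts = field.split(":");
                    fields.add(new TableFieldSchema().setName(parts[0]).setType(parts[1]));
                }
                return new TableSchema().setFields(fields);
            }
        })
        .withWriteDisposition(BigQueryIO.Write.WriteDisposition.WRITE_TRUNCATE)
        .withCreateDisposition(BigQueryIO.Write.CreateDisposition.CREATE_IF_NEEDED));
With something like this in place, the static tableName and schema variables (and the ParDo code that sets them) are no longer needed.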
I am trying to build a custom batch filter that extends SimpleBatchFilter. However, I run into a problem when running it a second time to get an inverted output. Here is the relevant code and the error I am getting after both runs are completed:
Exception in thread "main" java.lang.IndexOutOfBoundsException: Index: 79, Size: 79
at java.util.ArrayList.rangeCheck(ArrayList.java:653)
at java.util.ArrayList.get(ArrayList.java:429)
at weka.core.Attribute.addStringValue(Attribute.java:994)
at weka.core.StringLocator.copyStringValues(StringLocator.java:155)
at weka.core.StringLocator.copyStringValues(StringLocator.java:91)
at weka.filters.Filter.copyValues(Filter.java:373)
at weka.filters.Filter.push(Filter.java:290)
at weka.filters.SimpleBatchFilter.batchFinished(SimpleBatchFilter.java:266)
at weka.filters.Filter.useFilter(Filter.java:667)
at likeability.Main.main(Main.java:30)
And here is the relevant code:
public class TestFilter extends SimpleBatchFilter {
private Attribute a;
private Attribute b;
private int sampleSizePercent = 15;
private boolean invert = false;
private int seed = 1;
@Override
protected Instances process(Instances inst) throws Exception {
ArrayList<Instances> partitionsA = partition(inst, a);
ArrayList<Instances> partitions = new ArrayList<Instances>();
for(Instances data: partitionsA) {
partitions.addAll(partition(data, b));
}
return getTestSet(partitions);
}
/*
* Partitions the data so that there's only one nominal value of the
* attribute a in one partition.
*/
private ArrayList<Instances> partition(Instances data, Attribute att) throws Exception {
ArrayList<Instances> instances = new ArrayList<Instances>();
for (int i = 0; i < att.numValues(); i++){
RemoveWithValues rm = new RemoveWithValues();
rm.setAttributeIndex(Integer.toString(att.index()+1));
rm.setInvertSelection(true);
rm.setNominalIndices(Integer.toString(i+1));
rm.setInputFormat(data);
instances.add(Filter.useFilter(data, rm));
}
return instances;
}
private Instances getTestSet(List<Instances> insts) throws Exception {
Instances output = new Instances(insts.get(0), 0);
for(Instances inst: insts) {
Resample filter = new Resample();
filter.setRandomSeed(seed);
filter.setNoReplacement(true);
filter.setInvertSelection(invert);
filter.setSampleSizePercent(sampleSizePercent);
filter.setInputFormat(inst);
Instances curr = Filter.useFilter(inst, filter);
System.out.println(inst.size() + " " + curr.size());
output.addAll(curr);
}
return output;
}
@Override
protected Instances determineOutputFormat(Instances arg) throws Exception {
return new Instances(arg, 0);
}
@Override
public String globalInfo() {
return "A filter which partitions the data so that each partition contains"
+ " only instances with one value of attribute a and b, then takes "
+ "a random subset of values from each partition and merges them to"
+ " produce the final set.";
}
public Capabilities getCapabilities() {
Capabilities result = super.getCapabilities();
result.enableAllAttributes();
result.enableAllClasses();
result.enable(Capability.NO_CLASS); // filter doesn't need class to be set
return result;
}
//Main and getters and setters
}
And this is how I call it:
TestFilter filter = new TestFilter();
filter.setA(data.attribute("gender"));
filter.setB(data.attribute("age"));
filter.setInputFormat(data);
Instances test = Filter.useFilter(data, filter);
filter.setInvert(true);
filter.setInputFormat(data);
Instances train = Filter.useFilter(data, filter);
It seems to me quite stupid that I would need to use those two lines between the calls. I suspect I should use isBatchFinished(); does that mean I have to implement it by extending BatchFilter rather than SimpleBatchFilter? It would also be helpful to see some successful implementations, since the only ones I could find were the ones in the WEKA manual.
I solved it by extending a Filter instead and changing the process function to batchFinished(). I am posting this answer as I have not found a custom filter example anywhere else.
@Override
public boolean batchFinished() throws Exception {
if (isFirstBatchDone()) {
invert = true;
}
if (getInputFormat() == null)
throw new NullPointerException("No input instance format defined");
Instances inst = getInputFormat();
ArrayList<Instances> partitionsA = partition(inst, a);
ArrayList<Instances> partitions = new ArrayList<Instances>();
for(Instances data: partitionsA) {
partitions.addAll(partition(data, b));
}
// Push the sampled instances and do the usual Filter bookkeeping
// (this part follows the standard weka.filters.Filter contract).
getTestSet(partitions);
flushInput();
m_NewBatch = true;
m_FirstBatchDone = true;
return (numPendingOutput() != 0);
}
private void getTestSet(List<Instances> insts) throws Exception {
for(Instances inst: insts) {
Resample filter = new Resample();
filter.setRandomSeed(seed);
filter.setNoReplacement(true);
filter.setInvertSelection(invert);
filter.setSampleSizePercent(sampleSizePercent);
filter.setInputFormat(inst);
Instances curr = Filter.useFilter(inst, filter);
System.out.println(inst.size() + " " + curr.size());
curr.forEach((i) -> push(i));
}
}
@Override
public boolean setInputFormat(Instances arg) throws Exception {
super.setInputFormat(arg);
Instances outputFormat = new Instances(arg, 0);
setOutputFormat(outputFormat);
return true;
}
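Assuming the standard Filter bookkeeping shown in batchFinished() above (flushing the input and setting m_FirstBatchDone), the filter can then be driven twice without the extra reset calls from the question. A rough usage sketch:
TestFilter filter = new TestFilter();
filter.setA(data.attribute("gender"));
filter.setB(data.attribute("age"));
filter.setInputFormat(data);
// First batch: invert is still false, so this returns the sampled test set.
Instances test = Filter.useFilter(data, filter);
// Second batch: isFirstBatchDone() is now true, so batchFinished() flips
// invert and this returns the complementary training set.
Instances train = Filter.useFilter(data, filter);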
I am trying to batch together a few Cypher queries with the REST API (using the Java binding library) so that only one call is made over the wire. But it does not seem to respect the batching on the client side, and gives this error:
java.lang.RuntimeException: Error reading as JSON ''
at org.neo4j.rest.graphdb.util.JsonHelper.readJson(JsonHelper.java:57)
at org.neo4j.rest.graphdb.util.JsonHelper.jsonToSingleValue(JsonHelper.java:62)
at org.neo4j.rest.graphdb.RequestResult.toEntity(RequestResult.java:114)
at org.neo4j.rest.graphdb.RequestResult.toMap(RequestResult.java:123)
at org.neo4j.rest.graphdb.batch.RecordingRestRequest.toMap(RecordingRestRequest.java:138)
at org.neo4j.rest.graphdb.ExecutingRestAPI.query(ExecutingRestAPI.java:489)
at org.neo4j.rest.graphdb.ExecutingRestAPI.query(ExecutingRestAPI.java:509)
at org.neo4j.rest.graphdb.RestAPIFacade.query(RestAPIFacade.java:233)
at org.neo4j.rest.graphdb.query.RestCypherQueryEngine.query(RestCypherQueryEngine.java:50)
...
Caused by: java.io.EOFException: No content to map to Object due to end of input
at org.codehaus.jackson.map.ObjectMapper._initForReading(ObjectMapper.java:2766)
at org.codehaus.jackson.map.ObjectMapper._readMapAndClose(ObjectMapper.java:2709)
at org.codehaus.jackson.map.ObjectMapper.readValue(ObjectMapper.java:1854)
at org.neo4j.rest.graphdb.util.JsonHelper.readJson(JsonHelper.java:55)
... 41 more
This is how I am trying to batch them:
graphDatabaseService.getRestAPI().executeBatch(new BatchCallback<Void>() {
@Override
public Void recordBatch(RestAPI batchRestApi) {
String query = "CREATE accounts=({userId:{userId}})-[r:OWNS]->({facebookId:{facebookId}})";
graphDatabaseService.getQueryEngine().query(query, map("userId", 1, "facebookId", "1"));
graphDatabaseService.getQueryEngine().query(query, map("userId", 2, "facebookId", "2"));
graphDatabaseService.getQueryEngine().query(query, map("userId", 3, "facebookId", "3"));
return null;
}
});
I am using neo4j version 1.9 and the corresponding client library. Should this be possible?
Here is JUnit sample code that works for your batch. No Cypher string template is used here, only native methods on the RestAPI object:
public static final DynamicRelationshipType OWNS = DynamicRelationshipType.withName("OWNS");
@Autowired
private SpringRestGraphDatabase graphDatabaseService;
@Test
public void batchTest()
{
Assert.assertNotNull(this.graphDatabaseService);
this.graphDatabaseService.getRestAPI().executeBatch(new BatchCallback<Void>()
{
@Override
public Void recordBatch(RestAPI batchRestApi)
{
for (int counter = 1; counter <= 3; counter++)
{
RestNode userId = batchRestApi.createNode(map("userId", Integer.valueOf(counter)));
RestNode facebookId = batchRestApi.createNode(map("facebookId", Integer.valueOf(counter).toString()));
batchRestApi.createRelationship(userId, facebookId, OWNS, map());
}
return null;
}
});
}
This code has been simplified for this example.
The query is actually returned from a service, which is why I would prefer to write the method this way.
[HttpGet]
public PageResult<ExceptionLog> Logging(ODataQueryOptions<ExceptionLog> options)
{
var query = from o in _exceptionLoggingService.entities.ExceptionDatas
select new ExceptionLog {
ExceptionDataId = o.ExceptionDataId,
SiteId = o.SiteId,
ExceptionDateTime = o.ExceptionDateTime,
StatusCode = o.StatusCode,
Url = o.Url,
ExceptionType = o.ExceptionType,
ExceptionMessage = o.ExceptionMessage,
Exception = o.Exception,
RequestData = o.RequestData
};
var results = options.ApplyTo(query) as IEnumerable<ExceptionLog>;
var count = results.LongCount();
return new PageResult<ExceptionLog>(results, Request.GetNextPageLink(), count);
}
The above code errors on "results.LongCount()" with the following Exception:
SqlException: The text, ntext, and image data types cannot be compared or sorted, except when using IS NULL or LIKE operator.
It appears that I'm getting an exception when trying to page, e.g. with "$top=2". Everything works fine if my query string is like "$filter=ExceptionDataId gt 100".
Since ExceptionData (the Entity) matches ExceptionLog (business model) I can do something like this as a workaround:
[HttpGet]
public PageResult<ExceptionLog> Logging(ODataQueryOptions<ExceptionData> options)
{
var query = from o in _exceptionLoggingService.entities.ExceptionDatas
orderby o.ExceptionDateTime descending
select o;
var results = from o in options.ApplyTo(query) as IEnumerable<ExceptionData>
select new ExceptionLog {
ExceptionDataId = o.ExceptionDataId,
SiteId = o.SiteId,
ExceptionDateTime = o.ExceptionDateTime,
StatusCode = o.StatusCode,
Url = o.Url,
ExceptionType = o.ExceptionType,
ExceptionMessage = o.ExceptionMessage,
Exception = o.Exception,
RequestData = o.RequestData
};
return new PageResult<ExceptionLog>(results, Request.GetNextPageLink(), results.LongCount());
}
But this doesn't completely work for me because it's a little hackish and I can't use the service's method which already gives me an IQueryable.
Another thing to note: if the Logging method is converted to return IQueryable, everything works correctly. But I need to return the count with the query, so I have to return a PageResult.
This is the workaround I'm using. I only apply the filter from the ODataQueryOptions and I manually apply the Top and Skip.
First I created some extension methods:
using System;
using System.Collections.Generic;
using System.Linq;
namespace System.Web.Http.OData.Query
{
public static class ODataQuerySettingsExtensions
{
public static IEnumerable<T> ApplyFilter<T>(this IQueryable<T> query, ODataQueryOptions<T> options)
{
if (options.Filter == null)
{
return query;
}
return options.Filter.ApplyTo(query, new ODataQuerySettings()) as IEnumerable<T>;
}
public static IEnumerable<T> ApplyTopAndTake<T>(this IEnumerable<T> query, ODataQueryOptions<T> options)
{
IEnumerable<T> value = query;
// Apply $skip before $top so paging matches standard OData semantics.
if (options.Skip != null)
{
value = value.Skip(options.Skip.Value);
}
if (options.Top != null)
{
value = value.Take(options.Top.Value);
}
return value;
}
}
}
Now my method looks like this:
[HttpGet]
public PageResult<ExceptionLog> Logging(ODataQueryOptions<ExceptionLog> options)
{
// GetLogs returns an IQueryable<ExceptionLog> as seen in Question above.
var query = _exceptionLoggingService.GetLogs()
.ApplyFilter(options);
var count = query.Count();
var results = query.ApplyTopAndTake(options);
return new PageResult<ExceptionLog>(results, Request.GetNextPageLink(), count);
}
I have an SIU S12 message that does not contain a PV2 segment. However, when I get the parsed message from NHAPI, the parent group for PV2, the SIU_S12_PATIENT group, returns 1 for currentReps("PV2"), which implies a PV2 is present.
var parser = new NHapi.Base.Parser.PipeParser();
var parsedMessage = parser.Parse(message) as NHapi.Model.V231.Message.SIU_S12;
var patientGroup=parsedMessage.GetPATIENT(0);
// This call should not create the segment if it does not exist
int pv2Count=patientGroup.currentReps("PV2");
//pv2Count is 1 here even though no PV2 segment exists in the message
//Both GetAll("PV2") and SegmentFinder also say the PV2 segment is present
//DG1RepetitionsUsed is also 1 even though no DG1 segment is present in the message
I am trying to avoid writing code to evaluate every field in the segment. PV2 is just an example - there are a lot more segments that could be missing from the message source.
I am using NHAPI v 2.4, the latest version.
Update: following Tyson's suggestion, I came up with this method:
var parser = new NHapi.Base.Parser.PipeParser();
var parsedMessage = parser.Parse(message) as NHapi.Model.V231.Message.SIU_S12;
var encodingChars = new NHapi.Base.Parser.EncodingCharacters('|', null);
var patientGroup = parsedMessage.GetPATIENT(0);
var dg1 = (NHapi.Model.V231.Segment.DG1) (patientGroup.GetStructure("DG1"));
string encodedDg1 = NHapi.Base.Parser.PipeParser.Encode(dg1, encodingChars);
bool dg1Exists = string.Compare(encodedDg1,
"DG1", StringComparison.InvariantCultureIgnoreCase)==0;
The easiest thing that I have found to determine whether a segment is in a message is to search the actual string of the message for the segment name plus a pipe. So, for example:
if(message.Contains("PV2|"))
{
//do something neat
}
From my experience, it is either that, or examining every sub-field under the segment to see if there is a value.
EDIT
I found another way to check that might work a little better. The PipeParser class has a couple of static methods that take ISegment, IGroup, and IType objects and return a string representation of the object. Sample code:
Sample code:
string validTestMessages =
"MSH|^~\\&|ADT1|MCM|LABADT|MCM|198808181126|SECURITY|ADT^A01|MSG00001|P|2.6\r" +
"EVN|A01-|198808181123\r" +
"PID|||PID1234^5^M11^HBOC^CPI^HV||JONES^WILLIAM^A^III||19610615000000|M||2106-3|1200 N ELM STREET^^GREENSBORO^NC^27401-1020|GL||||S||S|123456789|9-87654^NC\r" +
"PV1|1|I|||||TEST^TEST^TEST||||||||||||||||||||||||||||||||||||||||||||\r";
var encodingChars = new EncodingCharacters('|', null);
PipeParser parser = new PipeParser();
var message = parser.Parse(validTestMessages);
PV1 pv1 = (PV1)message.GetStructure("PV1");
var doctor = pv1.GetAttendingDoctor(0);
string encodedMessage = PipeParser.Encode(pv1, encodingChars);
Console.WriteLine(encodedMessage);
encodedMessage = PipeParser.Encode(doctor, encodingChars);
Console.WriteLine(encodedMessage);
Output:
PV1|1|I|||||TEST^TEST^TEST
TEST^TEST^TEST
If there is no segment, or the item is empty, then PipeParser will return an empty string.
You can read the segments line by line from a file, add them to an Hl7Record object, and check whether a given segment exists.
package com.sachan.ranvijay.hl7.msg;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileReader;
import org.nule.lighthl7lib.hl7.Hl7Record;
import org.nule.lighthl7lib.hl7.Hl7Segment;
import com.sachan.ranvijay.hl7.dto.HL7PatientInfoDTO;
/**
* This class parses an HL7 message. It can accept the message as a java.io.File
* as well as a String. It uses org.nule.lighthl7lib.hl7.Hl7Record
* as its main component.
* @author Ranvijay.Singh
*
*/
public class PrepareHL7Message {
StringBuilder hl7Msg = new StringBuilder();
Hl7Record record = null;
public PrepareHL7Message(File file) throws Exception {
BufferedReader reader = new BufferedReader(
new FileReader(file));
String str = reader.readLine();
while (str != null) {
hl7Msg.append(str).append("\r");
str = reader.readLine();
}
reader.close();
try{
record = new Hl7Record(hl7Msg.toString());
}catch (Exception e) {
throw e;
}
}
public PrepareHL7Message(String msg) throws Exception {
try{
record = new Hl7Record(msg);
}catch (Exception e) {
throw e;
}
}
private HL7PatientInfoDTO getPatientOrderingPhysician(HL7PatientInfoDTO padto) {
Hl7Segment seg = record.getSegment("PV1");
if(seg!=null)
padto.setOrderingPhysician(seg.field(7).toString());
return padto;
}
}
//DTO.............
package com.sachan.ranvijay.hl7.dto;
public class HL7PatientInfoDTO {
/**
* Mapped to PV1-7
*/
private String orderingPhysician;
/**
* @return the orderingPhysician
*/
public String getOrderingPhysician() {
return orderingPhysician;
}
/**
* @param orderingPhysician the orderingPhysician to set
*/
public void setOrderingPhysician(String orderingPhysician) {
this.orderingPhysician = orderingPhysician;
}
}
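Assuming getSegment(...) returns null for segments that are not present (which is what the null check above relies on), a small hypothetical helper on PrepareHL7Message could expose the existence check directly:
/**
 * Hypothetical helper: returns true if the named segment (e.g. "PV2")
 * is present in the parsed message.
 */
public boolean hasSegment(String segmentName) {
    return record.getSegment(segmentName) != null;
}
Something like new PrepareHL7Message(msg).hasSegment("PV2") would then replace the plain string search for the simple cases.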