The import org.apache.lucene.queryparser cannot be resolved - parsing

I am using Lucene 6.6 and I am having difficulty importing lucene.queryparser; I checked the Lucene documentation and it does not seem to exist anymore.
I am using below code. Is there any alternative for queryparser in lucene6.
import java.io.IOException;
import java.text.ParseException;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.StringField;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.queryparser.classic.QueryParser;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TopScoreDocCollector;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.Version;
public class HelloLucene {

    /**
     * Demonstrates basic Lucene 6.x usage: builds an in-memory index of four
     * sample books, parses a query against the "title" field (taken from
     * args[0], defaulting to "lucene"), and prints every hit as "isbn\ttitle".
     *
     * @param args optional; args[0] is used as the query string
     * @throws IOException    if index writing/reading fails
     * @throws ParseException declared for callers; query-parse errors are
     *                        actually caught and logged below
     */
    public static void main(String[] args) throws IOException, ParseException {
        // 0. Specify the analyzer for tokenizing text.
        // The same analyzer must be used for indexing and searching.
        StandardAnalyzer analyzer = new StandardAnalyzer();

        // 1. Create the index.
        Directory index = new RAMDirectory();
        IndexWriterConfig config = new IndexWriterConfig(analyzer);
        IndexWriter w = new IndexWriter(index, config);
        addDoc(w, "Lucene in Action", "193398817");
        addDoc(w, "Lucene for Dummies", "55320055Z");
        addDoc(w, "Managing Gigabytes", "55063554A");
        addDoc(w, "The Art of Computer Science", "9900333X");
        w.close();

        // 2. Build the query. "title" is the default field used when the
        // query string does not name a field explicitly.
        String querystr = args.length > 0 ? args[0] : "lucene";
        Query q = null;
        try {
            // Lucene 6 removed the Version parameter from the QueryParser
            // constructor; the signature is now (defaultField, analyzer).
            q = new QueryParser("title", analyzer).parse(querystr);
        } catch (org.apache.lucene.queryparser.classic.ParseException e) {
            e.printStackTrace();
        }

        // 3. Search.
        int hitsPerPage = 10;
        IndexReader reader = DirectoryReader.open(index);
        IndexSearcher searcher = new IndexSearcher(reader);
        // The boolean "docsScoredInOrder" flag was removed in Lucene 5;
        // create() now takes only the number of hits to collect.
        TopScoreDocCollector collector = TopScoreDocCollector.create(hitsPerPage);
        searcher.search(q, collector);
        ScoreDoc[] hits = collector.topDocs().scoreDocs;

        // 4. Display results.
        System.out.println("Found " + hits.length + " hits.");
        for (int i = 0; i < hits.length; ++i) {
            int docId = hits[i].doc;
            Document d = searcher.doc(docId);
            System.out.println((i + 1) + ". " + d.get("isbn") + "\t" + d.get("title"));
        }

        // The reader can only be closed when there is no further need to
        // access the documents.
        reader.close();
    }

    /**
     * Adds one book document: a tokenized "title" field plus an "isbn" kept
     * as a single untokenized term.
     */
    private static void addDoc(IndexWriter w, String title, String isbn) throws IOException {
        Document doc = new Document();
        doc.add(new TextField("title", title, Field.Store.YES));
        // Use a StringField for isbn because we don't want it tokenized.
        doc.add(new StringField("isbn", isbn, Field.Store.YES));
        w.addDocument(doc);
    }
}
Thanks!

The problem got solved.
Initially, in the build path, only Lucene-core-6.6.0 was added but lucene-queryparser-6.6.0 is a separate jar file that needs to be added separately.

Related

Java How to format URL as a String to connect with JSoup Malformed URL error

I have a program that connects to a user defined URL from a TextField and scrapes the images on that web page. The user defined URL is gotten from the textfield via .getText() and assigned to a String. The String is then used to connect to the Web page with JSoup and puts the webpage into a document.
String address = labelforAddress.getText();
try {
document = Jsoup.connect(address).get();
}catch(IOException ex){
ex.printStackTrace();
}
I've tried differently formatted URLS: "https://www.", "www.", "https://" but everything I use throws the malformed URL error.
Someone please show me how to get the text from the TextField the correct way.
Below is the entire code.
package sample;
import javafx.application.Application;
import javafx.fxml.FXMLLoader;
import javafx.scene.Parent;
import javafx.scene.Scene;
import javafx.scene.control.Button;
import javafx.scene.control.Label;
import javafx.scene.control.TextField;
import javafx.scene.layout.GridPane;
import javafx.stage.FileChooser;
import javafx.stage.Stage;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;
import java.io.*;
import java.net.URL;
import java.util.ArrayList;
import java.util.LinkedList;
import java.util.List;
import java.util.zip.ZipEntry;
import java.util.zip.ZipOutputStream;
public class Main extends Application {
Document document;
LinkedList<String> imageURLList = new LinkedList<String>();
ArrayList<File> fileList = new ArrayList<File>();
int fileCount = 1;
#Override
public void start(Stage primaryStage) throws Exception{
Parent root = FXMLLoader.load(getClass().getResource("sample.fxml"));
primaryStage.setTitle("Webpage Photo Scraper");
GridPane gp = new GridPane();
Label labelforAddress = new Label("URL");
GridPane.setConstraints(labelforAddress, 0,0);
TextField URLAddress = new TextField();
GridPane.setConstraints(URLAddress, 1,0);
Button scrape = new Button("Scrape for Photos");
GridPane.setConstraints(scrape, 0,1);
scrape.setOnAction(event->{
String address = labelforAddress.getText();
try {
document = Jsoup.connect(address).get();
}catch(IOException ex){
ex.printStackTrace();
}
Elements imgTags = document.getElementsByAttributeValueContaining("src", "/CharacterImages");
for(Element imgTag: imgTags){
imageURLList.add(imgTag.absUrl("src"));
}
for(String url: imageURLList){
File file = new File("C:\\Users\\Andrei\\Documents\\file" + fileCount + ".txt");
downloadFromURL(url, file);
fileList.add(file);
fileCount++;
}
});
Button exportToZipFile = new Button("Export to Zip File");
GridPane.setConstraints(exportToZipFile, 0,2);
exportToZipFile.setOnAction(event -> {
FileChooser fileChooser = new FileChooser();
FileChooser.ExtensionFilter exfilt = new FileChooser.ExtensionFilter("Zip Files", ".zip");
fileChooser.getExtensionFilters().add(exfilt);
try{
FileOutputStream fos = new FileOutputStream(fileChooser.showSaveDialog(primaryStage));
ZipOutputStream zipOut = new ZipOutputStream(fos);
for(int count = 0; count<=fileList.size()-1; count++){
File fileToZip = new File(String.valueOf(fileList.get(count)));
FileInputStream fis = new FileInputStream(fileToZip);
ZipEntry zipEntry = new ZipEntry(fileToZip.getName());
zipOut.putNextEntry(zipEntry);
byte[] bytes = new byte[1024];
int length;
while((length = fis.read(bytes)) >= 0) {
zipOut.write(bytes, 0, length);
}
fis.close();
}
zipOut.close();
fos.close();
}catch(IOException e1){
e1.printStackTrace();
}
});
primaryStage.setScene(new Scene(gp, 300, 275));
primaryStage.show();
gp.getChildren().addAll(exportToZipFile, labelforAddress, scrape, URLAddress);
}
public static void downloadFromURL(String url, File file){
try {
URL Url = new URL(url);
BufferedInputStream bis = new BufferedInputStream(Url.openStream());
FileOutputStream fis = new FileOutputStream(file);
byte[] buffer = new byte[1024];
int count = 0;
while((count = bis.read(buffer, 0,1024)) !=-1){
fis.write(buffer, 0, count);
}
fis.close();
bis.close();
}catch(IOException e){
e.printStackTrace();
}
}
public static void main(String[] args) {
launch(args);
}
}
Your text field containing the value entered by user is stored in URLAddress object but you always try to get the url from labelforAddress object which is a label always containing "URL" text.
So the solution is to use:
String address = URLAddress.getText();
If you read carefully error message it would help you to find the cause, because it always displays the value it considers wrong. In this case I see:
Caused by: java.net.MalformedURLException: no protocol: URL
and it shows the unrecognized address is: URL.
If you encounter this kind of error next time try:
debugging the aplication in runtime to see values of each variable
logging variable values in the console to see if variables contain values you expect

How read Chinese Characters from XLSX File? (Java)

I can already read in texts from xlsx cells and have:
String s = cell.getStringCellValue();
However when printing out this String, I get rubbish results. To solve this problem I used the Internet.
I tried about 8 different approaches and thus found that there is not yet a working answer on SO. I set the default encoding of my IDE and my XLSX Files to UTF-8. Pinyin can be correctly displayed.
Does anyone have an idea what could be wrong and how to solve this issue?
It is not clear where your problem with Chinese characters comes from, but I cannot reproduce it.
I have the following workbook in Excel:
The following simple code:
import org.apache.poi.ss.usermodel.*;
import org.apache.poi.xssf.usermodel.*;
import java.io.FileInputStream;
class ReadXSSFUnicodeTest {

    /**
     * Reads every string cell of the first sheet of ReadXSSFUnicodeTest.xlsx
     * and prints it to stdout. POI returns cell text as a Java String
     * (UTF-16), so no extra decoding is needed here.
     */
    public static void main(String[] args) {
        // try-with-resources closes both the stream and the workbook even if
        // reading fails (the original leaked them on exception).
        try (FileInputStream in = new FileInputStream("ReadXSSFUnicodeTest.xlsx");
             Workbook wb = WorkbookFactory.create(in)) {
            Sheet sheet = wb.getSheetAt(0);
            for (Row row : sheet) {
                for (Cell cell : row) {
                    String string = cell.getStringCellValue();
                    System.out.println(string);
                }
            }
        } catch (Exception ex) {
            ex.printStackTrace();
        }
    }
}
produces:
If the problem is that Windows is not able to display Unicode characters properly in the CMD console because it does not have a font with glyphs for them, then write the content to a text file:
import org.apache.poi.ss.usermodel.*;
import org.apache.poi.xssf.usermodel.*;
import java.io.FileInputStream;
import java.io.Writer;
import java.io.BufferedWriter;
import java.io.OutputStreamWriter;
import java.io.FileOutputStream;
class ReadXSSFUnicodeTest {

    /**
     * Same cell dump as before, but additionally writes the content to
     * ReadXSSFUnicodeTest.txt encoded as UTF-8, so the characters can be
     * inspected in a text editor even when the console font lacks the glyphs.
     */
    public static void main(String[] args) {
        // try-with-resources guarantees writer and workbook are closed even
        // on failure (the original leaked both on exception).
        try (Writer out = new BufferedWriter(new OutputStreamWriter(
                     new FileOutputStream("ReadXSSFUnicodeTest.txt"), "UTF-8"));
             Workbook wb = WorkbookFactory.create(new FileInputStream("ReadXSSFUnicodeTest.xlsx"))) {
            Sheet sheet = wb.getSheetAt(0);
            for (Row row : sheet) {
                for (Cell cell : row) {
                    String string = cell.getStringCellValue();
                    // \r\n so the file renders line breaks in Windows Notepad.
                    out.write(string + "\r\n");
                    System.out.println(string);
                }
            }
        } catch (Exception ex) {
            ex.printStackTrace();
        }
    }
}
This file then should have proper content even in Windows Notepad:
You could also use Swing (JTextArea) to provide your own output area for test outputs:
import org.apache.poi.ss.usermodel.*;
import org.apache.poi.xssf.usermodel.*;
import java.io.FileInputStream;
import java.io.Writer;
import java.io.BufferedWriter;
import java.io.OutputStreamWriter;
import java.io.FileOutputStream;
import javax.swing.*;
import java.awt.*;
class ReadXSSFUnicodeTest {

    /**
     * Dumps every string cell of the workbook into a Swing JTextArea instead
     * of the console, sidestepping console font/encoding limitations.
     */
    public ReadXSSFUnicodeTest() {
        // try-with-resources closes the workbook even if a cell read fails.
        try (Workbook wb = WorkbookFactory.create(new FileInputStream("ReadXSSFUnicodeTest.xlsx"))) {
            MySystemOut mySystemOut = new MySystemOut();
            Sheet sheet = wb.getSheetAt(0);
            for (Row row : sheet) {
                for (Cell cell : row) {
                    String string = cell.getStringCellValue();
                    mySystemOut.println(string);
                }
            }
        } catch (Exception ex) {
            ex.printStackTrace();
        }
    }

    public static void main(String[] args) {
        new ReadXSSFUnicodeTest();
    }

    /**
     * Minimal System.out replacement: a JTextArea in its own frame that
     * accumulates appended lines.
     *
     * NOTE(review): the component is created and updated off the Event
     * Dispatch Thread; acceptable for a throwaway test, but production code
     * should route UI work through SwingUtilities.invokeLater.
     */
    private class MySystemOut extends JTextArea {
        // Accumulated text; re-set wholesale on every println.
        private String output = "";

        private MySystemOut() {
            super();
            this.setLineWrap(true);
            JFrame frame = new JFrame("My System Outputs");
            frame.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE);
            JScrollPane areaScrollPane = new JScrollPane(this);
            areaScrollPane.setVerticalScrollBarPolicy(JScrollPane.VERTICAL_SCROLLBAR_ALWAYS);
            areaScrollPane.setPreferredSize(new Dimension(350, 150));
            frame.getContentPane().add(areaScrollPane, BorderLayout.CENTER);
            frame.pack();
            frame.setVisible(true);
        }

        private void println(String output) {
            // Appending to a String and re-setting the whole text is O(n^2)
            // overall — fine for a small test utility.
            this.output += output + "\r\n";
            this.setText(this.output);
            this.revalidate();
        }
    }
}
This is only the simplest way and only for getting test outputs, since it does not use Swing correctly in terms of AWT threading.
I had the same problem while extracting Persian text from an Excel file.
I was using ECLIPSE and change settings like:
Window -> Preferences -> Expand General and
Click Workspace, text file encoding (near bottom) has an encoding chooser.
Select "Other" radio button -> Select UTF-8 from the drop down.
Click Apply and OK button OR click simply OK button
use this Code:
String new_Str = new String(excelfield.getBytes(1), "Cp1256"); //....to Persian text
String new_Str = new String(excelfield.getBytes(1), "UTF-8"); //....to Chinese text
OR
String new_Str = new String(your_str.getBytes(), "Cp1256");
String new_Str = new String(your_str.getBytes(), "UTF-8");

tika PackageParser does not work with directories

I am writing a class to recursively extract files from inside a zip file and produce them to a Kafka queue for further processing. My intent is to be able to extract files from multiple levels of zip. The code below is my implementation of the tika ContainerExtractor to do this.
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.Stack;
import org.apache.commons.lang.StringUtils;
import org.apache.tika.config.TikaConfig;
import org.apache.tika.detect.DefaultDetector;
import org.apache.tika.detect.Detector;
import org.apache.tika.exception.TikaException;
import org.apache.tika.extractor.ContainerExtractor;
import org.apache.tika.extractor.EmbeddedResourceHandler;
import org.apache.tika.io.TemporaryResources;
import org.apache.tika.io.TikaInputStream;
import org.apache.tika.metadata.Metadata;
import org.apache.tika.mime.MediaType;
import org.apache.tika.parser.AbstractParser;
import org.apache.tika.parser.AutoDetectParser;
import org.apache.tika.parser.ParseContext;
import org.apache.tika.parser.Parser;
import org.apache.tika.parser.pkg.PackageParser;
import org.xml.sax.ContentHandler;
import org.xml.sax.SAXException;
import org.xml.sax.helpers.DefaultHandler;
public class UberContainerExtractor implements ContainerExtractor {
/**
*
*/
private static final long serialVersionUID = -6636138154366178135L;
// statically populate SUPPORTED_TYPES
static {
Set<MediaType> supportedTypes = new HashSet<MediaType>();
ParseContext context = new ParseContext();
supportedTypes.addAll(new PackageParser().getSupportedTypes(context));
SUPPORTED_TYPES = Collections.unmodifiableSet(supportedTypes);
}
/**
* A stack that maintains the parent filenames for the recursion
*/
Stack<String> parentFileNames = new Stack<String>();
/**
* The default tika parser
*/
private final Parser parser;
/**
* Default tika detector
*/
private final Detector detector;
/**
* The supported container types into which we can recurse
*/
public final static Set<MediaType> SUPPORTED_TYPES;
/**
* The number of documents recursively extracted from the container and its
* children containers if present
*/
int extracted;
public UberContainerExtractor() {
this(TikaConfig.getDefaultConfig());
}
public UberContainerExtractor(TikaConfig config) {
this(new DefaultDetector(config.getMimeRepository()));
}
public UberContainerExtractor(Detector detector) {
this.parser = new AutoDetectParser(new PackageParser());
this.detector = detector;
}
public boolean isSupported(TikaInputStream input) throws IOException {
MediaType type = detector.detect(input, new Metadata());
return SUPPORTED_TYPES.contains(type);
}
#Override
public void extract(TikaInputStream stream, ContainerExtractor recurseExtractor, EmbeddedResourceHandler handler)
throws IOException, TikaException {
ParseContext context = new ParseContext();
context.set(Parser.class, new RecursiveParser(recurseExtractor, handler));
try {
Metadata metadata = new Metadata();
parser.parse(stream, new DefaultHandler(), metadata, context);
} catch (SAXException e) {
throw new TikaException("Unexpected SAX exception", e);
}
}
private class RecursiveParser extends AbstractParser {
/**
*
*/
private static final long serialVersionUID = -7260171956667273262L;
private final ContainerExtractor extractor;
private final EmbeddedResourceHandler handler;
private RecursiveParser(ContainerExtractor extractor, EmbeddedResourceHandler handler) {
this.extractor = extractor;
this.handler = handler;
}
public Set<MediaType> getSupportedTypes(ParseContext context) {
return parser.getSupportedTypes(context);
}
public void parse(InputStream stream, ContentHandler ignored, Metadata metadata, ParseContext context)
throws IOException, SAXException, TikaException {
TemporaryResources tmp = new TemporaryResources();
try {
TikaInputStream tis = TikaInputStream.get(stream, tmp);
// Figure out what we have to process
String filename = metadata.get(Metadata.RESOURCE_NAME_KEY);
MediaType type = detector.detect(tis, metadata);
if (extractor == null) {
// do nothing
} else {
// Use a temporary file to process the stream
File file = tis.getFile();
System.out.println("file is directory = " + file.isDirectory());
// Recurse and extract if the filetype is supported
if (SUPPORTED_TYPES.contains(type)) {
System.out.println("encountered a supported file:" + filename);
parentFileNames.push(filename);
extractor.extract(tis, extractor, handler);
parentFileNames.pop();
} else { // produce the file
List<String> parentFilenamesList = new ArrayList<String>(parentFileNames);
parentFilenamesList.add(filename);
String originalFilepath = StringUtils.join(parentFilenamesList, "/");
System.out.println("producing " + filename + " with originalFilepath:" + originalFilepath
+ " to kafka queue");
++extracted;
}
}
} finally {
tmp.dispose();
}
}
}
public int getExtracted() {
return extracted;
}
public static void main(String[] args) throws IOException, TikaException {
String filename = "/Users/rohit/Data/cd.zip";
File file = new File(filename);
TikaInputStream stream = TikaInputStream.get(file);
ContainerExtractor recursiveExtractor = new UberContainerExtractor();
EmbeddedResourceHandler resourceHandler = new EmbeddedResourceHandler() {
#Override
public void handle(String filename, MediaType mediaType, InputStream stream) {
// do nothing
}
};
recursiveExtractor.extract(stream, recursiveExtractor, resourceHandler);
stream.close();
System.out.println("extracted " + ((UberContainerExtractor) recursiveExtractor).getExtracted() + " files");
}
}
It works on multiple levels of zip as long as the files inside the zips are in a flat structure. for ex.
cd.zip
- c.txt
- d.txt
The code does not work if the files in the zip are present inside a directory. For example:
ab.zip
- ab/
- a.txt
- b.txt
While debugging I came across the following code snippet in the PackageParser
try {
ArchiveEntry entry = ais.getNextEntry();
while (entry != null) {
if (!entry.isDirectory()) {
parseEntry(ais, entry, extractor, xhtml);
}
entry = ais.getNextEntry();
}
} finally {
ais.close();
}
I tried to comment out the if condition but it did not work. Is there a reason why this is commented? Is there any way of getting around this?
I am using tika version 1.6
Tackling your question in reverse order:
Is there a reason why this is commented?
Entries in zip files are either directories or files. If files, they include the name of the directory they come from. As such, Tika doesn't need to do anything with the directories, all it needs to do is process the embedded files as and when they come up
The code does not work if there the files in the zip are present inside a directory. for ex. ab.zip - ab/ - a.txt - b.txt
You seem to be doing something wrong then. Tika's recursion and package parser handle zips with folders in them just fine!
To prove this, start with a zip file like this:
$ unzip -l ../tt.zip
Archive: ../tt.zip
Length Date Time Name
--------- ---------- ----- ----
0 2015-02-03 16:42 t/
0 2015-02-03 16:42 t/t2/
0 2015-02-03 16:42 t/t2/t3/
164404 2015-02-03 16:42 t/t2/t3/test.jpg
--------- -------
164404 4 files
Now, make use of the -z extraction flag of the Tika App, which causes Tika to extract all of the embedded contents of a file. Run like that, we get:
$ java -jar tika-app-1.7.jar -z ../tt.zip
Extracting 't/t2/t3/test.jpg' (image/jpeg) to ./t/t2/t3/test.jpg
Then list the resulting directory, and we see
$ find . -type f
./t/t2/t3/Test.jpg
I can't see what's wrong with your code, but sadly for you we've shown that the problem is there, and not with Tika... You'd be best off reviewing the various examples of recursion that Tika provides, such as the Tika App tool and the Recursing Parser Wrapper, then re-write your code to be something simple based from those

Twitter authentication credentials

I am new to social network analysis and the Twitter API. I wanted to collect tweets on a certain topic, so I have written the following code:
package com;
import java.io.BufferedWriter;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.util.List;
import twitter4j.Query;
import twitter4j.QueryResult;
import twitter4j.Status;
import twitter4j.Tweet;
import twitter4j.Twitter;
import twitter4j.TwitterException;
import twitter4j.TwitterFactory;
public class TwitterSearchAdvance {

    /**
     * Searches Twitter for "Airtel" tweets and appends each tweet to
     * /home/vishal/FirstDocu.txt while echoing it to stdout.
     *
     * Fixes over the original: the writer is opened once (try-with-resources)
     * instead of being re-created per tweet, and each tweet is written exactly
     * once — the original re-wrote the whole accumulated StringBuffer on every
     * iteration, duplicating earlier tweets in the output file.
     */
    public static void main(String[] vishal) throws TwitterException, IOException {
        Twitter twitter = new TwitterFactory().getInstance();
        // 'true' keeps append mode across program runs, as before.
        try (BufferedWriter bw = new BufferedWriter(
                new FileWriter(new File("/home/vishal/FirstDocu.txt"), true))) {
            for (int page = 0; page <= 4; page++) {
                // NOTE(review): the page counter is never applied to the
                // query, so the same first page is fetched five times —
                // presumably a paging call was intended; confirm against the
                // twitter4j Query API.
                Query query = new Query("Airtel");
                QueryResult qr = twitter.search(query);
                List<Status> qrTweets = qr.getTweets();
                System.out.println("-------------------" + qrTweets.size());
                for (Status t : qrTweets) {
                    System.out.println(t.getCreatedAt() + ": " + t.getText());
                    bw.write(t.toString());
                    bw.newLine();
                }
            }
        }
    }
}
But when I run the program, the following error appears:
Exception in thread "main" java.lang.IllegalStateException: Authentication credentials are missing. See http://twitter4j.org/configuration.html for the detail.
at twitter4j.TwitterBaseImpl.ensureAuthorizationEnabled(TwitterBaseImpl.java:200)
at twitter4j.TwitterImpl.get(TwitterImpl.java:1833)
at twitter4j.TwitterImpl.search(TwitterImpl.java:282)
at com.TwitterSearchAdvance.main(TwitterSearchAdvance.java:28)
Where do I need to provide credentials in my program?
Thanks
Have a look at the options here http://twitter4j.org/en/configuration.html
There are a number of ways to provide credentials to your program:
Properties File
ConfigurationBuilder class
System properties
Environment Variables
All details and instructions can be found in the link

Primefaces dataExporter - inline PDF in new window without footer

I'm trying to use Primefaces's dataExporter component. I have two problems with it (for now):
In column footers of datatable I have a p:commandButton. What happens to me is that when I export datatable with dataExporter to PDF I can see that it added column footers to PDF and wrote something like this: javax.faces.component.UIPanel#9e08b9 (it just called toString of UIComponent I suppose). Is there any way to instruct dataExporter to ignore column footers?
I want to try to somehow open a new window with the generated PDF and show that PDF inline on a new page. I don't want to see a "Download file" prompt. Is this possible?
After some time, and coding I finally succeeded to make this work. Primefaces doesn't have this functions included so I have to override default behavior little bit.
First I created custom PDFExporter to skip footer printing:
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import javax.faces.context.ExternalContext;
import javax.faces.context.FacesContext;
import org.primefaces.component.datatable.DataTable;
import org.primefaces.component.export.PDFExporter;
import com.lowagie.text.DocumentException;
import com.lowagie.text.pdf.PdfPTable;
public class CustomPDFExporter extends PDFExporter {
#Override
protected void addColumnFacets(DataTable table, PdfPTable pdfTable,
ColumnType columnType) {
if (columnType == ColumnType.HEADER) {
super.addColumnFacets(table, pdfTable, columnType);
}
}
#Override
protected void writePDFToResponse(ExternalContext externalContext,
ByteArrayOutputStream baos, String fileName) throws IOException,
DocumentException {
FacesContext.getCurrentInstance().getExternalContext().getSessionMap()
.put("reportBytes", baos.toByteArray());
FacesContext.getCurrentInstance().getExternalContext().getSessionMap()
.put("reportName", fileName);
}
}
As you can see, report data is added in session.
Now the DataExporter of Primefaces should be rewritten:
import java.io.IOException;
import javax.el.ELContext;
import javax.el.MethodExpression;
import javax.el.ValueExpression;
import javax.faces.FacesException;
import javax.faces.component.StateHolder;
import javax.faces.component.UIComponent;
import javax.faces.context.FacesContext;
import javax.faces.event.ActionEvent;
import javax.faces.event.ActionListener;
import javax.servlet.ServletContext;
import javax.servlet.http.HttpServletRequest;
import org.primefaces.component.datatable.DataTable;
import org.primefaces.component.export.CSVExporter;
import org.primefaces.component.export.Exporter;
import org.primefaces.component.export.ExporterType;
import org.primefaces.component.export.XMLExporter;
import org.primefaces.context.RequestContext;
import asw.iis.common.ui.beans.DatatableBackingBean;
/**
 * Replacement for PrimeFaces' DataExporter action listener. It delegates to
 * the stock exporters for XLS/CSV/XML, but for PDF uses CustomPDFExporter
 * (which stashes the bytes in the session) and then opens a popup window
 * pointing at the /report.pdf servlet so the PDF is shown inline instead of
 * being offered as a download.
 *
 * Implements StateHolder so the captured EL expressions survive the JSF
 * save/restore-state lifecycle; the index order in saveState/restoreState
 * must stay in sync.
 */
public class CustomDataExporter implements ActionListener, StateHolder {
// EL expressions captured at creation time and re-evaluated per request.
private ValueExpression target;
private ValueExpression type;
private ValueExpression fileName;
private ValueExpression encoding;
private MethodExpression preProcessor;
private MethodExpression postProcessor;
// Backing bean whose stampaPreProcess/stampaPostProcess hooks bracket the export.
private DatatableBackingBean<?> datatableBB;
// No-arg constructor required so JSF can instantiate before restoreState().
public CustomDataExporter() {}
public CustomDataExporter(ValueExpression target, ValueExpression type, ValueExpression fileName, ValueExpression encoding,
MethodExpression preProcessor, MethodExpression postProcessor, DatatableBackingBean<?> datatableBB) {
this.target = target;
this.type = type;
this.fileName = fileName;
this.encoding = encoding;
this.preProcessor = preProcessor;
this.postProcessor = postProcessor;
this.datatableBB = datatableBB;
}
/**
 * Resolves the target DataTable and export type from EL, runs the matching
 * exporter, and for PDF opens a popup on /report.pdf; for other formats the
 * exporter has already written the response, so it is marked complete.
 */
public void processAction(ActionEvent event) {
FacesContext context = FacesContext.getCurrentInstance();
try {
// Pre-export hook on the backing bean.
datatableBB.stampaPreProcess();
ELContext elContext = context.getELContext();
// Evaluate the captured EL expressions against the current request.
String tableId = (String) target.getValue(elContext);
String exportAs = (String) type.getValue(elContext);
String outputFileName = (String) fileName.getValue(elContext);
String encodingType = "UTF-8";
if(encoding != null) {
encodingType = (String) encoding.getValue(elContext);
}
try {
Exporter exporter;
ExporterType exporterType = ExporterType.valueOf(exportAs.toUpperCase());
// PDF (and the fallback) go through CustomPDFExporter, which stores the
// result in the session instead of writing it to the response.
switch(exporterType) {
case XLS:
exporter = new ExcelExporter();
break;
case PDF:
exporter = new CustomPDFExporter();
break;
case CSV:
exporter = new CSVExporter();
break;
case XML:
exporter = new XMLExporter();
break;
default:
exporter = new CustomPDFExporter();
break;
}
UIComponent component = event.getComponent().findComponent(tableId);
if(component == null) {
throw new FacesException("Cannot find component \"" + tableId + "\" in view.");
}
if(!(component instanceof DataTable)) {
throw new FacesException("Unsupported datasource target:\"" + component.getClass().getName() + "\", exporter must target a PrimeFaces DataTable.");
}
DataTable table = (DataTable) component;
exporter.export(context, table, outputFileName, false, false, encodingType, preProcessor, postProcessor);
if ("pdf".equals(exportAs)) {
// Open a popup on the servlet that serves the session-stored bytes.
String path = ((ServletContext)(FacesContext.getCurrentInstance().getExternalContext().getContext())).getContextPath();
RequestContext.getCurrentInstance().execute("window.open('" + path + "/report.pdf', '_blank', 'dependent=yes, menubar=no, toolbar=no')");
} else {
// Non-PDF exporters wrote the response directly; stop JSF rendering.
context.responseComplete();
}
}
catch (IOException e) {
throw new FacesException(e);
}
}
finally {
// Always clean up the request flag and run the post-export hook.
((HttpServletRequest) context.getExternalContext().getRequest()).removeAttribute("vrstaStampe");
datatableBB.stampaPostProcess();
}
}
// Never transient: state must be saved across postbacks.
public boolean isTransient() {
return false;
}
public void setTransient(boolean value) {
}
// restoreState must read values in the exact order saveState wrote them.
public void restoreState(FacesContext context, Object state) {
Object values[] = (Object[]) state;
target = (ValueExpression) values[0];
type = (ValueExpression) values[1];
fileName = (ValueExpression) values[2];
preProcessor = (MethodExpression) values[3];
postProcessor = (MethodExpression) values[4];
encoding = (ValueExpression) values[5];
datatableBB = (DatatableBackingBean<?>) values[6];
}
public Object saveState(FacesContext context) {
Object values[] = new Object[7];
values[0] = target;
values[1] = type;
values[2] = fileName;
values[3] = preProcessor;
values[4] = postProcessor;
values[5] = encoding;
values[6] = datatableBB;
return ((Object[]) values);
}
}
Main job here is to instantiate CustomPDFExporter, and open new window in case when report type is PDF.
Now I wrote simple Servlet to handle this request and print report data:
import java.io.IOException;
import javax.servlet.ServletException;
import javax.servlet.annotation.WebServlet;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
#WebServlet("/report.pdf")
public class PdfReportServlet extends HttpServlet {
private static final long serialVersionUID = 1L;
#Override
protected void doGet(HttpServletRequest req, HttpServletResponse resp)
throws ServletException, IOException {
byte[] b = (byte[]) req.getSession().getAttribute("reportBytes");
if (b == null) {
b = new byte[0];
}
resp.setContentType("application/pdf");
resp.setContentLength(b.length);
resp.getOutputStream().write(b);
req.getSession().removeAttribute("reportBytes");
}
}
This will configure HTTP header, and print bytes of report to output stream. Also it removes report bytes from session.
Finally, as I handle this printing from dynamically created menu button I add this action listener in code:
MenuItem item = ...;
ELContext el = FacesContext.getCurrentInstance().getELContext();
ExpressionFactory ef = FacesContext.getCurrentInstance().getApplication().getExpressionFactory();
ValueExpression typeEL = ef.createValueExpression(el, "#{startBean.reportType}", String.class);
ValueExpression fileEL = ef.createValueExpression(el, datasource, String.class);
ValueExpression targetEL = ef.createValueExpression(el, ":" + datatableId + ":datatableForm:datatable", String.class);
ValueExpression encodingEL = ef.createValueExpression(el, "ISO-8859-2", String.class);
CustomDataExporter de = new CustomDataExporter(targetEL, typeEL, fileEL, encodingEL, null, null, this);
item.setAjax(true);
item.addActionListener(de);

Resources