tika PackageParser does not work with directories - apache-tika

I am writing a class to recursively extract files from inside a zip file and produce them to a Kafka queue for further processing. My intent is to be able to extract files from multiple levels of zip. The code below is my implementation of the tika ContainerExtractor to do this.
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.Stack;
import org.apache.commons.lang.StringUtils;
import org.apache.tika.config.TikaConfig;
import org.apache.tika.detect.DefaultDetector;
import org.apache.tika.detect.Detector;
import org.apache.tika.exception.TikaException;
import org.apache.tika.extractor.ContainerExtractor;
import org.apache.tika.extractor.EmbeddedResourceHandler;
import org.apache.tika.io.TemporaryResources;
import org.apache.tika.io.TikaInputStream;
import org.apache.tika.metadata.Metadata;
import org.apache.tika.mime.MediaType;
import org.apache.tika.parser.AbstractParser;
import org.apache.tika.parser.AutoDetectParser;
import org.apache.tika.parser.ParseContext;
import org.apache.tika.parser.Parser;
import org.apache.tika.parser.pkg.PackageParser;
import org.xml.sax.ContentHandler;
import org.xml.sax.SAXException;
import org.xml.sax.helpers.DefaultHandler;
public class UberContainerExtractor implements ContainerExtractor {
/**
*
*/
private static final long serialVersionUID = -6636138154366178135L;
// statically populate SUPPORTED_TYPES
static {
Set<MediaType> supportedTypes = new HashSet<MediaType>();
ParseContext context = new ParseContext();
supportedTypes.addAll(new PackageParser().getSupportedTypes(context));
SUPPORTED_TYPES = Collections.unmodifiableSet(supportedTypes);
}
/**
* A stack that maintains the parent filenames for the recursion
*/
Stack<String> parentFileNames = new Stack<String>();
/**
* The default tika parser
*/
private final Parser parser;
/**
* Default tika detector
*/
private final Detector detector;
/**
* The supported container types into which we can recurse
*/
public final static Set<MediaType> SUPPORTED_TYPES;
/**
* The number of documents recursively extracted from the container and its
* children containers if present
*/
int extracted;
public UberContainerExtractor() {
this(TikaConfig.getDefaultConfig());
}
public UberContainerExtractor(TikaConfig config) {
this(new DefaultDetector(config.getMimeRepository()));
}
public UberContainerExtractor(Detector detector) {
this.parser = new AutoDetectParser(new PackageParser());
this.detector = detector;
}
public boolean isSupported(TikaInputStream input) throws IOException {
MediaType type = detector.detect(input, new Metadata());
return SUPPORTED_TYPES.contains(type);
}
@Override
public void extract(TikaInputStream stream, ContainerExtractor recurseExtractor, EmbeddedResourceHandler handler)
throws IOException, TikaException {
ParseContext context = new ParseContext();
context.set(Parser.class, new RecursiveParser(recurseExtractor, handler));
try {
Metadata metadata = new Metadata();
parser.parse(stream, new DefaultHandler(), metadata, context);
} catch (SAXException e) {
throw new TikaException("Unexpected SAX exception", e);
}
}
private class RecursiveParser extends AbstractParser {
/**
*
*/
private static final long serialVersionUID = -7260171956667273262L;
private final ContainerExtractor extractor;
private final EmbeddedResourceHandler handler;
private RecursiveParser(ContainerExtractor extractor, EmbeddedResourceHandler handler) {
this.extractor = extractor;
this.handler = handler;
}
public Set<MediaType> getSupportedTypes(ParseContext context) {
return parser.getSupportedTypes(context);
}
public void parse(InputStream stream, ContentHandler ignored, Metadata metadata, ParseContext context)
throws IOException, SAXException, TikaException {
TemporaryResources tmp = new TemporaryResources();
try {
TikaInputStream tis = TikaInputStream.get(stream, tmp);
// Figure out what we have to process
String filename = metadata.get(Metadata.RESOURCE_NAME_KEY);
MediaType type = detector.detect(tis, metadata);
if (extractor == null) {
// do nothing
} else {
// Use a temporary file to process the stream
File file = tis.getFile();
System.out.println("file is directory = " + file.isDirectory());
// Recurse and extract if the filetype is supported
if (SUPPORTED_TYPES.contains(type)) {
System.out.println("encountered a supported file:" + filename);
parentFileNames.push(filename);
extractor.extract(tis, extractor, handler);
parentFileNames.pop();
} else { // produce the file
List<String> parentFilenamesList = new ArrayList<String>(parentFileNames);
parentFilenamesList.add(filename);
String originalFilepath = StringUtils.join(parentFilenamesList, "/");
System.out.println("producing " + filename + " with originalFilepath:" + originalFilepath
+ " to kafka queue");
++extracted;
}
}
} finally {
tmp.dispose();
}
}
}
public int getExtracted() {
return extracted;
}
public static void main(String[] args) throws IOException, TikaException {
String filename = "/Users/rohit/Data/cd.zip";
File file = new File(filename);
TikaInputStream stream = TikaInputStream.get(file);
ContainerExtractor recursiveExtractor = new UberContainerExtractor();
EmbeddedResourceHandler resourceHandler = new EmbeddedResourceHandler() {
@Override
public void handle(String filename, MediaType mediaType, InputStream stream) {
// do nothing
}
};
recursiveExtractor.extract(stream, recursiveExtractor, resourceHandler);
stream.close();
System.out.println("extracted " + ((UberContainerExtractor) recursiveExtractor).getExtracted() + " files");
}
}
It works on multiple levels of zip as long as the files inside the zips are in a flat structure, for example:
cd.zip
- c.txt
- d.txt
The code does not work if the files in the zip are inside a directory, for example:
ab.zip
- ab/
- a.txt
- b.txt
While debugging, I came across the following code snippet in the PackageParser:
try {
ArchiveEntry entry = ais.getNextEntry();
while (entry != null) {
if (!entry.isDirectory()) {
parseEntry(ais, entry, extractor, xhtml);
}
entry = ais.getNextEntry();
}
} finally {
ais.close();
}
I tried removing the if condition, but that did not work. Is there a reason why directory entries are skipped? Is there any way of getting around this?
I am using Tika version 1.6.

Tackling your question in reverse order:
Is there a reason why directory entries are skipped?
Entries in zip files are either directories or files. If files, they include the name of the directory they come from. As such, Tika doesn't need to do anything with the directory entries; all it needs to do is process the embedded files as and when they come up.
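To see what those entries look like, here is a minimal sketch (my own, not Tika code) that lists the entries of a zip with Apache Commons Compress, the library PackageParser builds on; "ab.zip" is just the example name from the question:
import java.io.FileInputStream;
import org.apache.commons.compress.archivers.zip.ZipArchiveEntry;
import org.apache.commons.compress.archivers.zip.ZipArchiveInputStream;
public class ListZipEntries {
    public static void main(String[] args) throws Exception {
        try (ZipArchiveInputStream zis =
                new ZipArchiveInputStream(new FileInputStream("ab.zip"))) {
            ZipArchiveEntry entry;
            while ((entry = zis.getNextZipEntry()) != null) {
                // Prints "ab/" for the directory entry and "ab/a.txt", "ab/b.txt"
                // for the files; the file entries already carry the directory prefix,
                // which is why the directory entries themselves can be skipped.
                System.out.println(entry.getName() + (entry.isDirectory() ? "  [dir]" : ""));
            }
        }
    }
}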
The code does not work if the files in the zip are inside a directory, for example: ab.zip - ab/ - a.txt - b.txt
You seem to be doing something wrong then. Tika's recursion and package parser handle zips with folders in them just fine!
To prove this, start with a zip file like this:
$ unzip -l ../tt.zip
Archive: ../tt.zip
Length Date Time Name
--------- ---------- ----- ----
0 2015-02-03 16:42 t/
0 2015-02-03 16:42 t/t2/
0 2015-02-03 16:42 t/t2/t3/
164404 2015-02-03 16:42 t/t2/t3/test.jpg
--------- -------
164404 4 files
Now, make use of the -z extraction flag of the Tika App, which causes Tika to extract all of the embedded contents of a file. Run like that, and we get:
$ java -jar tika-app-1.7.jar -z ../tt.zip
Extracting 't/t2/t3/test.jpg' (image/jpeg) to ./t/t2/t3/test.jpg
Then list the resulting directory, and we see:
$ find . -type f
./t/t2/t3/test.jpg
I can't see exactly what's wrong with your code, but as shown above the problem is there, and not with Tika. You'd be best off reviewing the various examples of recursion that Tika provides, such as the Tika App tool and the RecursiveParserWrapper, then re-writing your code as something simpler based on those.
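If you do want to handle the recursion in your own code, a minimal sketch of the RecursiveParserWrapper approach looks roughly like the following. This is my own sketch based on the Tika 1.7-era API (the constructor and the getMetadata() call changed in later releases), and "ab.zip" is just a placeholder input:
import java.io.File;
import java.io.InputStream;
import java.util.List;
import org.apache.tika.io.TikaInputStream;
import org.apache.tika.metadata.Metadata;
import org.apache.tika.parser.AutoDetectParser;
import org.apache.tika.parser.ParseContext;
import org.apache.tika.parser.RecursiveParserWrapper;
import org.apache.tika.sax.BasicContentHandlerFactory;
import org.xml.sax.helpers.DefaultHandler;
public class RecursiveParserWrapperExample {
    public static void main(String[] args) throws Exception {
        // Wrap the normal AutoDetectParser; the handler factory decides what
        // text content (if any) is kept for each embedded document.
        RecursiveParserWrapper wrapper = new RecursiveParserWrapper(
                new AutoDetectParser(),
                new BasicContentHandlerFactory(
                        BasicContentHandlerFactory.HANDLER_TYPE.TEXT, -1));
        try (InputStream stream = TikaInputStream.get(new File("ab.zip"))) {
            wrapper.parse(stream, new DefaultHandler(), new Metadata(), new ParseContext());
        }
        // One Metadata object per embedded document, including files nested in directories
        List<Metadata> metadataList = wrapper.getMetadata();
        for (Metadata m : metadataList) {
            System.out.println(m.get(Metadata.RESOURCE_NAME_KEY));
        }
    }
}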

Related

Does Apache Beam support custom file names for its output?

In a distributed processing environment it is common to use "part" file names such as "part-000", but is it possible to write an extension of some sort to rename the individual output file names (such as per-window file names) in Apache Beam?
To do this, one might have to be able to assign a name to a window or infer a file name based on the window's content. I would like to know if such an approach is possible.
As to whether the solution should be streaming or batch, a streaming-mode example is preferable.
Yes, as suggested by jkff, you can achieve this using TextIO.write().to(FilenamePolicy).
Examples are below:
If you want to write the output to a particular local file you can use:
lines.apply(TextIO.write().to("/path/to/file.txt"));
Below is a simple way to write the output using a prefix. This example uses Google Cloud Storage; you can use local or S3 paths instead.
import java.util.Arrays;
import org.apache.beam.sdk.Pipeline;
import org.apache.beam.sdk.io.TextIO;
import org.apache.beam.sdk.options.PipelineOptions;
import org.apache.beam.sdk.options.PipelineOptionsFactory;
import org.apache.beam.sdk.transforms.Count;
import org.apache.beam.sdk.transforms.Filter;
import org.apache.beam.sdk.transforms.FlatMapElements;
import org.apache.beam.sdk.transforms.MapElements;
import org.apache.beam.sdk.values.KV;
import org.apache.beam.sdk.values.TypeDescriptors;
public class MinimalWordCountJava8 {
public static void main(String[] args) {
PipelineOptions options = PipelineOptionsFactory.create();
// In order to run your pipeline, you need to make following runner specific changes:
//
// CHANGE 1/3: Select a Beam runner, such as BlockingDataflowRunner
// or FlinkRunner.
// CHANGE 2/3: Specify runner-required options.
// For BlockingDataflowRunner, set project and temp location as follows:
// DataflowPipelineOptions dataflowOptions = options.as(DataflowPipelineOptions.class);
// dataflowOptions.setRunner(BlockingDataflowRunner.class);
// dataflowOptions.setProject("SET_YOUR_PROJECT_ID_HERE");
// dataflowOptions.setTempLocation("gs://SET_YOUR_BUCKET_NAME_HERE/AND_TEMP_DIRECTORY");
// For FlinkRunner, set the runner as follows. See {@code FlinkPipelineOptions}
// for more details.
// options.as(FlinkPipelineOptions.class)
// .setRunner(FlinkRunner.class);
Pipeline p = Pipeline.create(options);
p.apply(TextIO.read().from("gs://apache-beam-samples/shakespeare/*"))
.apply(FlatMapElements
.into(TypeDescriptors.strings())
.via((String word) -> Arrays.asList(word.split("[^\\p{L}]+"))))
.apply(Filter.by((String word) -> !word.isEmpty()))
.apply(Count.<String>perElement())
.apply(MapElements
.into(TypeDescriptors.strings())
.via((KV<String, Long> wordCount) -> wordCount.getKey() + ": " + wordCount.getValue()))
// CHANGE 3/3: The Google Cloud Storage path is required for outputting the results.
.apply(TextIO.write().to("gs://YOUR_OUTPUT_BUCKET/AND_OUTPUT_PREFIX"));
p.run().waitUntilFinish();
}
}
This example code will give you more control over writing the output:
/**
* A {@link FilenamePolicy} produces a base file name for a write based on metadata about the data
* being written. This always includes the shard number and the total number of shards. For
* windowed writes, it also includes the window and pane index (a sequence number assigned to each
* trigger firing).
*/
protected static class PerWindowFiles extends FilenamePolicy {
private final ResourceId prefix;
public PerWindowFiles(ResourceId prefix) {
this.prefix = prefix;
}
public String filenamePrefixForWindow(IntervalWindow window) {
String filePrefix = prefix.isDirectory() ? "" : prefix.getFilename();
return String.format(
"%s-%s-%s", filePrefix, formatter.print(window.start()), formatter.print(window.end()));
}
@Override
public ResourceId windowedFilename(int shardNumber,
int numShards,
BoundedWindow window,
PaneInfo paneInfo,
OutputFileHints outputFileHints) {
IntervalWindow intervalWindow = (IntervalWindow) window;
String filename =
String.format(
"%s-%s-of-%s%s",
filenamePrefixForWindow(intervalWindow),
shardNumber,
numShards,
outputFileHints.getSuggestedFilenameSuffix());
return prefix.getCurrentDirectory().resolve(filename, StandardResolveOptions.RESOLVE_FILE);
}
@Override
public ResourceId unwindowedFilename(
int shardNumber, int numShards, OutputFileHints outputFileHints) {
throw new UnsupportedOperationException("Unsupported.");
}
}
@Override
public PDone expand(PCollection<InputT> teamAndScore) {
if (windowed) {
teamAndScore
.apply("ConvertToRow", ParDo.of(new BuildRowFn()))
.apply(new WriteToText.WriteOneFilePerWindow(filenamePrefix));
} else {
teamAndScore
.apply("ConvertToRow", ParDo.of(new BuildRowFn()))
.apply(TextIO.write().to(filenamePrefix));
}
return PDone.in(teamAndScore.getPipeline());
}
Yes. Per the documentation of TextIO:
If you want better control over how filenames are generated than the default policy allows, a custom FilenamePolicy can also be set using TextIO.Write.to(FilenamePolicy)
This is a perfectly valid example with Beam 2.1.0. You can call it on your data (a PCollection, for example):
import org.apache.beam.sdk.io.FileBasedSink.FilenamePolicy;
import org.apache.beam.sdk.io.TextIO;
import org.apache.beam.sdk.io.fs.ResolveOptions.StandardResolveOptions;
import org.apache.beam.sdk.io.fs.ResourceId;
import org.apache.beam.sdk.transforms.display.DisplayData;
@SuppressWarnings("serial")
public class FilePolicyExample {
public static void main(String[] args) {
FilenamePolicy policy = new WindowedFilenamePolicy("somePrefix");
// "data" stands for the PCollection you want to write
data.apply(TextIO.write().to("your_DIRECTORY")
.withFilenamePolicy(policy)
.withWindowedWrites()
.withNumShards(4));
}
private static class WindowedFilenamePolicy extends FilenamePolicy {
final String outputFilePrefix;
WindowedFilenamePolicy(String outputFilePrefix) {
this.outputFilePrefix = outputFilePrefix;
}
@Override
public ResourceId windowedFilename(
ResourceId outputDirectory, WindowedContext input, String extension) {
String filename = String.format(
"%s-%s-%s-of-%s-pane-%s%s%s",
outputFilePrefix,
input.getWindow(),
input.getShardNumber(),
input.getNumShards() - 1,
input.getPaneInfo().getIndex(),
input.getPaneInfo().isLast() ? "-final" : "",
extension);
return outputDirectory.resolve(filename, StandardResolveOptions.RESOLVE_FILE);
}
@Override
public ResourceId unwindowedFilename(
ResourceId outputDirectory, Context input, String extension) {
throw new UnsupportedOperationException("Expecting windowed outputs only");
}
@Override
public void populateDisplayData(DisplayData.Builder builder) {
builder.add(DisplayData.item("fileNamePrefix", outputFilePrefix)
.withLabel("File Name Prefix"));
}
}
}
You can check https://beam.apache.org/releases/javadoc/2.3.0/org/apache/beam/sdk/io/FileIO.html for more information; search for "File naming" under "Writing files".
.apply(
FileIO.<RootElement>write()
.via(XmlIO
.sink(RootElement.class)
.withRootElement(ROOT_XML_ELEMENT)
.withCharset(StandardCharsets.UTF_8))
.to(FILE_PATH)
.withNaming((window, pane, numShards, shardIndex, compression) -> NEW_FILE_NAME));
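For completeness, here is a self-contained sketch of the same withNaming hook using a plain-text sink instead of XmlIO; the output directory and file name pattern are hypothetical, and this assumes Beam 2.3 or later:
import org.apache.beam.sdk.Pipeline;
import org.apache.beam.sdk.io.FileIO;
import org.apache.beam.sdk.io.TextIO;
import org.apache.beam.sdk.options.PipelineOptionsFactory;
import org.apache.beam.sdk.transforms.Create;
public class FileNamingExample {
    public static void main(String[] args) {
        Pipeline p = Pipeline.create(PipelineOptionsFactory.fromArgs(args).create());
        p.apply(Create.of("a", "b", "c"))
         .apply(FileIO.<String>write()
             .via(TextIO.sink())
             .to("/tmp/naming-example")   // hypothetical output directory
             .withNumShards(1)
             // FileNaming is a single-method interface, so a lambda works here
             .withNaming((window, pane, numShards, shardIndex, compression) ->
                 String.format("events-%05d-of-%05d.txt", shardIndex, numShards)));
        p.run().waitUntilFinish();
    }
}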

Generate one file for a list of parsed files using source_gen in dart

I have a list of models from which I need to create a mini reflection system.
I analyzed the Serializable package and understood how to create one generated file per source file; however, I couldn't find out how to create one file for a whole group of files.
So, how can I dynamically generate one file, using source_gen, from a list of files?
Example:
Files
user.dart
category.dart
Generated:
info.dart (containing information from user.dart and category.dart)
I found out how to do it with the help of people on Gitter.
You must have one file, even if empty, to call the generator. In my example, it is lib/batch.dart.
source_gen: ^0.5.8
Here is the working code:
The tool/build.dart
import 'package:build_runner/build_runner.dart';
import 'package:raoni_global/phase.dart';
main() async {
PhaseGroup pg = new PhaseGroup()
..addPhase(batchModelablePhase(const ['lib/batch.dart']));
await build(pg,
deleteFilesByDefault: true);
}
The phase:
batchModelablePhase([Iterable<String> globs =
const ['bin/**.dart', 'web/**.dart', 'lib/**.dart']]) {
return new Phase()
..addAction(
new GeneratorBuilder(const
[const BatchGenerator()], isStandalone: true
),
new InputSet(new PackageGraph.forThisPackage().root.name, globs));
}
The generator:
import 'dart:async';
import 'package:analyzer/dart/element/element.dart';
import 'package:build/build.dart';
import 'package:source_gen/source_gen.dart';
import 'package:glob/glob.dart';
import 'package:build_runner/build_runner.dart';
class BatchGenerator extends Generator {
final String path;
const BatchGenerator({this.path: 'lib/models/*.dart'});
@override
Future<String> generate(Element element, BuildStep buildStep) async {
// this makes sure we parse one time only
if (element is! LibraryElement)
return null;
String libraryName = 'raoni_global', filePath = 'lib/src/model.dart';
String className = 'Modelable';
// find the files at the path designed
var l = buildStep.findAssets(new Glob(path));
// get the type of annotation that we will use to search classes
var resolver = await buildStep.resolver;
var assetWithAnnotationClass = new AssetId(libraryName, filePath);
var annotationLibrary = resolver.getLibrary(assetWithAnnotationClass);
var exposed = annotationLibrary.getType(className).type;
// the calling library's name
String libName = new PackageGraph.forThisPackage().root.name;
await Future.forEach(l.toList(), (AssetId aid) async {
LibraryElement lib;
try {
lib = resolver.getLibrary(aid);
} catch (e) {}
if (lib != null && Utils.isNotEmpty(lib.name)) {
// all objects within the file
lib.units.forEach((CompilationUnitElement unit) {
// only the types, not methods
unit.types.forEach((ClassElement el) {
// only the ones annotated
if (el.metadata.any((ElementAnnotation ea) =>
ea.computeConstantValue().type == exposed)) {
// use it
}
});
});
}
});
return '''
$libName
''';
}
}
It seems what you want is what this issue is about: How to generate one output from many inputs (aggregate builder)?
[Günter]'s answer helped me somewhat.
Buried in that thread is another thread which links to a good example of an aggregating builder:
https://github.com/matanlurey/build/blob/147083da9b6a6c70c46eb910a3e046239a2a0a6e/docs/writing_an_aggregate_builder.md
The gist is this:
import 'package:build/build.dart';
import 'package:glob/glob.dart';
class AggregatingBuilder implements Builder {
/// Glob of all input files
static final inputFiles = new Glob('lib/**');
@override
Map<String, List<String>> get buildExtensions {
/// '$lib$' is a synthetic input that is used to
/// force the builder to build only once.
return const {'\$lib$': const ['all_files.txt']};
}
@override
Future<void> build(BuildStep buildStep) async {
/// Do some operation on the files
final files = <String>[];
await for (final input in buildStep.findAssets(inputFiles)) {
files.add(input.path);
}
String fileContent = files.join('\n');
/// Write to the file
final outputFile = AssetId(buildStep.inputId.package,'lib/all_files.txt');
return buildStep.writeAsString(outputFile, fileContent);
}
}

Apache Commons IO FileUtils listFiles: how to get list of files with no extension?

I am trying to get a list of files with no extension using org.apache.commons.io.FileUtils.listFiles(), like here:
http://www.avajava.com/tutorials/lessons/how-do-i-get-all-files-with-certain-extensions-in-a-directory-including-subdirectories.html
package test;
import java.io.File;
import java.io.IOException;
import java.util.List;
import org.apache.commons.io.FileUtils;
public class GetAllFilesInDirectoryBasedOnExtensions {
public static void main(String[] args) throws IOException {
File dir = new File("dir");
String[] extensions = new String[] { "txt", "jsp" };
System.out.println("Getting all .txt and .jsp files in " + dir.getCanonicalPath()
+ " including those in subdirectories");
List<File> files = (List<File>) FileUtils.listFiles(dir, extensions, true);
for (File file : files) {
System.out.println("file: " + file.getCanonicalPath());
}
}
}
But I need a list of files with no extension. I've tried {".", ""}, but that didn't help. Is it possible at all?
As @Jens reported in his comment, a null parameter does the trick:
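As a rough sketch of that approach (my own reading of the hint, not the original answer's exact code): passing null as the extensions array disables the suffix filter entirely, so listFiles returns every file, and you can then keep only the names that have no extension:
import java.io.File;
import java.util.Collection;
import org.apache.commons.io.FileUtils;
import org.apache.commons.io.FilenameUtils;
public class GetFilesWithNoExtension {
    public static void main(String[] args) {
        File dir = new File("dir");
        // null extensions => no suffix filtering, all files are returned
        Collection<File> files = FileUtils.listFiles(dir, null, true);
        for (File file : files) {
            // keep only files whose name has no extension
            if (FilenameUtils.getExtension(file.getName()).isEmpty()) {
                System.out.println("file: " + file);
            }
        }
    }
}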

Neo4J 2.0.2 + Gremlin plugin: the console doesn't run

I'm experimenting with the Neo4j + Gremlin plugin. Unfortunately, I get the error below when I try to use the Gremlin web console (the console doesn't run the Gremlin language):
SEVERE: The exception contained within MappableContainerException could not be mapped to a response, re-throwing to the HTTP container
java.lang.NoClassDefFoundError: org/neo4j/server/logging/Logger
at org.neo4j.server.webadmin.console.GremlinSession.<clinit>(GremlinSession.java:42)
at org.neo4j.server.webadmin.console.GremlinSessionCreator.newSession(GremlinSessionCreator.java:35)
......
Any suggestions?
Many thanks!
The Neo4j 2.0.2 server changed its Logger class, so I modified the GremlinSession.java code to use the java.util.logging.Logger class instead of org.neo4j.server.logging.Logger. With that change the Gremlin web console works fine.
Here is the new GremlinSession.java code:
/**
* Copyright (c) 2002-2014 "Neo Technology,"
* Network Engine for Objects in Lund AB [http://neotechnology.com]
*
* This file is part of Neo4j.
*
* Neo4j is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.neo4j.server.webadmin.console;
import com.tinkerpop.blueprints.TransactionalGraph;
import com.tinkerpop.blueprints.impls.neo4j2.Neo4j2Graph;
import groovy.lang.Binding;
import groovy.lang.GroovyRuntimeException;
import org.codehaus.groovy.tools.shell.IO;
import org.neo4j.graphdb.Transaction;
import org.neo4j.helpers.Pair;
import org.neo4j.server.database.Database;
import java.util.logging.Logger;
import java.util.logging.Level;
import java.io.BufferedOutputStream;
import java.io.ByteArrayOutputStream;
import java.io.PrintStream;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
public class GremlinSession implements ScriptSession {
private static final String INIT_FUNCTION = "init()";
private static final Logger log = Logger.getLogger(GremlinSession.class.getName());
private final Database database;
private final IO io;
private final ByteArrayOutputStream baos = new ByteArrayOutputStream();
private final List<String> initialBindings;
protected GremlinWebConsole scriptEngine;
public GremlinSession(Database database) {
this.database = database;
PrintStream out = new PrintStream(new BufferedOutputStream(baos));
io = new IO(System.in, out, out);
Map<String, Object> bindings = new HashMap<String, Object>();
bindings.put("g", getGremlinWrappedGraph());
bindings.put("out", out);
initialBindings = new ArrayList<String>(bindings.keySet());
try {
scriptEngine = new GremlinWebConsole(new Binding(bindings), io);
} catch (final Exception failure) {
scriptEngine = new GremlinWebConsole() {
@Override
public void execute(String script) {
io.out.println("Could not start Groovy during Gremlin initialization, reason:");
failure.printStackTrace(io.out);
}
};
}
}
/**
* Take some gremlin script, evaluate it in the context of this gremlin
* session, and return the result.
*
* @param script
* @return the return string of the evaluation result, or the exception
* message.
*/
@Override
public Pair<String, String> evaluate(String script) {
String result = null;
try (Transaction tx = database.getGraph().beginTx()) {
if (script.equals(INIT_FUNCTION)) {
result = init();
} else {
try {
scriptEngine.execute(script);
result = baos.toString();
} finally {
resetIO();
}
}
tx.success();
} catch (GroovyRuntimeException ex) {
log.log(Level.SEVERE, ex.toString());
result = ex.getMessage();
}
return Pair.of(result, null);
}
private String init() {
StringBuilder out = new StringBuilder();
out.append("\n");
out.append(" \\,,,/\n");
out.append(" (o o)\n");
out.append("-----oOOo-(_)-oOOo-----\n");
out.append("\n");
out.append("Available variables:\n");
for (String variable : initialBindings) {
out.append(" " + variable + "\t= ");
out.append(evaluate(variable));
}
out.append("\n");
return out.toString();
}
private void resetIO() {
baos.reset();
}
private TransactionalGraph getGremlinWrappedGraph() {
Neo4j2Graph neo4jGraph = null;
try {
neo4jGraph = new Neo4j2Graph(database.getGraph());
} catch (Exception e) {
throw new RuntimeException(e);
}
return neo4jGraph;
}
}

Jena programming error while reading from an input .rdf file. Please guide me

package sample;
import java.io.InputStream;
import com.hp.hpl.jena.rdf.model.Model;
import com.hp.hpl.jena.rdf.model.ModelFactory;
import com.hp.hpl.jena.util.FileManager;
public class ReadRDF extends Object {
static final String fileName = "foaf-ijd.rdf";
public static void main(String[] args) {
Model model = ModelFactory.createDefaultModel();
InputStream in = FileManager.get().open(fileName);
if (in == null) {
throw new IllegalArgumentException("File: " + fileName
+ " not found");
}
model.read(in, "");
model.write(System.out);
}
}
The error that gets produced:
Exception in thread "main" java.lang.NoSuchMethodError: org.slf4j.Logger.isTraceEnabled()Z
    at com.hp.hpl.jena.util.LocatorFile.open(LocatorFile.java:118)
    at com.hp.hpl.jena.util.FileManager.openNoMapOrNull(FileManager.java:527)
    at com.hp.hpl.jena.util.FileManager.openNoMap(FileManager.java:510)
    at com.hp.hpl.jena.util.LocationMapper.initFromPath(LocationMapper.java:132)
    at com.hp.hpl.jena.util.LocationMapper.get(LocationMapper.java:61)
    at com.hp.hpl.jena.util.FileManager.makeGlobal(FileManager.java:116)
    at com.hp.hpl.jena.util.FileManager.get(FileManager.java:82)
    at sample.ReadRDF.main(ReadRDF.java:17)
This error can appear if you don't add all of the jar files from the /lib directory of the Jena distribution to your CLASSPATH.
It can also appear if the slf4j version you use differs from the slf4j version that Jena was built against.
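A quick way to check both things (my own diagnostic sketch, not from the original answer) is to print which jar the slf4j API is loaded from and which binding it picked up; Logger.isTraceEnabled() only appeared in slf4j around version 1.4, so a very old slf4j-api jar earlier on the classpath would explain the NoSuchMethodError:
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class Slf4jCheck {
    public static void main(String[] args) {
        // Which jar the slf4j API classes are loaded from
        System.out.println(Logger.class.getProtectionDomain()
                .getCodeSource().getLocation());
        // Which logging implementation slf4j has actually bound to
        System.out.println(LoggerFactory.getILoggerFactory().getClass().getName());
    }
}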
