groupingBy on stream<String[]> - java-stream

I have a situation where I have to generate a Map from a List.
I have a List as below:
List<String> li = new ArrayList<String>();
li.add("A:abc");
li.add("B:xyz");
li.add("C:mno");
li.add("B:bbb");
li.add("A:aaa");
li.add("C:xxx");
I want to generate a Map from this as below:
Map<String, String[]> or Map<String, List<String>>
I wrote the expression below:
li.stream().map(i->i.split(":")).collect(Collectors.groupingBy(i->i[0]));
but the resultant map is Map<String, List<String[]>>, which is not what I expected.

Try it like this:
stream the list,
split each string,
group by the first element of the array from the split,
then use mapping to put the second element of each array into the list.
List<String> li = new ArrayList<String>();
li.add("A:abc");
li.add("B:xyz");
li.add("C:mno");
li.add("B:bbb");
li.add("A:aaa");
li.add("C:xxx");
Map<String, List<String>> map =
li.stream().map(str -> str.split(":"))
.collect(Collectors.groupingBy(a -> a[0],
Collectors.mapping(a -> a[1],
Collectors.toList())));
map.entrySet().forEach(System.out::println);
prints
A=[abc, aaa]
B=[xyz, bbb]
C=[mno, xxx]

The answer from @WJS is perfect. However, if you want to get a Map<String, String[]> as you asked in the question, you might consider extending it using Collectors.collectingAndThen and List#toArray:
Map<String, String[]> result = li.stream()
.map(x -> x.split(":"))
.collect(Collectors.groupingBy(
x -> x[0],
Collectors.mapping(
x -> x[1],
Collectors.collectingAndThen(
Collectors.toList(),
x -> x.toArray(String[]::new)
)
)
));
result.forEach((key, value) -> System.out.println(key + " : " + Arrays.toString(value)));
Prints the expected result
A : [abc, aaa]
B : [xyz, bbb]
C : [mno, xxx]
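Note that List#toArray(IntFunction) was only added in Java 11. If you are stuck on Java 8, the finisher can use the array-argument overload instead; a minimal variation of the collector above:
Collectors.collectingAndThen(
        Collectors.toList(),
        x -> x.toArray(new String[0])  // Java 8 compatible
)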

I want to generate a Map from this as below Map<String, String[]> or
Map<String, List<String>>
You do not need to call .map; just collect the stream using Collectors.groupingBy(s -> s.split(":")[0]).
Demo:
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
public class Main {
public static void main(String[] args) {
List<String> li = new ArrayList<String>();
li.add("A:abc");
li.add("B:xyz");
li.add("C:mno");
li.add("B:bbb");
li.add("A:aaa");
li.add("C:xxx");
Map<String, List<String>> map =
li.stream().collect(Collectors.groupingBy(s -> s.split(":")[0]));
map.entrySet().forEach(System.out::println);
}
}
Output:
A=[A:abc, A:aaa]
B=[B:xyz, B:bbb]
C=[C:mno, C:xxx]
However, if you are expecting a result like
A=[abc, aaa]
B=[xyz, bbb]
C=[mno, xxx]
you can call .map as you are doing, group by the first element of the array obtained from the split, and map the values to a list of each array's second element:
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
public class Main {
public static void main(String[] args) {
List<String> li = new ArrayList<String>();
li.add("A:abc");
li.add("B:xyz");
li.add("C:mno");
li.add("B:bbb");
li.add("A:aaa");
li.add("C:xxx");
Map<String, List<String>> map =
li.stream()
.map(s -> s.split(":"))
.collect(
Collectors.groupingBy(
arr -> arr[0], Collectors.mapping(
arr -> arr[1], Collectors.toList()
)
)
);
map.entrySet().forEach(System.out::println);
}
}

Related

How to change the final type after reduction of a downstream collector in a Java 8 stream?

I've got a legacy application using data structures like those in the following toy snippet, and I can't easily change these data structures.
I use a Java 8 (only) stream to do some stats, and I failed to get the desired type using Collectors.
package myIssueWithCollector;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.function.BinaryOperator;
import java.util.stream.Collectors;
public class MyIssueWithCollector {
public static Double latitude(Map<String, String> map) {
String latitude = map.get("LATITUDE");
return Double.valueOf(latitude);
}
private static int latitudeComparator(double d1, double d2) {
// get around the fact that NaN > +Infinity in Double.compare()
if (Double.isNaN(d1) && !Double.isNaN(d2)) {
return -1;
}
if (!Double.isNaN(d1) && Double.isNaN(d2)) {
return 1;
}
return Double.compare(Math.abs(d1), Math.abs(d2));
}
public static Map<String, String> createMap(String city, String country, String continent, String latitude) {
Map<String, String> map = new HashMap<>();
map.put("CITY", city);
map.put("COUNTRY", country);
map.put("CONTINENT", continent);
map.put("LATITUDE", latitude);
return map;
}
public static void main(String[] args) {
// Cities with dummy latitudes
// I cannot easily change these legacy data structures
Map<String, String> map1 = createMap("London", "UK", "Europa", "48.1");
Map<String, String> map2 = createMap("New York", "USA", "America", "42.4");
Map<String, String> map3 = createMap("Miami", "USA", "America", "39.1");
Map<String, String> map4 = createMap("Glasgow", "UK", "Europa", "49.2");
Map<String, String> map5 = createMap("Camelot", "UK", "Europa", "NaN");
List<Map<String, String>> maps = new ArrayList<>(4);
maps.add(map1);
maps.add(map2);
maps.add(map3);
maps.add(map4);
maps.add(map5);
//////////////////////////////////////////////////////////////////
// My issue starts here:
//////////////////////////////////////////////////////////////////
Map<String, Map<String, Double>> result = maps.stream()
.collect(Collectors.groupingBy(m -> m.get("CONTINENT"),
Collectors.groupingBy(m -> m.get("COUNTRY"), Collectors.reducing(Double.NaN, m -> latitude(m),
BinaryOperator.maxBy((d1, d2) -> latitudeComparator(d1, d2))))));
System.out.println(result);
}
}
I need the result type to be
Map<String, Map<String, String>> instead of Map<String, Map<String, Double>>
by converting "LATITUDE" back from Double to String (using a custom format, not Double.toString()).
I failed to achieve this with Collectors methods like andThen or collectingAndThen, ...
I am currently stuck with Java 8.
Is there a way to get a Map<String, Map<String, String>> result using the same stream?
Instead of using Collectors.reducing(…) with BinaryOperator.maxBy(…) you can also use Collectors.maxBy. Since this collector doesn’t support an identity value, it requires a finisher function to extract the value from an Optional, but your task requires a finisher anyway, to format the value.
Map<String, Map<String,String>> result = maps.stream()
.collect(Collectors.groupingBy(m -> m.get("CONTINENT"),
Collectors.groupingBy(m -> m.get("COUNTRY"),
Collectors.mapping(MyIssueWithCollector::latitude,
Collectors.collectingAndThen(
Collectors.maxBy(MyIssueWithCollector::latitudeComparator),
o -> format(o.get()))))));
This assumes format to be your custom format function like
private static String format(double d) {
return String.format("%.2f", d);
}
But sometimes, it might be worthwhile to implement your own collector instead of combining multiple built-in collectors.
Map<String, Map<String,String>> result = maps.stream()
.collect(Collectors.groupingBy(m -> m.get("CONTINENT"),
Collectors.groupingBy(m -> m.get("COUNTRY"),
Collector.of(
() -> new double[]{Double.NEGATIVE_INFINITY},
(a, m) -> {
double d = latitude(m);
if(!Double.isNaN(d)) a[0] = Double.max(a[0], d);
},
(a, b) -> a[0] >= b[0]? a: b,
a -> format(a[0])))));
A collector maintains its state in a mutable container; this custom collector uses an array of length one to hold a double value (which eliminates the need to box it into Double objects). Instead of implementing a special comparator to treat NaN specially, it uses a conditional so that NaN never gets into the array in the first place. That's why the combiner doesn't need to care about NaN; it can simply return the larger of the two values.
The finisher function just invokes the custom format function with the double value.
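With the sample data from the question, both variants should print something like the following (assuming a locale that formats decimals with a dot; map iteration order may vary):
{America={USA=42.40}, Europa={UK=49.20}}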
You can use Collectors.collectingAndThen to convert the reduced double value to a corresponding String:
Map<String, Map<String, String>> result = maps.stream().collect(
Collectors.groupingBy(
m -> m.get("CONTINENT"),
Collectors.groupingBy(
m -> m.get("COUNTRY"),
Collectors.collectingAndThen(
Collectors.reducing(
Double.NaN,
m -> latitude(m),
BinaryOperator.maxBy(
(d1, d2) -> latitudeComparator(d1, d2)
)
),
MyIssueWithCollector::myToString
)
)
)
);
Here, myToString is a method in the MyIssueWithCollector class that returns a String from a double using your custom format, for example:
public static String myToString(double d) {
return "[latitude=" + d + "]";
}
Using Collectors.reducing, you can keep the latitude's String type in the identity so that the downstream collector returns a String:
Map<String, Map<String, String>> result = maps.stream()
.collect(
Collectors.groupingBy(m -> m.get("CONTINENT"),
Collectors.groupingBy(m -> m.get("COUNTRY"),
Collectors.reducing("NaN", m -> m.get("LATITUDE"),
BinaryOperator.maxBy((s1, s2) -> latitudeComparator(Double.valueOf(s1), Double.valueOf(s2)))))));
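Since this variant never parses the final value back into a double, the result keeps the original string formatting from the source maps, e.g. (map iteration order may vary):
{America={USA=42.4}, Europa={UK=49.2}}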

How can I access the key in a subclass of CombineFn when combining a PCollection of KV pairs?

I'm implementing the CombinePerKeyExample using a subclass of CombineFn instead of using an implementation of SerializableFunction
package me.examples;
import org.apache.beam.sdk.coders.AvroCoder;
import org.apache.beam.sdk.coders.DefaultCoder;
import org.apache.beam.sdk.transforms.Combine.CombineFn;
import java.util.HashSet;
import java.util.Set;
public class ConcatWordsCombineFn extends CombineFn<String, ConcatWordsCombineFn.Accumulator, String> {
@DefaultCoder(AvroCoder.class)
public static class Accumulator{
HashSet<String> plays;
}
@Override
public Accumulator createAccumulator(){
Accumulator accumulator = new Accumulator();
accumulator.plays = new HashSet<>();
return accumulator;
}
@Override
public Accumulator addInput(Accumulator accumulator, String input){
accumulator.plays.add(input);
return accumulator;
}
@Override
public Accumulator mergeAccumulators(Iterable<Accumulator> accumulators){
Accumulator mergeAccumulator = new Accumulator();
mergeAccumulator.plays = new HashSet<>();
for(Accumulator accumulator: accumulators){
mergeAccumulator.plays.addAll(accumulator.plays);
}
return mergeAccumulator;
}
@Override
public String extractOutput(Accumulator accumulator){
//how to access the key here ?
return String.join(",", accumulator.plays);
}
}
The pipeline is composed of a ReadFromBigQuery, ExtractAllPlaysOfWords (code below) and WriteToBigQuery
package me.examples;
import com.google.api.services.bigquery.model.TableRow;
import org.apache.beam.sdk.coders.KvCoder;
import org.apache.beam.sdk.coders.StringUtf8Coder;
import org.apache.beam.sdk.transforms.Combine;
import org.apache.beam.sdk.transforms.PTransform;
import org.apache.beam.sdk.transforms.ParDo;
import org.apache.beam.sdk.values.KV;
import org.apache.beam.sdk.values.PCollection;
public class PlaysForWord extends PTransform<PCollection<TableRow>, PCollection<TableRow>> {
@Override
public PCollection<TableRow> expand(PCollection<TableRow> input) {
PCollection<KV<String, String>> largeWords = input.apply("ExtractLargeWords", ParDo.of(new ExtractLargeWordsFn()));
PCollection<KV<String, String>> wordNPlays = largeWords.apply("CombinePlays",Combine.perKey(new ConcatWordsCombineFn()));
wordNPlays.setCoder(KvCoder.of(StringUtf8Coder.of(), StringUtf8Coder.of()));
PCollection<TableRow> rows = wordNPlays.apply("FormatToRow", ParDo.of(new FormatShakespeareOutputFn()));
return rows;
}
}
I would like to access the key in ConcatWordsCombineFn in order to do the final accumulation based on it. For example, join the words with "," if the key begins with an "a", or with ";" otherwise.
When looking at the programming guide
If you need the combining strategy to change based on the key (for example, MIN for some users and MAX for other users), you can define a KeyedCombineFn to access the key within the combining strategy.
I couldn't find KeyedCombineFn in org.apache.beam.sdk.transforms.Combine
I'm using Apache Beam 2.12.0 and Google Dataflow as a runner.
I don't think there is a built-in way to solve this. The straightforward workaround (not perfect, I know) is to wrap your string into another KV: KV<String, KV<String, String>> where both keys are the same.
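A minimal sketch of that workaround (untested; the class name KeyAwareConcatWordsCombineFn is hypothetical, and it assumes the same Beam 2.12 imports as the question): build the pairs as KV.of(key, KV.of(key, value)), apply Combine.perKey over the inner KV, capture the key in addInput, and use it in extractOutput.
public class KeyAwareConcatWordsCombineFn
        extends CombineFn<KV<String, String>, KeyAwareConcatWordsCombineFn.Accumulator, String> {
    @DefaultCoder(AvroCoder.class)
    public static class Accumulator {
        String key;
        HashSet<String> plays;
    }
    @Override
    public Accumulator createAccumulator() {
        Accumulator acc = new Accumulator();
        acc.key = "";
        acc.plays = new HashSet<>();
        return acc;
    }
    @Override
    public Accumulator addInput(Accumulator acc, KV<String, String> input) {
        acc.key = input.getKey(); // outer and inner keys are identical by construction
        acc.plays.add(input.getValue());
        return acc;
    }
    @Override
    public Accumulator mergeAccumulators(Iterable<Accumulator> accumulators) {
        Accumulator merged = createAccumulator();
        for (Accumulator a : accumulators) {
            if (!a.key.isEmpty()) merged.key = a.key;
            merged.plays.addAll(a.plays);
        }
        return merged;
    }
    @Override
    public String extractOutput(Accumulator acc) {
        // the key is now available when producing the final value
        String delimiter = acc.key.startsWith("a") ? "," : ";";
        return String.join(delimiter, acc.plays);
    }
}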

Problem while implementing a join of two datasets in Google Cloud Dataflow using Apache Beam

I was trying to run SQL on two datasets on Google Cloud Storage using Apache Beam, following the Apache Beam documentation: https://beam.apache.org/documentation/dsls/sql/walkthrough/
But I ended up with the exception below:
An exception occured while executing the Java class. org.apache.beam.sdk.transforms.MapElements
.via(Lorg/apache/beam/sdk/transforms/SimpleFunction;)Lorg/apache/beam/sdk/transforms/MapElements;
I tried changing the Beam SDK version and made other code changes, but none of them worked.
package com.nitesh.gcp.feature;
import org.apache.beam.runners.dataflow.options.DataflowPipelineOptions;
import org.apache.beam.sdk.Pipeline;
import org.apache.beam.sdk.extensions.sql.SqlTransform;
import org.apache.beam.sdk.io.TextIO;
import org.apache.beam.sdk.options.PipelineOptionsFactory;
import org.apache.beam.sdk.schemas.Schema;
import org.apache.beam.sdk.transforms.DoFn;
import org.apache.beam.sdk.transforms.ParDo;
import org.apache.beam.sdk.values.PCollection;
import org.apache.beam.sdk.values.PCollectionTuple;
import org.apache.beam.sdk.values.Row;
import org.apache.beam.sdk.values.TupleTag;
import java.util.stream.Collectors;
public class beamSQL1 {
public static final String EMPHEADER = "empno,ename,job,mgr,hiredate,sal,comm,deptno";
public static final String DEPTHEADER = "deptno,dname,location";
public static final Schema EMPSCHEMA = Schema.builder()
.addStringField("empno")
.addStringField("ename")
.addStringField("job")
.addStringField("mgr")
.addStringField("hiredate")
.addStringField("sal")
.addStringField("comm")
.addStringField("deptno")
.build();
public static final Schema DEPTSCHEMA = Schema.builder()
.addStringField("deptno")
.addStringField("dname")
.addStringField("location")
.build();
public static void main(String[] args) {
PipelineOptionsFactory.register(DataflowPipelineOptions.class);
DataflowPipelineOptions options = PipelineOptionsFactory
.fromArgs(args)
.withValidation()
.as(DataflowPipelineOptions.class);
Pipeline pipeline = Pipeline.create(options);
PCollection<String> employee = pipeline.apply("Read From GCS", TextIO.read().from("gs://amazon-test/sqlData/employee.txt"));
PCollection<String> department = pipeline.apply("Read From GCS", TextIO.read().from("gs://amazon-test/sqlData/department.txt"));
PCollection<Row> employeeRow = employee.apply("Transform To Row", ParDo.of(new RowParDo())).setRowSchema(EMPSCHEMA);
PCollection<Row> departmentRow = department.apply("Transform To Row", ParDo.of(new RowParDoForDept())).setRowSchema(DEPTSCHEMA);
PCollectionTuple output = PCollectionTuple.of(new TupleTag<>("emp"), employeeRow).and(new TupleTag<>("dept"), departmentRow);
output.apply(
SqlTransform.query(
// "SELECT emp.empno,emp.ename,dept.deptno,dept.dname FROM emp JOIN dept ON emp.deptno = dept.deptno"))
"SELECT * from emp JOIN dept ON emp.deptno = dept.deptno"))
/* p2.apply("Transform Sql", SqlTransform.query(
"SELECT * " +
"FROM PCOLLECTION order by sal desc LIMIT 14")
)*/
.apply("TransForm To String", ParDo.of(new RowToString()))
.apply("Write To GCS", TextIO.write().to("gs://amazon-test/sqlData/output/outputSql.csv").withoutSharding());
pipeline.run();
}
//ParDo for String -> Row (SQL)
public static class RowParDo extends DoFn<String, Row> {
@ProcessElement
public void processElement(ProcessContext c) {
if (!c.element().equalsIgnoreCase(EMPHEADER)) {
String[] vals = c.element().split(",(?=(?:[^\"]*\"[^\"]*\")*[^\"]*$)");
Row appRow = Row
.withSchema(EMPSCHEMA)
.addValues(vals[0], vals[1], vals[2], vals[3], vals[4], vals[5], vals[6], vals[7])
.build();
c.output(appRow);
}
}
}
//ParDo for Row (SQL) -> String
public static class RowToString extends DoFn<Row, String> {
@ProcessElement
public void processElement(ProcessContext c) {
String line = c.element().getValues()
.stream()
.map(Object::toString)
.collect(Collectors.joining(","));
c.output(line);
}
}
//ParDo for String -> Row (SQL)
public static class RowParDoForDept extends DoFn<String, Row> {
@ProcessElement
public void processElement(ProcessContext c) {
if (!c.element().equalsIgnoreCase(DEPTHEADER)) {
String[] vals = c.element().split(",(?=(?:[^\"]*\"[^\"]*\")*[^\"]*$)");
Row appRow = Row
.withSchema(DEPTSCHEMA)
.addValues(vals[0], vals[1], vals[2])
.build();
c.output(appRow);
}
}
}
}

Java 8 Streams: List to Map with mapped values

I'm trying to create a Map from a List using Streams.
The key should be the name of the original item;
the value should be some derived data.
After .map() the stream consists of Integers and at the time of .collect() I can't access "foo" from the previous lambda. How do I get the original item in .toMap()?
Can this be done with Streams or do I need .forEach()?
(The code below is only for demonstration; the real code is of course much more complex, and I can't make doSomething() a method of Foo.)
import java.util.ArrayList;
import java.util.Map;
import java.util.function.Function;
import java.util.stream.Collectors;
public class StreamTest {
public class Foo {
public String getName() {
return "FOO";
}
public Integer getValue() {
return 42;
}
}
public Integer doSomething(Foo foo) {
return foo.getValue() + 23;
}
public Map<String, Integer> run() {
return new ArrayList<Foo>().stream().map(foo -> doSomething(foo)).collect(Collectors.toMap(foo.getName, Function.identity()));
}
public static void main(String[] args) {
StreamTest streamTest = new StreamTest();
streamTest.run();
}
}
It appears to me it’s not that complicated. Am I missing something?
return Stream.of(new Foo())
.collect(Collectors.toMap(Foo::getName, this::doSomething));
I’m rather fond of method references. If you prefer the -> notation, use
return Stream.of(new Foo())
.collect(Collectors.toMap(foo -> foo.getName(), foo -> doSomething(foo)));
Either will break (throw an exception) if there’s more than one Foo with the same name in your stream.
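If duplicate names are possible, the three-argument overload of Collectors.toMap accepts a merge function to resolve collisions; for example, to keep the first value:
return Stream.of(new Foo())
        .collect(Collectors.toMap(Foo::getName, this::doSomething, (first, second) -> first));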

SpringJdbc RowMapper for nested 1:n entities using only one query and column aliases

I have many queries with many select fields and some nested entities. This is a simplified version of the nested entity structure:
public class OuterEntity{
private String name1;
private String name2;
private MiddleEntity middle;
//... get/set..
}
public class MiddleEntity{
private String surname1;
private String surname2;
private InnerEntity inner;
//... get/set..
}
public class InnerEntity{
private String nickname1;
private String nickname2;
//... get/set..
}
All entities have a 1:n relationship, so I can write a single long query to get all the data. I want to avoid multiple queries fetching each entity separately.
select
o.name1,
o.name2,
m.surname1,
m.surname2,
i.nickname1,
i.nickname2
from outertable o
join middletable m on m.id=o.middle
join innertable i on i.id=m.inner
I wish to have a RowMapper for this mapping that uses column name aliases to fetch and nest all the entities. Maybe I can describe the whole nesting path with column aliases:
select
o.name1 as name1,
o.name2 as name2,
m.surname1 as middle_surname1,
m.surname2 as middle_surname2,
i.nickname1 as middle_inner_nickname1,
i.nickname2 as middle_inner_nickname2
from outertable o
join middletable m on m.id=o.middle
join innertable i on i.id=m.inner
Do you think it is possible? Does JdbcTemplate provide something for this need?
I'm not asking anyone to code a new RowMapper for me; I just want to know whether something exists, or whether there is a better solution, because I think this is a very common problem.
My current solution is to fetch the entities separately (one query per entity) and map them with BeanPropertyRowMapper. Another solution could be to write a different RowMapper for each query, but I would use that as a last resort because I would have to write many different mappers for a common logic.
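For reference, the per-entity approach currently looks roughly like this (a sketch, assuming a configured JdbcTemplate field named jdbcTemplate and column names matching the bean properties):
List<InnerEntity> inners = jdbcTemplate.query(
        "select nickname1, nickname2 from innertable",
        new BeanPropertyRowMapper<>(InnerEntity.class));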
ORM frameworks like Hibernate are not an option for me.
I have not found anything so far, so I tried to write a custom RowMapper based on the BeanPropertyRowMapper source:
import java.beans.PropertyDescriptor;
import java.math.BigDecimal;
import java.sql.Date;
import java.sql.ResultSet;
import java.sql.ResultSetMetaData;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.springframework.beans.BeanUtils;
import org.springframework.beans.BeanWrapper;
import org.springframework.beans.NotWritablePropertyException;
import org.springframework.beans.PropertyAccessorFactory;
import org.springframework.dao.DataRetrievalFailureException;
import org.springframework.jdbc.core.BeanPropertyRowMapper;
import org.springframework.jdbc.core.RowMapper;
import org.springframework.jdbc.support.JdbcUtils;
/**
* @author tobia.scapin
*
* RowMapper for nesting beans of 1:n entities; uses query aliases to build the entity nesting.
* Field names should be exactly the same as the bean property names; respect case and do not use underscores in field names.
* The "id" column name/alias is used to check whether a nested entity should be null.
*
* example:
* select
* a.p1 as property1
* b.id as entityname_id //<-- if this value is null, the entity will be null
* b.p1 as entityname_property1
* b.p2 as entityname_property2
* c.id as entityname_subentity_id //<-- if this value is null, the subentity will be null
* c.p1 as entityname_subentity_property1
* from a,b,c
*
* @param <T>
*/
public class NestedBeanAliasRowMapper<T> implements RowMapper<T> {
private static final String NESTING_SEPARATOR = "_";
private static final String NULLIZER_FIELD = "id";
@SuppressWarnings("rawtypes")
private final static List<Class> TYPES;
static{
TYPES=Arrays.asList(new Class[]{ int.class, boolean.class, byte.class, short.class, long.class, double.class, float.class, Boolean.class, Integer.class, Byte.class, Short.class, Long.class, BigDecimal.class, Double.class, Float.class, String.class, Date.class});
}
private Class<T> mappedClass;
private Map<String, PropertyDescriptor> mappedFields;
private Map<String, PropertyDescriptor> mappedBeans;
@SuppressWarnings("rawtypes")
private Map<Class,NestedBeanAliasRowMapper> mappersCache=new HashMap<Class,NestedBeanAliasRowMapper>();
private Map<String,BeanProp> beanproperties=null;
public NestedBeanAliasRowMapper(Class<T> mappedClass) {
initialize(mappedClass);
}
/**
* Initialize the mapping metadata for the given class.
* #param mappedClass the mapped class
*/
protected void initialize(Class<T> mappedClass) {
this.mappedClass = mappedClass;
mappersCache.put(mappedClass, this);
this.mappedFields = new HashMap<String, PropertyDescriptor>();
this.mappedBeans = new HashMap<String, PropertyDescriptor>();
PropertyDescriptor[] pds = BeanUtils.getPropertyDescriptors(mappedClass);
for (PropertyDescriptor pd : pds) {
if (pd.getWriteMethod() != null) {
if(TYPES.contains(pd.getPropertyType()))
this.mappedFields.put(pd.getName(), pd);
else
this.mappedBeans.put(pd.getName(), pd);
}
}
}
@Override
public T mapRow(ResultSet rs, int rowNumber) throws SQLException {
ResultSetMetaData rsmd = rs.getMetaData();
int columnCount = rsmd.getColumnCount();
List<Integer> cols=new ArrayList<Integer>();
for (int index = 1; index <= columnCount; index++)
cols.add(index);
return mapRow(rs, rowNumber, cols, "", true);
}
@SuppressWarnings({ "unchecked", "rawtypes" })
public T mapRow(ResultSet rs, int rowNumber, List<Integer> cols, String aliasPrefix, boolean root) throws SQLException {
T mappedObject = BeanUtils.instantiate(this.mappedClass);
BeanWrapper bw = PropertyAccessorFactory.forBeanPropertyAccess(mappedObject);
ResultSetMetaData rsmd = rs.getMetaData();
if(rowNumber==0) beanproperties=new HashMap<String,BeanProp>();
for (int index : cols) {
String column = JdbcUtils.lookupColumnName(rsmd, index);
if(aliasPrefix!=null && column.length()>aliasPrefix.length() && column.substring(0, aliasPrefix.length()).equals(aliasPrefix))
column=column.substring(aliasPrefix.length()); //remove the prefix from column-name
PropertyDescriptor pd = this.mappedFields.get(column);
if (pd != null) {
try {
Object value = getColumnValue(rs, index, pd);
if(!root && NULLIZER_FIELD.equals(column) && value==null)
return null;
bw.setPropertyValue(pd.getName(), value);
}
catch (NotWritablePropertyException ex) {
throw new DataRetrievalFailureException("Unable to map column '" + column + "' to property '" + pd.getName() + "'", ex);
}
}else if(rowNumber==0 && column.contains(NESTING_SEPARATOR)){
String[] arr=column.split(NESTING_SEPARATOR);
column=arr[0];
PropertyDescriptor bpd = this.mappedBeans.get(column);
if(bpd!=null){
BeanProp beanprop=beanproperties.get(column);
if(beanprop==null){
beanprop=new BeanProp();
beanprop.setClazz(bpd.getPropertyType());
beanproperties.put(column, beanprop);
}
beanprop.addIndex(index);
}
}
}
if(!beanproperties.isEmpty()) for (String beanname : beanproperties.keySet()) {
BeanProp beanprop=beanproperties.get(beanname);
NestedBeanAliasRowMapper mapper=mappersCache.get(beanprop.getClazz());
if(mapper==null){
mapper=new NestedBeanAliasRowMapper<>(beanprop.getClazz());
mappersCache.put(beanprop.getClazz(), mapper);
}
Object value = mapper.mapRow(rs, rowNumber, beanprop.getIndexes(), aliasPrefix+beanname+NESTING_SEPARATOR, false);
bw.setPropertyValue(beanname, value);
}
return mappedObject;
}
protected Object getColumnValue(ResultSet rs, int index, PropertyDescriptor pd) throws SQLException {
return JdbcUtils.getResultSetValue(rs, index, pd.getPropertyType());
}
public static <T> NestedBeanAliasRowMapper<T> newInstance(Class<T> mappedClass) {
return new NestedBeanAliasRowMapper<T>(mappedClass);
}
@SuppressWarnings("rawtypes")
private class BeanProp{
private Class clazz;
private List<Integer> indexes=new ArrayList<Integer>();
public Class getClazz() {
return clazz;
}
public void setClazz(Class clazz) {
this.clazz = clazz;
}
public List<Integer> getIndexes() {
return indexes;
}
public void addIndex(Integer index) {
this.indexes.add(index);
}
}
}
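For completeness, a usage sketch of the mapper above (a sketch reusing the aliasing convention from the question's query; table names are the hypothetical ones from the question, and a configured JdbcTemplate named jdbcTemplate is assumed):
List<OuterEntity> outers = jdbcTemplate.query(
        "select o.name1 as name1, o.name2 as name2, "
        + "m.surname1 as middle_surname1, m.surname2 as middle_surname2, "
        + "i.nickname1 as middle_inner_nickname1, i.nickname2 as middle_inner_nickname2 "
        + "from outertable o "
        + "join middletable m on m.id = o.middle "
        + "join innertable i on i.id = m.inner",
        // aliases follow the middle_ / middle_inner_ nesting convention from the javadoc
        new NestedBeanAliasRowMapper<>(OuterEntity.class));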
