I'm trying to run a Pig script that calls a user-defined function written in Java. I'm testing the script with a very small file of 264 bytes, but I end up getting Java heap space errors and the job fails. I have tried running the job with the -Xms1024M option; it then works for smaller files but still fails with a larger file.
Even so, my cluster should be powerful enough not to trip over such small files, so I wonder how I can fix this memory leak.
Can someone please help?
import java.util.HashMap;
import java.lang.annotation.Annotation;
import java.lang.reflect.Array;
import java.lang.reflect.Method;
import java.io.IOException;
import java.util.Iterator;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.util.Set;
import java.text.*;
import org.apache.pig.EvalFunc;
import org.apache.pig.data.*;
import com.tictactec.ta.lib.CoreAnnotated;
import com.tictactec.ta.lib.MAType;
import com.tictactec.ta.lib.MInteger;
import com.tictactec.ta.lib.RetCode;
import com.tictactec.ta.lib.meta.annotation.InputParameterInfo;
import com.tictactec.ta.lib.meta.annotation.InputParameterType;
import com.tictactec.ta.lib.meta.annotation.OptInputParameterInfo;
import com.tictactec.ta.lib.meta.annotation.OptInputParameterType;
import com.tictactec.ta.lib.meta.annotation.OutputParameterInfo;
import com.tictactec.ta.lib.meta.annotation.OutputParameterType;
public class taLib extends EvalFunc<DataBag>
{
private static final int MIN_ARGS = 3;
public static CoreAnnotated core = new CoreAnnotated();
private static Method func_ref = null;
public DecimalFormat df = new DecimalFormat("#.###");
public DataBag exec(Tuple args) throws IOException
{
DataBag input=null;
MInteger outStart = new MInteger();
MInteger outLen = new MInteger();
Map<String,Object>outputParams=new HashMap<String, Object>();
String func_name;
List<Integer> ip_colmns= new ArrayList<Integer>();
List<double[]>ip_list=new ArrayList<double[]>();
List<String>opt_type=new ArrayList<String>();
List<Object>opt_params=new ArrayList<Object>();
//////
long m1=Runtime.getRuntime().freeMemory();
System.out.println(m1);
long m2=Runtime.getRuntime().totalMemory();
System.out.println(m2);
//////
int ip_noofparams=0;
int op_noofparams=0;
int opt_noofparams=0;
if (args == null || args.size() < MIN_ARGS)
throw new IllegalArgumentException("talib: must have at least " +
MIN_ARGS + " args");
if(args.get(0) instanceof DataBag)
{input = (DataBag)args.get(0);}
else{throw new IllegalArgumentException("Only a valid bag name can be
passed");}
// get no of fields in bag
Tuple t0=input.iterator().next();
int fields_in_bag=t0.getAll().size();
if(args.get(1) instanceof String)
{func_name = (String)args.get(1);}
else{throw new IllegalArgumentException("Only valid function name can be
passed at arg 1");}
func_ref=methodChk(func_name);
if (func_ref == null) {
throw new IllegalArgumentException("talib: function "
+ func_name + " was not found");
}
for (Annotation[] annotations : func_ref.getParameterAnnotations())
{
for (Annotation annotation : annotations)
{
if(annotation instanceof InputParameterInfo)
{
InputParameterInfo inputParameterInfo =
(InputParameterInfo)annotation;
if(inputParameterInfo.type().equals(InputParameterType.TA_Input_Price))
{
ip_noofparams=numberOfSetBits(inputParameterInfo.flags());
}
else
{
ip_noofparams++;
}
}
if(annotation instanceof OptInputParameterInfo)
{
OptInputParameterInfo optinputParameterInfo=
(OptInputParameterInfo)annotation;
opt_noofparams++;
if
(optinputParameterInfo.type().equals(OptInputParameterType.TA_OptInput_IntegerRange))
{
opt_type.add("Integer");
}
else
if(optinputParameterInfo.type().equals(OptInputParameterType.TA_OptInput_RealRange))
{
opt_type.add("Double");
}
else
if(optinputParameterInfo.type().equals(OptInputParameterType.TA_OptInput_IntegerList))
{
opt_type.add("String");
}
else{throw new IllegalArgumentException("whoopsie ...serious
mess in opt_annotations");}
}
if (annotation instanceof OutputParameterInfo)
{
OutputParameterInfo outputParameterInfo =
(OutputParameterInfo) annotation;
op_noofparams++;
if
(outputParameterInfo.type().equals(OutputParameterType.TA_Output_Real))
{
outputParams.put(outputParameterInfo.paramName(), new
double[(int) input.size()]);
}
else if
(outputParameterInfo.type().equals(OutputParameterType.TA_Output_Integer))
{
outputParams.put(outputParameterInfo.paramName(), new
int[(int)input.size()]);
}
}
}
}
int total_params =ip_noofparams+opt_noofparams;
if((args.size()-2)!=total_params){throw new IllegalArgumentException("Wrong
no of argumets passed to UDF");}
// get the ip colmns no's
for(int i=2;i<(2+ip_noofparams);i++)
{
if(args.get(i) instanceof Integer )
{
if((Integer)args.get(i)>=0 && (Integer)args.get(i)<fields_in_bag)
{
ip_colmns.add((Integer) args.get(i));
}
else{throw new IllegalArgumentException("The input colmn specified
is invalid..please enter a valid colmn no:0-"+(fields_in_bag-1));}
}
else{throw new IllegalArgumentException("Wrong arguments entered:
Only"+ip_noofparams+"field no's of type(integer) allowed for fn"+func_name ); }
}
// create a list of ip arrays
for(int i=0;i<ip_colmns.size();i++)
{
ip_list.add((double[]) Array.newInstance(double.class, (int)input.size()));
}
int z=0;
int x=0;
// fill up the arrays
for(Tuple t1: input)
{
Iterator<double[]> itr=ip_list.iterator();
z=0;
while(itr.hasNext())
{
if((Double)t1.get(ip_colmns.get(z)) instanceof Double)
{
((double[])itr.next())[x]=(Double) t1.get(ip_colmns.get(z++));
}
else{throw new IllegalArgumentException("Illegal argument while
filling up array...only double typr allowed");}
}
x++;
}
//deal with opt params
int s=0;
for(int i=(2+ip_noofparams);i<(2+ip_noofparams+opt_noofparams);i++)
{
if(opt_type.get(s).equalsIgnoreCase(args.get(i).getClass().getSimpleName().toString()))
{
if(opt_type.get(s).equalsIgnoreCase("String"))
{
String m=args.get(i).toString().toLowerCase();
String ma=m.substring(0, 1).toUpperCase();
String mac=m.substring(1);
String macd=ma+mac;
MAType type =MAType.valueOf(macd);
opt_params.add(type);
s++;
}
else{
opt_params.add(args.get(i));
s++;
}
}
else if(opt_type.get(s).equalsIgnoreCase("Double"))
{
if(args.get(i).getClass().getSimpleName().toString().equalsIgnoreCase("Integer"))
{
opt_params.add((Double)((Integer)args.get(i)+0.0));
s++;
}
else{throw new IllegalArgumentException("Opt arguments do
not match for fn:"+func_name+", pls enter opt arguments in right order"); }
}
else{throw new IllegalArgumentException("Opt arguments do not match
for fn:"+func_name+", pls enter opt arguments in right order");}
}
List<Object> ta_argl = new ArrayList<Object>();
ta_argl.add(new Integer(0));
ta_argl.add(new Integer((int)input.size() - 1));
for(double[]in: ip_list)
{
ta_argl.add(in);
}
if(opt_noofparams!=0)
{ta_argl.addAll(opt_params);}
ta_argl.add(outStart);
ta_argl.add(outLen);
for (Map.Entry<String, Object> entry : outputParams.entrySet())
{
ta_argl.add(entry.getValue());
}
RetCode rc = RetCode.Success;
try {
rc = (RetCode)func_ref.invoke(core, ta_argl.toArray());
} catch (Exception e)
{
assert false : "I died in ta-lib, but Java made me a zombie...";
}
assert rc == RetCode.Success : "ret code from " + func_name;
if (outLen.value == 0) return null;
//////
DataBag ret=null;
ret =outTA(input,outputParams,outStart);
outputParams.clear();
ip_list.clear();
opt_params.clear();
opt_type.clear();
ip_colmns.clear();
Runtime.getRuntime().gc();
return ret;
}
public DataBag outTA(DataBag bag,Map<String, Object> outputParams,MInteger outStart)
{
DataBag nbag=null;
TupleFactory mTupleFactory=TupleFactory.getInstance();
BagFactory mBagFactory=BagFactory.getInstance();
nbag=mBagFactory.newDefaultBag();
Tuple tw=bag.iterator().next();
int fieldsintup=tw.getAll().size();
for(Tuple t0: bag)
{
Tuple t1=mTupleFactory.newTuple();
for(int z=0;z<fieldsintup;z++)
{
try {
t1.append(t0.get(z));
} catch (Exception e) {
// TODO Auto-generated catch block
System.out.println("Ouch");
}
}
nbag.add(t1);
}
int i = 0;
int j=0;
for (Tuple t2: nbag)
{
if(i>=outStart.value)
{
for(Map.Entry<String,Object>entry: outputParams.entrySet())
{
t2.append(entry.getKey().substring(3).toString());
if(entry.getValue() instanceof double[])
{
t2.append( new Double
(df.format(((double[])entry.getValue())[j])));
}
else if(entry.getValue() instanceof int[])
{
t2.append( ((int[])entry.getValue())[j]);
}
else{throw new
IllegalArgumentException(entry.getValue().getClass()+"not supported");}
}
i++;j++;
}
else
{t2.append(0.0);
i++;
}
}
return nbag;
}
public Method methodChk(String fn)
{
String fn_name=fn;
Method tmp_fn=null;
for (Method meth: core.getClass().getDeclaredMethods())
{
if (meth.getName().equalsIgnoreCase(fn_name))
{
tmp_fn = meth;
break;
}
}
return tmp_fn;
}
public int numberOfSetBits(int i) {
i = i - ((i >> 1) & 0x55555555);
i = (i & 0x33333333) + ((i >> 2) & 0x33333333);
return ((i + (i >> 4) & 0xF0F0F0F) * 0x1010101) >> 24;
}
}
Probably a problem with the BZip codec - the API does note that it's rather memory hungry:
http://hadoop.apache.org/common/docs/r0.20.0/api/org/apache/hadoop/io/compress/bzip2/CBZip2OutputStream.html
The compression requires large amounts of memory
When you increased the memory with -Xms2048m did you set the options for the pig grunt shell, or for the map/reduce jobs?
set mapred.child.java.opts=-Xmx2048m
You can check by looking in the JobTracker, find the job that failed, open the job.xml and locate the value of mapred.child.java.opts
Related
I am trying to write some Parquet records that contain LogicalTypes to JSON. I do this via AvroParquetReader, which gives me an Avro GenericRecord:
GenericData.get().addLogicalTypeConversion(new TimeConversions.TimeMillisConversion());
try (ParquetReader<GenericRecord> parquetReader =
AvroParquetReader.<GenericRecord>builder(new LocalInputFile(this.path))
.withDataModel(GenericData.get())
.build()) {
GenericRecord record = parquetReader.read();
record.toString();
}
record.toString() produces:
{"universe_member_id": 94639, "member_from_dt": 2001-08-31T00:00:00Z, "member_to_dt": 2200-01-01T00:00:00Z}
Notice that this is invalid JSON - the dates are correctly converted as per their LogicalType, but are not surrounded by quotes.
So instead I tried the JsonEncoder:
GenericData.get().addLogicalTypeConversion(new TimeConversions.TimeMillisConversion()); //etc
OutputStream stringOutputStream = new StringOutputStream();
try (ParquetReader<GenericRecord> parquetReader =
AvroParquetReader.<GenericRecord>builder(new LocalInputFile(this.path))
.withDataModel(GenericData.get())
.build()) {
GenericRecord record = parquetReader.read();
DatumWriter<GenericRecord> writer = new GenericDatumWriter<>(record.getSchema());
JsonEncoder encoder = EncoderFactory.get().jsonEncoder(record.getSchema(), stringOutputStream);
writer.write(record, encoder);
encoder.flush();
}
but this doesn't convert the date fields at all and bakes the datatype into every record:
{"universe_member_id":{"long":94639},"member_from_dt":{"long":999216000000000},"member_to_dt":{"long":7258118400000000}}
The output I'm looking for is:
{"universe_member_id": 94639, "member_from_dt": "2001-08-31T00:00:00Z", "member_to_dt": "2200-01-01T00:00:00Z"}
How can I correctly write a GenericRecord to JSON?
As you have indicated, the method toString() in class GenericRecord will give you a nearly valid JSON representation.
As you can see in the source code of the GenericData class, the GenericData.Record toString method just invokes the GenericData toString(Object) method in its implementation.
If you want a valid JSON representation of the record, you can take that code and, with minimal modifications, obtain the information that you need.
For instance, we can define an utility class like the following:
package stackoverflow.parquetavro;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.Collection;
import java.util.HashMap;
import java.util.IdentityHashMap;
import java.util.Map;
import java.util.function.Function;
import org.apache.avro.LogicalType;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericContainer;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericEnumSymbol;
import org.apache.avro.generic.GenericRecord;
import org.apache.avro.generic.IndexedRecord;
public class GenericRecordJsonEncoder {
Map<LogicalType, Function<Object, Object>> logicalTypesConverters = new HashMap<>();
public void registerLogicalTypeConverter(LogicalType logicalType, Function<Object, Object> converter) {
this.logicalTypesConverters.put(logicalType, converter);
}
public Function<Object, Object> getLogicalTypeConverter(Schema.Field field) {
Schema fieldSchema = field.schema();
LogicalType logicalType = fieldSchema.getLogicalType();
return getLogicalTypeConverter(logicalType);
}
public Function<Object, Object> getLogicalTypeConverter(LogicalType logicalType) {
if (logicalType == null) {
return Function.identity();
}
return logicalTypesConverters.getOrDefault(logicalType, Function.identity());
}
public String serialize(GenericRecord value) {
StringBuilder buffer = new StringBuilder();
serialize(value, buffer, new IdentityHashMap<>(128) );
String result = buffer.toString();
return result;
}
private static final String TOSTRING_CIRCULAR_REFERENCE_ERROR_TEXT =
" \">>> CIRCULAR REFERENCE CANNOT BE PUT IN JSON STRING, ABORTING RECURSION <<<\" ";
/** Renders a Java datum as JSON. */
private void serialize(final Object datum, final StringBuilder buffer, final IdentityHashMap<Object, Object> seenObjects) {
if (isRecord(datum)) {
if (seenObjects.containsKey(datum)) {
buffer.append(TOSTRING_CIRCULAR_REFERENCE_ERROR_TEXT);
return;
}
seenObjects.put(datum, datum);
buffer.append("{");
int count = 0;
Schema schema = getRecordSchema(datum);
for (Schema.Field f : schema.getFields()) {
serialize(f.name(), buffer, seenObjects);
buffer.append(": ");
Function<Object, Object> logicalTypeConverter = getLogicalTypeConverter(f);
serialize(logicalTypeConverter.apply(getField(datum, f.name(), f.pos())), buffer, seenObjects);
if (++count < schema.getFields().size())
buffer.append(", ");
}
buffer.append("}");
seenObjects.remove(datum);
} else if (isArray(datum)) {
if (seenObjects.containsKey(datum)) {
buffer.append(TOSTRING_CIRCULAR_REFERENCE_ERROR_TEXT);
return;
}
seenObjects.put(datum, datum);
Collection<?> array = getArrayAsCollection(datum);
buffer.append("[");
long last = array.size()-1;
int i = 0;
for (Object element : array) {
serialize(element, buffer, seenObjects);
if (i++ < last)
buffer.append(", ");
}
buffer.append("]");
seenObjects.remove(datum);
} else if (isMap(datum)) {
if (seenObjects.containsKey(datum)) {
buffer.append(TOSTRING_CIRCULAR_REFERENCE_ERROR_TEXT);
return;
}
seenObjects.put(datum, datum);
buffer.append("{");
int count = 0;
#SuppressWarnings(value="unchecked")
Map<Object,Object> map = (Map<Object,Object>)datum;
for (Map.Entry<Object,Object> entry : map.entrySet()) {
serialize(entry.getKey(), buffer, seenObjects);
buffer.append(": ");
serialize(entry.getValue(), buffer, seenObjects);
if (++count < map.size())
buffer.append(", ");
}
buffer.append("}");
seenObjects.remove(datum);
} else if (isString(datum)|| isEnum(datum)) {
buffer.append("\"");
writeEscapedString(datum.toString(), buffer);
buffer.append("\"");
} else if (isBytes(datum)) {
buffer.append("{\"bytes\": \"");
ByteBuffer bytes = ((ByteBuffer) datum).duplicate();
writeEscapedString(StandardCharsets.ISO_8859_1.decode(bytes), buffer);
buffer.append("\"}");
} else if (((datum instanceof Float) && // quote Nan & Infinity
(((Float)datum).isInfinite() || ((Float)datum).isNaN()))
|| ((datum instanceof Double) &&
(((Double)datum).isInfinite() || ((Double)datum).isNaN()))) {
buffer.append("\"");
buffer.append(datum);
buffer.append("\"");
} else if (datum instanceof GenericData) {
if (seenObjects.containsKey(datum)) {
buffer.append(TOSTRING_CIRCULAR_REFERENCE_ERROR_TEXT);
return;
}
seenObjects.put(datum, datum);
serialize(datum, buffer, seenObjects);
seenObjects.remove(datum);
} else {
// This fallback is the reason why GenericRecord toString does not
// generate a valid JSON representation
buffer.append(datum);
}
}
// All these methods are also copied from the GenericData class source
private boolean isRecord(Object datum) {
return datum instanceof IndexedRecord;
}
private Schema getRecordSchema(Object record) {
return ((GenericContainer)record).getSchema();
}
private Object getField(Object record, String name, int position) {
return ((IndexedRecord)record).get(position);
}
private boolean isArray(Object datum) {
return datum instanceof Collection;
}
private Collection getArrayAsCollection(Object datum) {
return (Collection)datum;
}
private boolean isEnum(Object datum) {
return datum instanceof GenericEnumSymbol;
}
private boolean isMap(Object datum) {
return datum instanceof Map;
}
private boolean isString(Object datum) {
return datum instanceof CharSequence;
}
private boolean isBytes(Object datum) {
return datum instanceof ByteBuffer;
}
private void writeEscapedString(CharSequence string, StringBuilder builder) {
for(int i = 0; i < string.length(); i++){
char ch = string.charAt(i);
switch(ch){
case '"':
builder.append("\\\"");
break;
case '\\':
builder.append("\\\\");
break;
case '\b':
builder.append("\\b");
break;
case '\f':
builder.append("\\f");
break;
case '\n':
builder.append("\\n");
break;
case '\r':
builder.append("\\r");
break;
case '\t':
builder.append("\\t");
break;
default:
// Reference: http://www.unicode.org/versions/Unicode5.1.0/
if((ch>='\u0000' && ch<='\u001F') || (ch>='\u007F' && ch<='\u009F') || (ch>='\u2000' && ch<='\u20FF')){
String hex = Integer.toHexString(ch);
builder.append("\\u");
for(int j = 0; j < 4 - hex.length(); j++)
builder.append('0');
builder.append(hex.toUpperCase());
} else {
builder.append(ch);
}
}
}
}
}
In this class you can register converters for the logical types that you need. Consider the following example:
GenericRecordJsonEncoder encoder = new GenericRecordJsonEncoder();
// Register as many logical types converters as you need
encoder.registerLogicalTypeConverter(LogicalTypes.timestampMillis(), o -> {
final Instant instant = (Instant)o;
final String result = DateTimeFormatter.ISO_INSTANT.format(instant);
return result;
});
String json = encoder.serialize(genericRecord);
System.out.println(json);
This will provide you the desired result.
I have a custom velocity directive similar to the one described here.
package ca.sergiy.velocity;
import java.io.IOException;
import java.io.StringWriter;
import java.io.Writer;
import org.apache.velocity.context.InternalContextAdapter;
import org.apache.velocity.exception.MethodInvocationException;
import org.apache.velocity.exception.ParseErrorException;
import org.apache.velocity.exception.ResourceNotFoundException;
import org.apache.velocity.exception.TemplateInitException;
import org.apache.velocity.runtime.RuntimeServices;
import org.apache.velocity.runtime.directive.Directive;
import org.apache.velocity.runtime.log.Log;
import org.apache.velocity.runtime.parser.node.ASTBlock;
import org.apache.velocity.runtime.parser.node.Node;
public class TruncateBlockDirective extends Directive {
private Log log;
private int maxLength;
private String suffix;
private Boolean truncateAtWord;
public String getName() {
return "truncateBlock";
}
public int getType() {
return BLOCK;
}
#Override
public void init(RuntimeServices rs, InternalContextAdapter context, Node node) throws TemplateInitException {
super.init(rs, context, node);
log = rs.getLog();
//read dafault values from config
maxLength = rs.getInt("userdirective.truncateBlock.maxLength", 10);
suffix = rs.getString("userdirective.truncateBlock.suffix", "...");
truncateAtWord = rs.getBoolean("userdirective.truncateBlock.truncateAtWord", false);
}
public boolean render(InternalContextAdapter context, Writer writer, Node node)
throws IOException, ResourceNotFoundException, ParseErrorException, MethodInvocationException {
log.debug("truncateBlock directive render() call");
String truncateMe = null;
//default settings
int maxLength = this.maxLength;
String suffix = this.suffix;
Boolean truncateAtWord = this.truncateAtWord;
//loop through all "params"
for(int i=0; i<node.jjtGetNumChildren(); i++) {
if (node.jjtGetChild(i) != null ) {
if(!(node.jjtGetChild(i) instanceof ASTBlock)) {
//reading and casting inline parameters
if(i == 0) {
maxLength = (Integer)node.jjtGetChild(i).value(context);
} else if(i == 1) {
suffix = String.valueOf(node.jjtGetChild(i).value(context));
} else if(i == 2) {
truncateAtWord = (Boolean)node.jjtGetChild(i).value(context);
} else {
break;
}
} else {
//reading block content and rendering it
StringWriter blockContent = new StringWriter();
node.jjtGetChild(i).render(context, blockContent);
truncateMe = blockContent.toString();
break;
}
}
}
//truncate and write result to writer
try {
writer.write(truncate(truncateMe, maxLength, suffix, truncateAtWord));
} catch (Exception e) {
String msg = "Truncate failed";
log.error(msg, e);
throw new RuntimeException(msg, e);
}
return true;
}
//does actual truncating (taken directly from DisplayTools)
public String truncate(String truncateMe, int maxLength, String suffix,
boolean truncateAtWord) {
if (truncateMe == null || maxLength <= 0) {
return null;
}
if (truncateMe.length() <= maxLength) {
return truncateMe;
}
if (suffix == null || maxLength - suffix.length() <= 0) {
// either no need or no room for suffix
return truncateMe.substring(0, maxLength);
}
if (truncateAtWord) {
// find the latest space within maxLength
int lastSpace = truncateMe.substring(0, maxLength - suffix.length() + 1).lastIndexOf(" ");
if (lastSpace > suffix.length()) {
return truncateMe.substring(0, lastSpace) + suffix;
}
}
// truncate to exact character and append suffix
return truncateMe.substring(0, maxLength - suffix.length()) + suffix;
}
}
Is there any way to inject services using spring into such a directive, or the only possibility is to pass the beans as a directive parameter?
I was trying to write a method that returns true if two given Strings are anagrams. Unfortunately I can't even test it, and I don't know what is wrong. The markers on the left say:
Multiple markers at this line
- Breakpoint:Test
- Duplicate modifier for the
type Test
Here is the source code:
package zajecia19;
import java.io.BufferedReader;
import java.io.FileReader;
import java.util.HashMap;
public
public class Test {

    /**
     * Returns true when s1 and s2 are anagrams of each other.
     *
     * Counts each character of s1 up and each character of s2 down in a
     * single pass; the strings are anagrams iff every balance ends at zero.
     */
    public static boolean Anagraamy(String s1, String s2) {
        if (s1.length() != s2.length()) {
            return false;
        }
        HashMap<Character, Integer> balance = new HashMap<>();
        for (int idx = 0; idx < s1.length(); idx++) {
            balance.merge(s1.charAt(idx), 1, Integer::sum);
            balance.merge(s2.charAt(idx), -1, Integer::sum);
        }
        for (int remaining : balance.values()) {
            if (remaining != 0) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // The reader mirrors the original flow; the file itself is never read.
        try (BufferedReader br = new BufferedReader(new FileReader("slowa2"))) {
            System.out.println(Anagraamy("abba", "babb"));
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}
Because you have
public
public
there.
The obvious way to fix that: remove the first one. And next time: pay attention to what the compiler is trying to tell you.
I am trying to perform an interval-based search: I load IP ranges from a file and try to find the interval in which my IP address lies. Below is my code. It works for the long value I tested, but it does not work for IP addresses whose integer representation is a different number.
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.Map;
import java.util.NavigableMap;
import java.util.Scanner;
import java.util.StringTokenizer;
import java.util.TreeMap;
public class RangeBasedSearchAsn {

    /** One ASN record: AS number, subnet string and ISP name. */
    public static class AsnInfo {
        private long asn;
        private String ipSubnet;
        private String isp;

        @Override  // was '#Override' in the pasted source, which does not compile
        public String toString() {
            return "Here are the details:\n"
                    + this.asn + " " + this.ipSubnet + " " + this.isp ;
        }

        public AsnInfo(long asn, String ipSubnet, String isp) {
            this.asn = asn;
            this.ipSubnet = ipSubnet;
            this.isp = isp;
        }

        public long getAsn() {
            return asn;
        }

        public void setAsn(long asn) {
            this.asn = asn;
        }

        public String getIpSubnet() {
            return ipSubnet;
        }

        public void setIpSubnet(String ipSubnet) {
            this.ipSubnet = ipSubnet;
        }

        public String getIsp() {
            return isp;
        }

        public void setIsp(String isp) {
            this.isp = isp;
        }
    }

    /** Upper bound of an IP interval plus the AsnInfo payload for that interval. */
    public static class Range {
        private long upper;
        private AsnInfo asnInfo;

        public Range(long upper, AsnInfo value) {
            this.upper = upper;
            this.asnInfo = value;
        }

        public long getUpper() {
            return upper;
        }

        public void setUpper(long upper) {
            this.upper = upper;
        }

        public AsnInfo getValue() {
            return asnInfo;
        }

        public void setValue(AsnInfo value) {
            this.asnInfo = value;
        }
    }

    /**
     * Loads semicolon-separated range records (7 fields per line: two skipped,
     * then lower bound, upper bound, ASN, subnet, ISP) into a TreeMap keyed by
     * lower bound, then looks each address in ips[] up via floorEntry.
     */
    public static void main(String[] args) throws FileNotFoundException, IOException {
        NavigableMap<Long, Range> asnTreeMap = new TreeMap<>();
        System.out.println(System.currentTimeMillis());
        System.out.println("Loading isp Map.");
        // try-with-resources replaces the manual finally-close of both handles.
        try (FileInputStream inputStream = new FileInputStream("C:\\Talend\\TalendTestArea\\rbl_ipv4_zone.txt");
                Scanner sc = new Scanner(inputStream, "UTF-8")) {
            while (sc.hasNextLine()) {
                String line = sc.nextLine();
                StringTokenizer st = new StringTokenizer(line, ";");
                // Only lines with exactly 7 fields are loaded; the loop body
                // consumes all 7 tokens, so it runs at most once per line.
                while (st.hasMoreTokens() && st.countTokens() == 7) {
                    st.nextToken();
                    st.nextToken();
                    long lower = Long.parseLong(st.nextToken());
                    long upper = Long.parseLong(st.nextToken());
                    long asn = Long.parseLong(st.nextToken());
                    asnTreeMap.put(lower, new Range(upper, new AsnInfo(asn, st.nextToken(), st.nextToken())));
                }
            }
            if (sc.ioException() != null) {
                throw sc.ioException();
            }
        }
        System.out.println("Loading Over.");
        System.out.println(System.currentTimeMillis());
        System.out.println("Starting Lookup.");
        long[] ips = {30503936L};
        for (int i = 0; i < ips.length; i++) {
            System.out.println(asnTreeMap.size());
            Map.Entry<Long, Range> entry = asnTreeMap.floorEntry(ips[i]);
            if (entry == null) {
                System.out.println("Value not valid");
            } else if (ips[i] <= entry.getValue().upper) {
                // BUG FIX: the original compared the unrelated constant
                // key=848163455L against the interval's upper bound, so the
                // address actually being looked up was never range-checked
                // and valid lookups printed "Not found".
                System.out.println("Carrier = " + entry.getValue().asnInfo.toString() + "\n");
            } else {
                System.out.println("Not found");
            }
            System.out.println(System.currentTimeMillis());
        }
    }
}
Below is the output run: 1432262970924 Loading isp Map. Loading
Over. 1432262975089 Starting Lookup. 540772 Not found
1432262975089\n BUILD SUCCESSFUL (total time: 4 seconds)
An IP address is a 32-bit unsigned integer. In Java, ints are 32-bit signed integers.
If you use a signed int to represent an IP address, you'll have to accommodate into your code the fact that the upper half of all IP addresses will in fact be negative.
Java 7 doesn't provide built-in support for unsigned ints, so you'd have to implement the desired behavior or find another class wrapper (from somewhere) for Integer that fulfills your need.
Java 8 introduced methods in the Integer class for comparing ints as unsigned. See https://docs.oracle.com/javase/8/docs/api/java/lang/Integer.html for the appropriate methods.
Hi,
I have a text file as shown below.
11/2/2010 cat 6
11/2/2010 cat 3
11/2/2010 dog 4
11/2/2010 cat 11
11/3/2010 cat 1
11/3/2010 dog 3
11/3/2010 cat 8
I get this kind of text file every month; the figure above shows part of it. I want to read this text into a JTable using Java.
I have tried using StringTokenizer and ArrayList to do this, but unfortunately I couldn't get it working. Please help.
So i want below result to jTable using java program.
date animal total count
11/2/2010 cat 20 3
11/3/2010 cat 9 2
You don't need a StringTokenizer (in fact, it's not recommended). Just get the input line by line using BufferedReader, and do a String split:
// String.split returns String[], so the list element type is String[]
// (the original List<Array> referred to java.lang.reflect.Array by mistake).
List<String[]> data = new ArrayList<String[]>();
BufferedReader in = new BufferedReader(new FileReader("foo.in"));
String line;
// Read input and put into ArrayList of String arrays
while ((line = in.readLine()) != null) {  // was 'in.readLine)' — missing parentheses
    data.add(line.split("\\s+"));
}
in.close();  // release the file handle once reading is done
// Now create JTable with an Array of Arrays; toArray(new String[0][]) yields
// the Object[][]-compatible array the JTable(Object[][], Object[]) ctor needs.
JTable table = new JTable(data.toArray(new String[0][]), new String[] {
        "date", "animal", "total", "count"});
test with : http://crysol.org/es/node/819
or
import java.io.BufferedReader;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStreamReader;
import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import javax.swing.JFrame;
import javax.swing.JPanel;
import javax.swing.JScrollPane;
import javax.swing.JTable;
import javax.swing.event.TableModelListener;
import javax.swing.table.TableModel;
public class Reader {

    /**
     * Reads "sample.txt", aggregates for each (date, animal) pair the sum of
     * the third column and the number of contributing rows, and displays the
     * result in a JTable.
     */
    public Reader() {
        JFrame frame = new JFrame();
        frame.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE);
        JPanel panel = new JPanel();
        // try-with-resources: the original leaked the reader when an exception
        // occurred before the manual close() call.
        try (BufferedReader reader = new BufferedReader(new InputStreamReader(
                new FileInputStream("sample.txt")))) {
            // key = "col0<TAB>col1" -> {col0, col1, runningSum, rowCount};
            // LinkedHashMap preserves first-seen row order for the table.
            Map<String, Object[]> result = new LinkedHashMap<String, Object[]>();
            String line;
            // readLine()==null is the reliable end-of-stream test; ready() only
            // reports whether a read would block.
            while ((line = reader.readLine()) != null) {
                String[] values = line.split("\\s+");
                String key = values[0] + "\t" + values[1];
                Integer sum = 0;
                Integer count = 0;
                Object[] existing = result.get(key);
                if (existing != null) {
                    sum = (Integer) existing[2];
                    count = (Integer) existing[3];
                }
                result.put(key, new Object[]{values[0], values[1],
                        sum + Integer.parseInt(values[2]), count + 1});
            }
            List<Object[]> rows = new ArrayList<Object[]>(result.values());
            JTable table = new JTable(new AnimalTableModel(rows));
            panel.add(new JScrollPane(table));
            frame.setContentPane(panel);
            frame.setVisible(true);
            frame.pack();
        } catch (IOException e) {
            // FileNotFoundException is an IOException; one handler covers both.
            e.printStackTrace();
        }
    }

    public static void main(String[] args) {
        new Reader();
    }

    /** Read-only table model backed by rows of {date, animal, sum, count}. */
    public class AnimalTableModel implements TableModel {

        final Class<?>[] columnClass = new Class<?>[]{String.class, String.class,
                Integer.class, Integer.class};
        final String[] columnName = new String[]{"Date", "Animal", "Sum",
                "Count"};
        List<Object[]> values = null;

        public AnimalTableModel(List<Object[]> values) {
            this.values = values;
        }

        /** Listeners are ignored: the model is never mutated after construction. */
        @Override  // all of these were '#Override' in the paste, which does not compile
        public void addTableModelListener(TableModelListener l) {
        }

        @Override
        public Class<?> getColumnClass(int columnIndex) {
            return columnClass[columnIndex];
        }

        @Override
        public int getColumnCount() {
            return columnClass.length;
        }

        @Override
        public String getColumnName(int columnIndex) {
            return columnName[columnIndex];
        }

        @Override
        public int getRowCount() {
            return values.size();
        }

        @Override
        public Object getValueAt(int rowIndex, int columnIndex) {
            return values.get(rowIndex)[columnIndex];
        }

        @Override
        public boolean isCellEditable(int rowIndex, int columnIndex) {
            return false;
        }

        @Override
        public void removeTableModelListener(TableModelListener l) {
        }

        @Override
        public void setValueAt(Object aValue, int rowIndex, int columnIndex) {
            // Model is read-only; edits are ignored.
        }
    }
}
You will have to populate the data from the given file into a map. I will give you an example of how to populate the data.
public class AnimalMapping {

    /**
     * Demonstrates aggregating (date, animal, value) records into a nested map
     * of date -> animal -> (running sum, row count), then prints the result.
     */
    public static void main(String[] args) {
        Object[][] data = { { "11/2/2010", "cat", 6 },
                { "11/2/2010", "cat", 3 }, { "11/2/2010", "dog", 4 },
                { "11/2/2010", "cat", 11 }, { "11/3/2010", "cat", 1 },
                { "11/3/2010", "dog", 3 }, { "11/3/2010", "cat", 8 } };
        // date -> (animal -> aggregate)
        Map<String, Map<String, AnimalValCnt>> animalMap =
                new HashMap<String, Map<String, AnimalValCnt>>();
        for (Object[] record : data) {
            // computeIfAbsent replaces the assign-inside-if get/put dance.
            Map<String, AnimalValCnt> innerMap = animalMap.computeIfAbsent(
                    (String) record[0], k -> new HashMap<String, AnimalValCnt>());
            AnimalValCnt obj = innerMap.computeIfAbsent(
                    (String) record[1], k -> new AnimalValCnt());
            obj.Sumval += (Integer) record[2];
            obj.cnt++;
        }
        System.out.println(animalMap);
    }
}
/**
 * Mutable accumulator holding a running sum and an occurrence count for one
 * (date, animal) pair. The garbled {@code #Override} is fixed to
 * {@code @Override} so the class compiles.
 */
class AnimalValCnt {
    int Sumval; // running total of the value column (name kept for compatibility with callers)
    int cnt;    // number of records folded into Sumval

    @Override
    public String toString() {
        return "(" + Sumval + "," + cnt + ")";
    }
}
Once you have the data in a map, it is easy to populate it into a table. You can use a TableModel for this purpose. Have a look at this code to understand how data from a map can be loaded into a table using a TableModel.
UPDATE:
/**
 * Reads whitespace-separated records ({@code date animal value}) from
 * {@code foo.in} and prints per-date, per-animal sum/count aggregates.
 *
 * <p>Fix: the original never closed the {@link BufferedReader}, leaking the
 * file handle on every run (and on any exception mid-read).
 */
public class AnimalMapping {
    public static void main(String[] args) throws NumberFormatException, IOException {
        // date -> (animal -> running sum/count)
        HashMap<String, Map<String, AnimalValCnt>> animalMap =
                new HashMap<String, Map<String, AnimalValCnt>>();
        BufferedReader in = new BufferedReader(new FileReader("foo.in"));
        try {
            String line;
            while ((line = in.readLine()) != null) {
                String[] record = line.split("\\s+");
                // Lazily create the per-date inner map.
                Map<String, AnimalValCnt> innerMap = animalMap.get(record[0]);
                if (innerMap == null) {
                    innerMap = new HashMap<String, AnimalValCnt>();
                    animalMap.put(record[0], innerMap);
                }
                // Lazily create the per-animal accumulator.
                AnimalValCnt obj = innerMap.get(record[1]);
                if (obj == null) {
                    obj = new AnimalValCnt();
                    innerMap.put(record[1], obj);
                }
                obj.Sumval += Integer.valueOf(record[2]);
                obj.cnt++;
            }
        } finally {
            in.close(); // release the file handle even if parsing fails
        }
        System.out.println(animalMap);
    }
}
/**
 * Mutable accumulator for one (date, animal) pair: a running sum and how
 * many records contributed to it. The garbled {@code #Override} is fixed
 * to {@code @Override} so the class compiles.
 */
class AnimalValCnt {
    int Sumval; // running total of the value column (name kept for compatibility with callers)
    int cnt;    // number of records folded into Sumval

    @Override
    public String toString() {
        return "(" + Sumval + "," + cnt + ")";
    }
}
@Dilantha Chamal: Reading from the file was already shown by Box9; I just included it in my code. You should be putting in some effort here. Since you may be a beginner, I wrote the code for you. Now try to implement the TableModel by yourself. A friendly piece of advice: unless you do it yourself, you are never going to learn.
import java.io.File;
import java.io.IOException;
import java.util.Iterator;
import java.util.Map;
import com.google.common.base.CharMatcher;
import com.google.common.base.Charsets;
import com.google.common.base.Joiner;
import com.google.common.base.Splitter;
import com.google.common.collect.Maps;
import com.google.common.io.Files;
import com.google.common.io.LineProcessor;
public class AnimalSummaryBuilder
{
private static final Splitter SPLITTER = Splitter.on(CharMatcher.anyOf(","));
private static final Joiner JOINER = Joiner.on("\t");
#SuppressWarnings("unchecked")
public static void main(final String[] args) throws Exception
{
#SuppressWarnings("rawtypes")
Map<Animal, Summary> result = Files.readLines(new File("c:/1.txt"), Charsets.ISO_8859_1, new LineProcessor() {
private final Map<Animal, Summary> result = Maps.newHashMap();
public Object getResult()
{
return result;
}
public boolean processLine(final String line) throws IOException
{
Iterator<String> columns = SPLITTER.split(line).iterator();
String date = columns.next();
String name = columns.next();
int value = Integer.valueOf(columns.next()).intValue();
Animal currentRow = new Animal(date, name);
if (result.containsKey(currentRow))
{
Summary summary = result.get(currentRow);
summary.increaseCount();
summary.addToTotal(value);
}
else
{
Summary initialSummary = new Summary();
initialSummary.setCount(1);
initialSummary.setTotal(value);
result.put(currentRow, initialSummary);
}
return true;
}
});
for (Map.Entry<Animal, Summary> entry : result.entrySet())
{
Animal animal = entry.getKey();
Summary summary = entry.getValue();
System.out.println(JOINER.join(animal.date, animal.name, summary.total, summary.count));
}
}
final static class Animal
{
String date;
String name;
public Animal(final String date, final String n)
{
this.date = date;
this.name = n;
}
#Override
public int hashCode()
{
final int prime = 31;
int result = 1;
result = prime * result + ((date == null) ? 0 : date.hashCode());
result = prime * result + ((name == null) ? 0 : name.hashCode());
return result;
}
#Override
public boolean equals(Object obj)
{
if (this == obj)
{
return true;
}
if (obj == null)
{
return false;
}
if (!(obj instanceof Animal))
{
return false;
}
Animal other = (Animal) obj;
if (date == null)
{
if (other.date != null)
{
return false;
}
}
else if (!date.equals(other.date))
{
return false;
}
if (name == null)
{
if (other.name != null)
{
return false;
}
}
else if (!name.equals(other.name))
{
return false;
}
return true;
}
}
final static class Summary
{
private int total;
private int count;
void setTotal(int value)
{
total = value;
}
void setCount(int i)
{
count = i;
}
void increaseCount()
{
count++;
}
void addToTotal(int valueToAdd)
{
total += valueToAdd;
}
}
}