Using IN / OR operators in an SAP JCo query - java

I am creating a query using JCo (the SAP Java Connector). For example:
public static void TEST() throws JCoException {
    JCoDestination destination = JCoDestinationManager.getDestination(ABAP_AS);
    System.out.println("Attributes:");
    System.out.println(destination.getAttributes());
    System.out.println();
    try {
        JCoContext.begin(destination);
        JCoRepository sapRepository = destination.getRepository();
        if (sapRepository == null) {
            System.out.println("Couldn't get repository!");
            System.exit(0);
        }
        JCoFunctionTemplate functionTemplate = sapRepository.getFunctionTemplate("EM_GET_NUMBER_OF_ENTRIES");
        JCoFunction function = functionTemplate.getFunction();
        JCoTable itTable = function.getTableParameterList().getTable("IT_TABLES");
        itTable.appendRow();
        itTable.setValue("TABNAME", "USR02");
        // JCoTable returnOptions_ = function.getTableParameterList().getTable("OPTIONS");
        // returnOptions_.appendRow();
        // returnOptions_.setValue("TEXT", "MODDA GE '20140908' AND MODTI GT '000000'");
        // returnOptions_.setValue("TEXT", "BNAME EQ 'USER'");
        function.execute(destination);
        System.out.println(function.getTableParameterList().getTable("IT_TABLES").getInt("TABROWS"));

        JCoFunctionTemplate template2 = sapRepository.getFunctionTemplate("RFC_READ_TABLE");
        System.out.println("Getting template");
        JCoFunction function2 = template2.getFunction();
        function2.getImportParameterList().setValue("QUERY_TABLE", "USR02");
        function2.getImportParameterList().setValue("DELIMITER", ",");
        function2.getImportParameterList().setValue("ROWCOUNT", 5);
        function2.getImportParameterList().setValue("ROWSKIPS", 5);

        System.out.println("Setting OPTIONS");
        // Date date = new Date(1410152400000L);
        // SimpleDateFormat formatter = new SimpleDateFormat("yyyyMMddHHmmss");
        // String dateString = formatter.format(date);
        // String dt = dateString.substring(0, 8);
        // String tm = dateString.substring(8);
        // System.out.println("dt > " + dt + ", tm > " + tm);
        JCoTable returnOptions = function2.getTableParameterList().getTable("OPTIONS");
        returnOptions.appendRow();
        // returnOptions.setValue("TEXT", "MODDA GE '20140908' AND MODTI GT '000000'");
        returnOptions.setValue("TEXT", "BNAME LIKE 'S%'");
        // returnOptions.appendRow();
        // returnOptions.setValue("TEXT", "AND TYPE = 'DN'");

        System.out.println("Setting FIELDS");
        JCoTable returnFields = function2.getTableParameterList().getTable("FIELDS");
        returnFields.appendRow();
        returnFields.setValue("FIELDNAME", "BNAME");
        returnFields.appendRow();
        returnFields.setValue("FIELDNAME", "GLTGB");
        returnFields.appendRow();
        returnFields.setValue("FIELDNAME", "CLASS");

        function2.execute(destination);
        // JCoTable jcoTablef = function2.getTableParameterList().getTable("FIELDS");
        JCoTable jcoTabled = function2.getTableParameterList().getTable("DATA");
        int numRows = jcoTabled.getNumRows();
        System.out.println("numRows > " + numRows);
        for (int i = 0; i < numRows; i++) {
            jcoTabled.setRow(i);
            System.out.println(jcoTabled.getRow());
            String BNAME = "BNAME:" + jcoTabled.getString(0);
            // String GLTGB = "GLTGB:" + jcoTabled.getString(2);
            // String cls = "CLASS:" + jcoTabled.getString(3);
            System.out.println(BNAME + "...");
        }
    } catch (Exception e) {
        e.printStackTrace();
        System.out.println("ERROR: " + e.getMessage());
    } finally {
        JCoContext.end(destination);
    }
}

static void createDestinationDataFile(String destinationName, Properties connectProperties) {
    File destCfg = new File(destinationName + ".jcoDestination");
    try {
        FileOutputStream fos = new FileOutputStream(destCfg, false);
        connectProperties.store(fos, "for tests only !");
        fos.close();
    } catch (Exception e) {
        throw new RuntimeException("Unable to create the destination files", e);
    }
}
The previous code worked well when I used the EQ operator.
However, when I use the IN operator:
BNAME IN ('USER1','USER','USER3')
or an equivalent OR chain:
BNAME EQ 'USER1' OR BNAME EQ 'USER' OR BNAME EQ 'USER3'
it throws an exception: Unexpected dynamic condition.
Are there any limits on the size of the condition? I have 22 values in the IN list, and each value is up to 10 characters long.

You need to specify a valid Open SQL condition, you need to observe the rules for dynamic conditions, and you need to ensure that the condition is properly split into lines of no more than 72 characters. My guess would be that the last bit is the issue if you're specifying 22 values...
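For illustration, here is a minimal sketch of how a long condition could be split across OPTIONS rows. The helper name and the space-based splitting are my own; the fixed constraint is that RFC_READ_TABLE reads each OPTIONS-TEXT row as one 72-character line of the WHERE clause, and a row must not end in the middle of a token or quoted literal (so write a space after each comma in the IN list to give the splitter a valid break point):
// Hypothetical helper: distribute a long Open SQL condition over OPTIONS
// rows of at most 72 characters, breaking only at spaces.
static void setOptions(JCoFunction function, String condition) {
    JCoTable options = function.getTableParameterList().getTable("OPTIONS");
    StringBuilder line = new StringBuilder();
    for (String token : condition.split(" ")) {
        // flush the current line if the next token would push it past 72 chars
        if (line.length() > 0 && line.length() + 1 + token.length() > 72) {
            options.appendRow();
            options.setValue("TEXT", line.toString());
            line.setLength(0);
        }
        if (line.length() > 0) {
            line.append(' ');
        }
        line.append(token);
    }
    if (line.length() > 0) {
        options.appendRow();
        options.setValue("TEXT", line.toString());
    }
}
With 22 values of up to 10 characters each, plus quotes and commas, the single-line condition is far past 72 characters, so it has to be distributed over several rows this way, e.g. setOptions(function2, "BNAME IN ('USER1', 'USER2', 'USER3')").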


twitter4j search for full 7 day range

I am trying to save tweets matching certain keywords. I know the free API only returns results from the last 7 days, but I never get a set spanning more than a few hours; sometimes it even gives me a range of just one hour. I did set since() and until() on the search query. The maximum number of tweets I've got from a single run was less than 400. Can anyone tell me why it stops automatically with so few results? Thanks.
public static void main(String[] args) throws TwitterException {
    Propertyloader confg = new Propertyloader();
    String KEY_word = confg.getProperty("KEY_word");
    String Exclude = confg.getProperty("Exclude");
    String Since = confg.getProperty("Since");
    String Until = confg.getProperty("Until");
    String OPT_dir = confg.getProperty("OPT_dir");
    Twitter twitter = new TwitterFactory().getInstance();
    try {
        String time = new SimpleDateFormat("yyyyMMddHHmm'.txt'").format(new Date());
        int x = 1;
        Query query = new Query(KEY_word + Exclude);
        query.since(Since);
        query.until(Until);
        QueryResult result;
        do {
            result = twitter.search(query);
            List<Status> tweets = result.getTweets();
            for (Status tweet : tweets) {
                try {
                    String filedir = OPT_dir + KEY_word + time;
                    writeStringToFile(filedir, x + ". " + "#" + tweet.getUser().getScreenName()
                            + ", At: " + tweet.getCreatedAt() + ", Rt= " + tweet.getRetweetCount()
                            + ", Text: " + tweet.getText());
                    x += 1;
                } catch (IOException e) {
                    e.printStackTrace();
                }
            }
        } while ((query = result.nextQuery()) != null);
        System.exit(0);
    } catch (TwitterException te) {
        te.printStackTrace();
        System.out.println("Failed to search tweets: " + te.getMessage());
        System.exit(-1);
    }
}

public static void writeStringToFile(String filePathAndName, String stringToBeWritten) throws IOException {
    try {
        boolean append = true;
        FileWriter fw = new FileWriter(filePathAndName, append);
        fw.write(stringToBeWritten); // appends the string to the file
        fw.write("\n" + "\n");
        fw.close();
    } catch (IOException ioe) {
        System.err.println("IOException: " + ioe.getMessage());
    }
}
You can get more tweets by using setMaxId. Here is an example:
long lowestTweetId = Long.MAX_VALUE;
int x = 1;
Query query = new Query("stackoverflow");
query.since("2018-08-10");
query.until("2018-08-16");
// The number of tweets to return per page, up to a maximum of 100 (defaults to 15).
// https://developer.twitter.com/en/docs/tweets/search/api-reference/get-search-tweets.html
query.setCount(100);
query.setResultType(Query.RECENT); // to get an order
int searchResultCount;
QueryResult result;
do {
    result = twitter.search(query);
    List<Status> tweets = result.getTweets();
    searchResultCount = tweets.size(); // stop once a page comes back empty
    for (Status tweet : tweets) {
        try {
            System.out.println("#" + tweet.getUser().getScreenName() + ", At: " + tweet.getCreatedAt());
            x += 1;
            if (tweet.getId() < lowestTweetId) {
                lowestTweetId = tweet.getId();
                query.setMaxId(lowestTweetId - 1);
            } else {
                // each new maxId should be smaller than the previous one, so break here
                // (handle however you want, e.g. break out of both loops)
            }
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
} while (searchResultCount != 0);
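For context on the design: each page of standard search results contains tweets with IDs at or below the query's maxId, newest first, so moving maxId to just below the lowest ID seen so far pages backward through progressively older results until an empty page comes back. Note also that the free search index is focused on relevance rather than completeness and the endpoint is rate-limited, which is more likely the reason for small result sets than the since/until window.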

Passing 100 records at a time to a UDF

I have to pass records to a UDF which calls an API. Since we want to do this in parallel, we are using Spark, which is why the UDF is being developed. The problem is that the UDF must take only 100 records at a time, no more; it can't handle more than 100 records in parallel. How can I ensure that only 100 records are passed to it in one go? Please note we don't want to use count() on the whole record set.
I am attaching the UDF code here; it's a generic UDF which returns an array of structs. Moreover, if we pass 100 records per batch, then with, say, 198 records, since we don't want to use count() we will not know that the last batch only holds 98 records. How do we handle that?
To clarify: I have a generic UDF which calls an API, but before calling it first builds a batch of 100, and only then calls the REST API. The arguments the UDF takes are x1:string, x2:string, batchsize:integer (currently the batch size is 100). So inside the UDF, until the batch size reaches 100 the call does not happen, and for each record it returns null.
So up to the 99th record it returns null, but on the 100th record the call happens.
Now the problem: since the batch size is 100 and the call only takes place on the 100th record, with 198 records in a file the first 100 records get output, but the other 98 only return null because they never get processed.
So please help me find a way around this: the UDF should take one record at a time but keep collecting until the 100th record. I hope this clears it up.
public class Standardize_Address extends GenericUDF {
    private static final Logger logger = LoggerFactory.getLogger(Standardize_Address.class);
    private int counter = 0;
    Client client = null;
    private Batch batch = new Batch();

    public Standardize_Address() {
        client = new ClientBuilder().withUrl("https://ss-staging-public.beringmedia.com/street-address").build();
    }

    // StringObjectInspector streeti;
    PrimitiveObjectInspector streeti;
    PrimitiveObjectInspector cityi;
    PrimitiveObjectInspector zipi;
    PrimitiveObjectInspector statei;
    PrimitiveObjectInspector batchsizei;
    private ArrayList ret;

    @Override
    public String getDisplayString(String[] argument) {
        return "My display string";
    }

    @Override
    public ObjectInspector initialize(ObjectInspector[] args) throws UDFArgumentException {
        System.out.println("under initialize");
        if (args[0] == null) {
            throw new UDFArgumentTypeException(0, "NO Street is mentioned");
        }
        if (args[1] == null) {
            throw new UDFArgumentTypeException(0, "No Zip is mentioned");
        }
        if (args[2] == null) {
            throw new UDFArgumentTypeException(0, "No city is mentioned");
        }
        if (args[3] == null) {
            throw new UDFArgumentTypeException(0, "No State is mentioned");
        }
        if (args[4] == null) {
            throw new UDFArgumentTypeException(0, "No batch size is mentioned");
        }
        // streeti = args[0];
        streeti = (PrimitiveObjectInspector) args[0];
        // this.streetvalue = (StringObjectInspector) streeti;
        cityi = (PrimitiveObjectInspector) args[1];
        zipi = (PrimitiveObjectInspector) args[2];
        statei = (PrimitiveObjectInspector) args[3];
        batchsizei = (PrimitiveObjectInspector) args[4];
        ret = new ArrayList();
        ArrayList structFieldNames = new ArrayList();
        ArrayList structFieldObjectInspectors = new ArrayList();
        structFieldNames.add("Street");
        structFieldNames.add("city");
        structFieldNames.add("zip");
        structFieldNames.add("state");
        structFieldObjectInspectors.add(PrimitiveObjectInspectorFactory.writableStringObjectInspector);
        structFieldObjectInspectors.add(PrimitiveObjectInspectorFactory.writableStringObjectInspector);
        structFieldObjectInspectors.add(PrimitiveObjectInspectorFactory.writableStringObjectInspector);
        structFieldObjectInspectors.add(PrimitiveObjectInspectorFactory.writableStringObjectInspector);
        StructObjectInspector si2 = ObjectInspectorFactory.getStandardStructObjectInspector(structFieldNames,
                structFieldObjectInspectors);
        ListObjectInspector li2 = ObjectInspectorFactory.getStandardListObjectInspector(si2);
        return li2;
    }

    @Override
    public Object evaluate(DeferredObject[] args) throws HiveException {
        ret.clear();
        System.out.println("under evaluate");
        // String street1 = streetvalue.getPrimitiveJavaObject(args[0].get());
        Object oin = args[4].get();
        System.out.println("under typecasting");
        int batchsize = (Integer) batchsizei.getPrimitiveJavaObject(oin);
        System.out.println("batchsize");
        Object oin1 = args[0].get();
        String street1 = (String) streeti.getPrimitiveJavaObject(oin1);
        Object oin2 = args[1].get();
        String zip1 = (String) zipi.getPrimitiveJavaObject(oin2);
        Object oin3 = args[2].get();
        String city1 = (String) cityi.getPrimitiveJavaObject(oin3);
        Object oin4 = args[3].get();
        String state1 = (String) statei.getPrimitiveJavaObject(oin4);
        logger.info("address passed, street=" + street1 + ",zip=" + zip1 + ",city=" + city1 + ",state=" + state1);
        counter++;
        try {
            System.out.println("under try");
            Lookup lookup = new Lookup();
            lookup.setStreet(street1);
            lookup.setCity(city1);
            lookup.setState(state1);
            lookup.setZipCode(zip1);
            lookup.setMaxCandidates(1);
            batch.add(lookup);
        } catch (BatchFullException ex) {
            logger.error(ex.getMessage(), ex);
        } catch (Exception e) {
            logger.error(e.getMessage(), e);
        }
        /* batch.add(lookup); */
        if (counter == batchsize) {
            System.out.println("under if");
            try {
                logger.info("batch input street " + batch.get(0).getStreet());
                try {
                    client.send(batch);
                } catch (Exception e) {
                    logger.error(e.getMessage(), e);
                    logger.warn("skipping current batch, continuing with the next batch");
                    batch.clear();
                    counter = 0;
                    return null;
                }
                Vector<Lookup> lookups = batch.getAllLookups();
                for (int i = 0; i < batch.size(); i++) {
                    // ListObjectInspector candidates;
                    ArrayList<Candidate> candidates = lookups.get(i).getResult();
                    if (candidates.isEmpty()) {
                        logger.warn("Address " + i + " is invalid.\n");
                        continue;
                    }
                    logger.info("Address " + i + " is valid. (There is at least one candidate)");
                    for (Candidate candidate : candidates) {
                        final Components components = candidate.getComponents();
                        final Metadata metadata = candidate.getMetadata();
                        logger.info("\nCandidate " + candidate.getCandidateIndex() + ":");
                        logger.info("Delivery line 1: " + candidate.getDeliveryLine1());
                        logger.info("Last line: " + candidate.getLastLine());
                        logger.info("ZIP Code: " + components.getZipCode() + "-" + components.getPlus4Code());
                        logger.info("County: " + metadata.getCountyName());
                        logger.info("Latitude: " + metadata.getLatitude());
                        logger.info("Longitude: " + metadata.getLongitude());
                    }
                    Object[] e = new Object[4];
                    e[0] = new Text(candidates.get(i).getComponents().getStreetName());
                    e[1] = new Text(candidates.get(i).getComponents().getCityName());
                    e[2] = new Text(candidates.get(i).getComponents().getZipCode());
                    e[3] = new Text(candidates.get(i).getComponents().getState());
                    ret.add(e);
                }
                counter = 0;
                batch.clear();
            } catch (Exception e) {
                logger.error(e.getMessage(), e);
            }
            return ret;
        } else {
            return null;
        }
    }
}
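The underlying pattern here is worth spelling out: accumulate rows until the batch is full, then flush whatever is left once the input is exhausted. That final flush needs an end-of-input signal, which a per-row UDF call never receives. A minimal sketch of the pattern itself, where callApi and the iterator are hypothetical stand-ins and not the Hive GenericUDF API:
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;

public class BatchingSketch {
    // Generic chunking pattern: accumulate up to batchSize items and flush a
    // final, smaller batch when the input runs out. "callApi" is a
    // hypothetical stand-in for the REST call the UDF wraps.
    static <T> void processInBatches(Iterator<T> rows, int batchSize) {
        List<T> batch = new ArrayList<>(batchSize);
        while (rows.hasNext()) {
            batch.add(rows.next());
            if (batch.size() == batchSize) {
                callApi(batch);   // full batch of 100
                batch.clear();
            }
        }
        if (!batch.isEmpty()) {
            callApi(batch);       // trailing partial batch (e.g. the last 98 of 198)
        }
    }

    static <T> void callApi(List<T> batch) {
        // hypothetical: send the batch to the address-standardization service
    }
}
In Hive terms, GenericUDF.close() is the hook that runs once evaluation finishes and could at least send the final partial batch to the API, though a UDF cannot emit extra result rows from close(); if the Spark side is under your control, doing this chunking in a partition-level operation sidesteps the problem entirely.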

How do you write to a file in java?

This is a code snippet that shows me trying to write to a file.
public void printContents() {
    int i = 0;
    try {
        FileReader fl = new FileReader("Product List.txt");
        Scanner scn = new Scanner(fl);
        while (scn.hasNext()) {
            String productName = scn.next();
            double productPrice = scn.nextDouble();
            int productAmount = scn.nextInt();
            System.out.println(productName + " is " + productPrice + " pula. There are " + productAmount + " items left in stock.");
            productList[i] = new ReadingAndWritting(productName, productPrice, productAmount);
            i = i + 1;
        }
        scn.close();
    } catch (IOException exc) {
        exc.printStackTrace();
    } catch (Exception exc) {
        exc.printStackTrace();
    }
}

public void writeContents() {
    try {
        // FileOutputStream formater = new FileOutputStream("Product List.txt", true);
        Formatter writer = new Formatter(new FileOutputStream("Product List.txt", false));
        for (int i = 0; i < 2; ++i) {
            writer.format(productList[i].name + "", (productList[i].price + 200.0 + ""), (productList[i].number - 1), "\n");
        }
        writer.close();
    } catch (Exception exc) {
        exc.printStackTrace();
    }
}
The exception thrown while trying to run this code is:
java.util.NoSuchElementException at ReadingAndWritting.printContents(ReadingAndWritting.java:37).
I tried multiple things and only ended up with: "cokefruitgushersAlnassma" in the file. What I want is:
coke 7.95 10
fruitgushers 98.00 6
Alnassma 9.80 7
The problem seems to be here:
String productName = scn.next();
// Here:
double productPrice = scn.nextDouble();
// And here:
int productAmount = scn.nextInt();
After scn.next(), you don't check scn.hasNext() before requesting the next element (a double or an int, respectively). So either your file is incomplete, or it is not in the exact structure you expect, or you just missed the two additional checks before trying to read data that isn't there.
Solution could be like:
while (scn.hasNext()) {
    String productName = scn.next();
    if (scn.hasNext()) {
        double productPrice = scn.nextDouble();
        if (scn.hasNext()) {
            int productAmount = scn.nextInt();
            // Do something with the three values read...
        } else {
            // Premature end of file(?)
        }
    } else {
        // Premature end of file(?)
    }
}
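Since the title asks about writing, note also why the output file ends up as "cokefruitgushersAlnassma": Formatter.format treats its first argument as the format string, and arguments with no matching format specifiers are silently ignored, so only the names get written, with no separators. A sketch of a corrected writeContents, assuming the same productList fields as in the question:
public void writeContents() {
    try (Formatter writer = new Formatter(new FileOutputStream("Product List.txt", false))) {
        for (int i = 0; i < productList.length; ++i) {
            // %s/%f/%d are filled from the arguments; %n is the line separator
            writer.format("%s %.2f %d%n",
                    productList[i].name,
                    productList[i].price + 200.0,
                    productList[i].number - 1);
        }
    } catch (FileNotFoundException exc) {
        exc.printStackTrace();
    }
}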

OpenCSV + JMS/MDB behavior + performance issue

I have a web application, running under GlassFish 4.1, that contains a couple of features requiring JMS/MDB.
In particular, I am having problems with the generation of a report using JMS/MDB, that is, obtaining data from a table and dumping it into a file.
This is what happens: I have a JMS/MDB message that does a couple of tasks in an Oracle database, and once the final result is in a table, I would like to produce a CSV report from that table (which usually holds 30M+ records).
So, inside the JMS/MDB, this is what happens to generate the report:
public boolean handleReportContent() {
    Connection conn = null;
    try {
        System.out.println("Handling report content... " + new Date());
        conn = DriverManager.getConnection(data.getUrl(), data.getUsername(), data.getPassword());
        int reportLine = 1;
        String sql = "SELECT FIELD_NAME, VALUE_A, VALUE_B, DIFFERENCE FROM " + data.getDbTableName()
                + " WHERE SET_PK IN ( SELECT DISTINCT SET_PK FROM " + data.getDbTableName() + " WHERE IS_VALID=? )";
        PreparedStatement ps = conn.prepareStatement(sql);
        ps.setBoolean(1, false);
        ResultSet rs = ps.executeQuery();
        List<ReportLine> lst = new ArrayList<>();
        int columns = data.getLstFormats().size();
        int size = 0;
        int linesDone = 0;
        while (rs.next()) {
            ReportLine rl = new ReportLine(reportLine, rs.getString("FIELD_NAME"), rs.getString("VALUE_A"),
                    rs.getString("VALUE_B"), rs.getString("DIFFERENCE"));
            lst.add(rl);
            linesDone = columns * (reportLine - 1);
            size++;
            if ((size - linesDone) == columns) {
                reportLine++;
                if (lst.size() > 4000) {
                    appendReportContentNew(lst);
                    lst.clear();
                }
            }
        }
        if (lst.size() > 0) {
            appendReportContentNew(lst);
            lst.clear();
        }
        ps.close();
        conn.close();
        return true;
    } catch (Exception e) {
        System.out.println("exception handling report content new: " + e.toString());
        return false;
    }
}
This works, but I am aware it is slow and inefficient, and most likely there is a better way to perform the same operation.
What this method does is:
collect the data from the ResultSet;
dump it into a List;
for every 4K objects, call appendReportContentNew();
dump the data from the List into the file.
public void appendReportContentNew(List<ReportLine> lst) {
    File f = new File(data.getJobFilenamePath());
    try {
        if (!f.exists()) {
            f.createNewFile();
        }
        FileWriter fw = new FileWriter(data.getJobFilenamePath(), true);
        BufferedWriter bw = new BufferedWriter(fw);
        for (ReportLine rl : lst) {
            String rID = "R" + rl.getLine();
            String fieldName = rl.getFieldName();
            String rline = rID + "," + fieldName + "," + rl.getValue1() + "," + rl.getValue2() + "," + rl.getDifference();
            bw.append(rline);
            bw.append("\n");
        }
        bw.close();
    } catch (IOException e) {
        System.out.println("exception appending report content: " + e.toString());
    }
}
With this method it wrote 800k lines (a 30 MB file) in 20 minutes, and the full file usually grows to 4 GB or more. This is what I want to improve, if possible.
So I decided to try OpenCSV, and I came up with the following method:
public boolean handleReportContentv2() {
    Connection conn = null;
    try {
        FileWriter fw = new FileWriter(data.getJobFilenamePath(), true);
        System.out.println("Handling report content v2... " + new Date());
        conn = DriverManager.getConnection(data.getUrl(), data.getUsername(), data.getPassword());
        String sql = "SELECT NLINE, FIELD_NAME, VALUE_A, VALUE_B, DIFFERENCE FROM " + data.getDbTableName()
                + " WHERE SET_PK IN ( SELECT DISTINCT SET_PK FROM " + data.getDbTableName() + " WHERE IS_VALID=? )";
        PreparedStatement ps = conn.prepareStatement(sql);
        ps.setBoolean(1, false);
        ps.setFetchSize(500);
        ResultSet rs = ps.executeQuery();
        BufferedWriter out = new BufferedWriter(fw);
        CSVWriter writer = new CSVWriter(out, ',', CSVWriter.NO_QUOTE_CHARACTER);
        writer.writeAll(rs, false);
        fw.close();
        writer.close();
        rs.close();
        ps.close();
        conn.close();
        return true;
    } catch (Exception e) {
        System.out.println("exception handling report content v2: " + e.toString());
        return false;
    }
}
So I am collecting all the data from the ResultSet and dumping it into the CSVWriter. In the same 20 minutes, this operation only wrote 7k lines.
But the same method, used outside the JMS/MDB, behaves incredibly differently: in just the first 4 minutes it wrote 3M rows to the file, and over the same 20 minutes it generated a file of 500 MB+.
Clearly OpenCSV is by far the best option if I want to improve performance; my question is why it doesn't perform the same way inside the JMS/MDB.
If that's not achievable, is there any other way to improve this task?
I appreciate any feedback and help on this matter; I am trying to understand why the behavior/performance differs in and out of the JMS/MDB.
EDIT:
@MessageDriven(activationConfig = {
    @ActivationConfigProperty(propertyName = "destinationType", propertyValue = "javax.jms.Queue"),
    @ActivationConfigProperty(propertyName = "destinationLookup", propertyValue = "MessageQueue")})
public class JobProcessorBean implements MessageListener {

    private static final int TYPE_A_ID = 0;
    private static final int TYPE_B_ID = 1;

    @Inject
    JobDao jobsDao;

    @Inject
    private AsyncReport generator;

    public JobProcessorBean() {
    }

    @Override
    public void onMessage(Message message) {
        int jobId = -1;
        ObjectMessage msg = (ObjectMessage) message;
        try {
            boolean valid = true;
            JobWrapper jobw = (JobWrapper) msg.getObject();
            jobId = jobw.getJob().getJobId().intValue();
            switch (jobw.getJob().getJobTypeId().getJobTypeId().intValue()) {
                case TYPE_A_ID:
                    jobsDao.updateJobStatus(jobId, 0);
                    valid = processTask1(jobw);
                    if (valid) {
                        jobsDao.updateJobFileName(jobId, generator.getData().getJobFilename());
                        System.out.println(":: :: JOBW FileName :: " + generator.getData().getJobFilename());
                        jobsDao.updateJobStatus(jobId, 0);
                    } else {
                        System.out.println("error...");
                        jobsDao.updateJobStatus(jobId, 1);
                    }
                    boolean validfile = handleReportContentv2(); // <-- the call whose performance differs
                    if (!validfile) {
                        System.out.println("error file...");
                        jobsDao.updateJobStatus(jobId, 1);
                    }
                    break;
                case TYPE_B_ID:
                    (...)
            }
            if (valid) {
                jobsDao.updateJobStatus(jobw.getJob().getJobId().intValue(), 2); // updated to complete
            }
            System.out.println("***********---------Finished JOB " + jobId + "-----------****************");
            System.out.println();
            jobw = null;
        } catch (JMSException ex) {
            Logger.getLogger(JobProcessorBean.class.getName()).log(Level.SEVERE, null, ex);
            jobsDao.updateJobStatus(jobId, 1);
        } catch (Exception ex) {
            Logger.getLogger(JobProcessorBean.class.getName()).log(Level.SEVERE, null, ex);
            jobsDao.updateJobStatus(jobId, 1);
        } finally {
            msg = null;
        }
    }

    private boolean processTask1(JobWrapper jobw) throws Exception {
        boolean valid = true;
        jobsDao.updateJobStatus(jobw.getJob().getJobId().intValue(), 0);
        generator.setData(jobw.getData());
        valid = generator.deployGenerator();
        if (!valid) return false;
        jobsDao.updateJobParameters(jobw.getJob().getJobId().intValue(), new ReportContent());
        Logger.getLogger(JobProcessorBean.class.getName()).log(Level.INFO, null, "Job Finished");
        return true;
    }
}
So when the same method, handleReportContent(), is executed inside generator.deployGenerator(), it gives those slow results. If I instead wait for everything inside that method to finish and build the file here in JobProcessorBean, it is much faster. I am just trying to figure out why the behavior/performance works out like this.
Adding the @TransactionAttribute(NOT_SUPPORTED) annotation on the bean might solve the problem (and it did, as your comment indicates).
Why is this so? Because if you don't put any transactional annotation on a message-driven bean, the default becomes @TransactionAttribute(REQUIRED), so everything the bean does is supervised by a transaction manager. Apparently, this slows things down.
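For illustration, a minimal sketch of where the annotation goes (the imports are the standard javax.ejb/javax.jms ones; the activation config is elided here and the body is as in the question):
import javax.ejb.MessageDriven;
import javax.ejb.TransactionAttribute;
import javax.ejb.TransactionAttributeType;
import javax.jms.Message;
import javax.jms.MessageListener;

@MessageDriven(activationConfig = { /* same activation config as above */ })
@TransactionAttribute(TransactionAttributeType.NOT_SUPPORTED) // opt out of container-managed transactions
public class JobProcessorBean implements MessageListener {
    @Override
    public void onMessage(Message message) {
        // ... job processing and report generation as in the question ...
    }
}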

Using JGit, how can I retrieve the line numbers of added/deleted lines?

Assuming the following piece of code is committed to a Git repository:
int test(){
    int a = 3;
    int b = 4;
    int c = a + b;
    return c;
}
and is later updated to
int test(){
    return 7;
}
I currently have a method which uses the JGit API in order to access the Git repository where the above are committed and outputs a string which is similar to the following:
int test(){
-int a = 3;
-int b = 4;
-int c = a + b;
-return c;
+return 7;
}
Now, my requirements have changed and would like to know the line numbers of the changed lines only. So I would want something like the following:
2 -int a = 3;
3 -int b = 4;
4 -int c = a + b;
5 -return c;
2 +return 7;
Basically, the same information that the GitHub application gives when an update is made.
Any help would be greatly appreciated :)
A snippet of how the -/+ lines are computed:
String oldHash = "ee3e216ab5047748a22e9ec5ad3e92834704f0cc";
Git git = null;
try {
    // the path where the repo is
    git = Git.open(new File("C:\\Users\\Administrator\\Documents\\GitHub\\Trial"));
} catch (IOException e1) {
    e1.printStackTrace();
}
Repository repository = git.getRepository();
ObjectId old = null;
ObjectId head = null;
// a new reader to read objects from getObjectDatabase()
ObjectReader reader = repository.newObjectReader();
// create new parsers
CanonicalTreeParser oldTreeIter = new CanonicalTreeParser();
CanonicalTreeParser newTreeIter = new CanonicalTreeParser();
List<DiffEntry> diffs = null;
try {
    // parse a git revision string and return an ObjectId
    old = repository.resolve(oldHash + "^{tree}");
    head = repository.resolve("HEAD^{tree}");
    // reset each parser to walk through the given tree
    oldTreeIter.reset(reader, old);
    newTreeIter.reset(reader, head);
    diffs = git.diff()          // returns a command object to execute a diff command
            .setNewTree(newTreeIter)
            .setOldTree(oldTreeIter)
            .call();            // returns a DiffEntry for each path which is different
} catch (RevisionSyntaxException | IOException | GitAPIException e) {
    e.printStackTrace();
}
// DiffLineCountFilter d = new DiffLineCountFilter();
// out is the stream the formatter will write to
ByteArrayOutputStream out = new ByteArrayOutputStream();
// create a new formatter with a default level of context
DiffFormatter df = new DiffFormatter(out);
// set the repository the formatter can load object contents from
df.setRepository(git.getRepository());
ArrayList<String> diffText = new ArrayList<String>();
// a DiffEntry is "a value class representing a change to a file", so there is one entry per changed file
for (DiffEntry diff : diffs) {
    try {
        // format a patch script for one file entry
        df.format(diff);
        RawText r = new RawText(out.toByteArray());
        r.getLineDelimiter();
        diffText.add(out.toString());
        out.reset();
    } catch (IOException e) {
        e.printStackTrace();
    }
}
You need to take the difference between the A (old side) line indexes and the B (new side) line indexes from the diff result:
int linesAdded = 0;
int linesDeleted = 0;
int filesChanged = 0;
try {
    repo = new FileRepository(new File("repo/.git"));
    RevWalk rw = new RevWalk(repo);
    RevCommit commit = rw.parseCommit(repo.resolve("486817d67b")); // any ref will work here (HEAD, a SHA-1, tag, branch)
    RevCommit parent = rw.parseCommit(commit.getParent(0).getId());
    DiffFormatter df = new DiffFormatter(DisabledOutputStream.INSTANCE);
    df.setRepository(repo);
    df.setDiffComparator(RawTextComparator.DEFAULT);
    df.setDetectRenames(true);
    List<DiffEntry> diffs = df.scan(parent.getTree(), commit.getTree());
    filesChanged = diffs.size();
    for (DiffEntry diff : diffs) {
        for (Edit edit : df.toFileHeader(diff).toEditList()) {
            linesDeleted += edit.getEndA() - edit.getBeginA();
            linesAdded += edit.getEndB() - edit.getBeginB();
        }
    }
} catch (IOException e1) {
    throw new RuntimeException(e1);
}
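Building on that, the Edit list already carries what the question asks for: getBeginA()/getEndA() span the deleted lines in the old file and getBeginB()/getEndB() the added lines in the new file (0-based). A sketch, meant to run inside the same try block with the diffs and df from above, that prints numbered -/+ lines (it assumes modified files, i.e. both sides of each entry have a blob):
for (DiffEntry diff : diffs) {
    // load both revisions of the file so the changed lines can be printed
    RawText oldText = new RawText(repo.open(diff.getOldId().toObjectId()).getBytes());
    RawText newText = new RawText(repo.open(diff.getNewId().toObjectId()).getBytes());
    for (Edit edit : df.toFileHeader(diff).toEditList()) {
        for (int i = edit.getBeginA(); i < edit.getEndA(); i++) {
            // deleted line, numbered in the old revision (1-based)
            System.out.println((i + 1) + " -" + oldText.getString(i));
        }
        for (int i = edit.getBeginB(); i < edit.getEndB(); i++) {
            // added line, numbered in the new revision (1-based)
            System.out.println((i + 1) + " +" + newText.getString(i));
        }
    }
}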
Just a tip for anyone who might have this problem: I did not manage to get the line numbers of the added and deleted lines, but I did manage to get a string which contains only the added and deleted lines, without the unchanged lines in between.
This was simply done by adding the line:
df.setContext(0);
in the snippet I provided above, right before the line
df.format(diff);
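This works because with zero context lines, every hunk header of the form @@ -oldStart,oldCount +newStart,newCount @@ refers exclusively to changed lines, so the line numbers the question asks for can also be parsed straight out of those headers.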
I do it this way, but I don't know if it is correct:
public void linesChangeInFile(Git git, List<RevCommit> commits, String fileName, String pathRepository) {
    try {
        List<RevCommit> commitsComparer = new ArrayList<>();
        List<String> linesChange = new ArrayList<>();
        for (int i = 0; i < commits.size() - 1; i++) {
            ObjectId commitIDOld = commits.get(i).getId();
            if (Validador.isFileExistInCommit(commits.get(i), getRepository(), fileName)) {
                if (i != commits.size() - 1 && !commitsComparer.contains(commits.get(i))) {
                    ObjectId commitIDNew = commits.get(i + 1);
                    commitsComparer.add(commits.get(i));
                    linesChange.add(diff(git, commitIDOld.getName(), commitIDNew.getName(), fileName));
                }
                try (final FileInputStream input = new FileInputStream(pathRepository + "\\" + fileName)) {
                    currentLines = IOUtils.readLines(input, "UTF-8").size();
                }
            }
        }
        Integer sumLinesAdd = 0;
        Integer sumLinesDel = 0;
        for (String lineChange : linesChange) {
            String[] lChange = lineChange.split(";");
            sumLinesAdd += Integer.parseInt(lChange[0]);
            sumLinesDel += Integer.parseInt(lChange[1]);
        }
        System.out.println("Lines Add total:" + sumLinesAdd);
        System.out.println("Lines Del total:" + sumLinesDel);
        System.out.println("Total lines change:" + (sumLinesAdd + sumLinesDel));
    } catch (RevisionSyntaxException e) {
        e.printStackTrace();
    } catch (IOException e) {
        e.printStackTrace();
    }
}

private String diff(Git git, String commitIDOld, String commitIDNew, String fileName) {
    int linesAdded = 0;
    int linesDeleted = 0;
    DiffFormatter df = null;
    try {
        AbstractTreeIterator oldTreeParser = prepareTreeParser(getRepository(), commitIDOld);
        AbstractTreeIterator newTreeParser = prepareTreeParser(getRepository(), commitIDNew);
        List<DiffEntry> diffs = git.diff().setOldTree(oldTreeParser).setNewTree(newTreeParser)
                .setPathFilter(PathFilter.create(fileName)).call();
        df = new DiffFormatter(DisabledOutputStream.INSTANCE);
        df.setRepository(getRepository());
        df.setDiffComparator(RawTextComparator.DEFAULT);
        df.setDetectRenames(true);
        for (DiffEntry entry : diffs) {
            // System.out.println("Entry: " + entry + ", from: " + entry.getOldId() + ", to: " + entry.getNewId());
            // try (DiffFormatter formatter = new DiffFormatter(System.out)) {
            //     formatter.setContext(0);
            //     formatter.setRepository(repository);
            //     formatter.format(entry);
            // }
            for (Edit edit : df.toFileHeader(entry).toEditList()) {
                linesDeleted += edit.getEndA() - edit.getBeginA();
                linesAdded += edit.getEndB() - edit.getBeginB();
            }
        }
    } catch (IOException | GitAPIException e) {
        System.err.println("Error:" + e.getMessage());
    }
    return linesAdded + ";" + linesDeleted;
}
