Convert BufferedReader To InputStream - java

I just finished writing code that works fine when reading from an internal resource:
try {
SeriesDataXMLPullParserHandler seriesDataXmlPullParserHandler = new SeriesDataXMLPullParserHandler();
entries = seriesDataXmlPullParserHandler.parse(getAssets().open("series_box.xml"));
} catch (IOException e) {
e.printStackTrace();
Log.d("errorOpeningSeries", e.getMessage());
}
Collections.sort(entries, new Comparator<Entry>() {
@Override
public int compare(Entry entryOne, Entry entryTwo) {
return (entryOne.getSeriesName().compareTo(entryTwo.getSeriesName()));
}
});
listView.setAdapter(new MyAdapter(this, R.id.details_SeriesName, entries));
"SeriesDataXMLPullParserHandler" class parse data from xml file that uses InputStream as argument
here is "SeriesDataXMLPullParserHandler" class
public class SeriesDataXMLPullParserHandler {
List<Entry> entries;
private Entry entry;
private String text;
public SeriesDataXMLPullParserHandler() {
entries = new ArrayList<>();
}
public List<Entry> getEntries() {
return entries;
}
public List<Entry> parse(InputStream inputStream) {
XmlPullParserFactory xmlPullParserFactory = null;
XmlPullParser xmlPullParser = null;
try {
xmlPullParserFactory = XmlPullParserFactory.newInstance();
xmlPullParserFactory.setNamespaceAware(true);
xmlPullParser = xmlPullParserFactory.newPullParser();
xmlPullParser.setInput(inputStream, null);
int eventType = xmlPullParser.getEventType();
while (eventType != XmlPullParser.END_DOCUMENT) {
String tagname = xmlPullParser.getName();
switch (eventType) {
case XmlPullParser.START_TAG:
if (tagname.equalsIgnoreCase("series")) {
entry = new Entry();
}
break;
case XmlPullParser.TEXT:
text = xmlPullParser.getText();
break;
case XmlPullParser.END_TAG:
if (tagname.equalsIgnoreCase("series")) {
entries.add(entry);
} else if (tagname.equalsIgnoreCase("id")) {
entry.setId(text);
} else if (tagname.equalsIgnoreCase("Actors")) {
entry.setActors(text);
}else if (tagname.equalsIgnoreCase("Genre")) {
entry.setGenre(text);
} else if (tagname.equalsIgnoreCase("IMDB_ID")) {
entry.setImdb_id(text);
} else if (tagname.equalsIgnoreCase("Language")) {
entry.setLanguage(text);
} else if (tagname.equalsIgnoreCase("Network")) {
entry.setNetwork(text);
} else if (tagname.equalsIgnoreCase("NetworkID")) {
entry.setNetwork_id(text);
} else if (tagname.equalsIgnoreCase("Overview")) {
entry.setOverview(text);
} else if (tagname.equalsIgnoreCase("SeriesID")) {
entry.setSeriesId(text);
} else if (tagname.equalsIgnoreCase("SeriesName")) {
entry.setSeriesName(text);
}
break;
default:
break;
}
eventType = xmlPullParser.next();
}
} catch (XmlPullParserException | IOException e) {
e.printStackTrace();
}
return entries;
}
}
But the problem is that when I want to get the data from a server, it comes wrapped in a Reader:
BufferedReader bufferedReader = new BufferedReader(new InputStreamReader(url.openStream()));
The question is: how do I get from this BufferedReader back to an InputStream for parsing, or what is the best way to do such a thing? Sorry for my bad English.
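For what it's worth, java.net.URL.openStream() already returns an InputStream, so the Reader wrapper can simply be dropped and the stream handed straight to the parser. A minimal sketch, assuming the same handler class and a java.net.URL named url:
try (InputStream in = new BufferedInputStream(url.openStream())) {
    // BufferedInputStream only adds buffering, much like BufferedReader did for character data
    SeriesDataXMLPullParserHandler handler = new SeriesDataXMLPullParserHandler();
    entries = handler.parse(in);
} catch (IOException e) {
    Log.d("errorOpeningSeries", "failed to load series XML", e);
}
If you are ever stuck with only a Reader, there is no lossless way back to an InputStream without re-encoding the characters, so keeping the original InputStream is the cleaner option.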


Execute command line equivalent to Runtime.getRuntime().exec(cmd); in JNI C

I was developing an app that required root-detection logic. After some research I found detection logic in Java and implemented the following class.
class RootDetection {
public boolean isDeviceRooted() {
return checkForBinary("su") || checkForBinary("busybox") || checkForMaliciousPaths() || checkSUonPath()
|| detectRootManagementApps() || detectPotentiallyDangerousApps() || detectRootCloakingApps()
|| checkForDangerousProps() || checkForRWPaths()
|| detectTestKeys() || checkSuExists();
}
private boolean detectTestKeys() {
String buildTags = android.os.Build.TAGS;
String buildFinger = Build.FINGERPRINT;
String product = Build.PRODUCT;
String hardware = Build.HARDWARE;
String display = Build.DISPLAY;
System.out.println("Java: build: " + buildTags + "\nFingerprint: " + buildFinger + "\n Product: " + product + "\n Hardware: " + hardware + "\nDisplay: " + display);
return (buildTags != null) && (buildTags.contains("test-keys") || buildFinger.contains("genric.*test-keys") || product.contains("generic") || product.contains("sdk") || hardware.contains("goldfish") || display.contains(".*test-keys"));
}
private boolean detectRootManagementApps() {
return detectRootManagementApps(null);
}
private boolean detectRootManagementApps(String[] additionalRootManagementApps) {
ArrayList<String> packages = new ArrayList<>();
packages.addAll(Arrays.asList(knownRootAppsPackages));
if (additionalRootManagementApps != null && additionalRootManagementApps.length > 0) {
packages.addAll(Arrays.asList(additionalRootManagementApps));
}
return isAnyPackageFromListInstalled(packages);
}
private boolean detectPotentiallyDangerousApps() {
return detectPotentiallyDangerousApps(null);
}
private boolean detectPotentiallyDangerousApps(String[] additionalDangerousApps) {
ArrayList<String> packages = new ArrayList<>();
packages.addAll(Arrays.asList(knownDangerousAppsPackages));
if (additionalDangerousApps != null && additionalDangerousApps.length > 0) {
packages.addAll(Arrays.asList(additionalDangerousApps));
}
return isAnyPackageFromListInstalled(packages);
}
private boolean detectRootCloakingApps() {
return detectRootCloakingApps(null);
}
private boolean detectRootCloakingApps(String[] additionalRootCloakingApps) {
ArrayList<String> packages = new ArrayList<>();
packages.addAll(Arrays.asList(knownRootCloakingPackages));
if (additionalRootCloakingApps != null && additionalRootCloakingApps.length > 0) {
packages.addAll(Arrays.asList(additionalRootCloakingApps));
}
return isAnyPackageFromListInstalled(packages);
}
private boolean checkForBinary(String filename) {
for (String path : suPaths) {
String completePath = path + filename;
File f = new File(completePath);
boolean fileExists = f.exists();
if (fileExists) {
return true;
}
}
return false;
}
private boolean checkForMaliciousPaths() {
for (String path : maliciousPaths) {
File f = new File(path);
boolean fileExists = f.exists();
if (fileExists) {
return true;
}
}
return false;
}
private static boolean checkSUonPath() {
for (String pathDir : System.getenv("PATH").split(":")) {
if (new File(pathDir, "su").exists()) {
return true;
}
}
return false;
}
private String[] propsReader() {
InputStream inputstream = null;
try {
inputstream = Runtime.getRuntime().exec("getprop").getInputStream();
} catch (IOException e) {
e.printStackTrace();
}
String propval = "";
try {
propval = new Scanner(inputstream).useDelimiter("\\A").next();
} catch (NoSuchElementException e) {
}
return propval.split("\n");
}
private String[] mountReader() {
InputStream inputstream = null;
try {
inputstream = Runtime.getRuntime().exec("mount").getInputStream();
} catch (IOException e) {
e.printStackTrace();
}
if (inputstream == null) return null;
String propval = "";
try {
propval = new Scanner(inputstream).useDelimiter("\\A").next();
} catch (NoSuchElementException e) {
e.printStackTrace();
}
return propval.split("\n");
}
private boolean isAnyPackageFromListInstalled(List<String> packages) {
PackageManager pm = activity.getPackageManager();
for (String packageName : packages) {
try {
pm.getPackageInfo(packageName, 0);
return true;
} catch (PackageManager.NameNotFoundException e) {
}
}
return false;
}
private boolean checkForDangerousProps() {
final Map<String, String> dangerousProps = new HashMap<>();
dangerousProps.put("ro.debuggable", "1");
dangerousProps.put("ro.secure", "0");
String[] lines = propsReader();
for (String line : lines) {
for (String key : dangerousProps.keySet()) {
if (line.contains(key)) {
String badValue = dangerousProps.get(key);
badValue = "[" + badValue + "]";
if (line.contains(badValue)) {
return true;
}
}
}
}
return false;
}
private boolean checkForRWPaths() {
String[] lines = mountReader();
for (String line : lines) {
String[] args = line.split(" ");
if (args.length < 4) {
continue;
}
String mountPoint = args[1];
String mountOptions = args[3];
for (String pathToCheck : pathsThatShouldNotBeWrtiable) {
if (mountPoint.equalsIgnoreCase(pathToCheck)) {
for (String option : mountOptions.split(",")) {
if (option.equalsIgnoreCase("rw")) {
return true;
}
}
}
}
}
return false;
}
private boolean checkSuExists() {
Process process = null;
try {
process = Runtime.getRuntime().exec(new String[]{"which", "su"});
BufferedReader in = new BufferedReader(new InputStreamReader(process.getInputStream()));
return in.readLine() != null;
} catch (Throwable t) {
return false;
} finally {
if (process != null) process.destroy();
}
}
}
But now, to increase security, I want to implement this root-detection logic in native C++ (JNI) code. I managed to migrate the package-detection code to JNI C, but I cannot find anything for these 3 functions:
checkForDangerousProps(), checkForRWPaths(), checkSuExists()
These 3 use Runtime.getRuntime().exec(), for which I cannot find a JNI equivalent. Can someone help me convert these 3 pieces of logic from the code above to JNI C? Any help would be really appreciated.

Java LightSIDE - How to categorize data with LightSIDE?

I have set up the LightSIDE plugin and it runs properly, but I don't know why I can't save my data to an empty file. This is the simple structure I made:
Activity is the list of data that needs to be categorized.
I have 3 categories, and each of them has its own type.
I already define each category with a specific list of words. For example: Food ({Sushi, Food, Japan}, {Cap Jay, Food, Chinese}, {Jog, Sport, Running}, ...)
And this is how I save my prediction with LightSIDE:
public void predictSectionType(String[] sections, List<String> activityList) {
LightSideService currentLightsideHelper = new LightSideService();
Recipe newRecipe;
// Initialize SIDEPlugin
currentLightsideHelper.initSIDEPlugin();
try {
// Load Recipe with Extracted Features & Trained Models
ClassLoader myClassLoader = getClass().getClassLoader();
newRecipe = ConverterControl.readFromXML(new InputStreamReader(myClassLoader.getResourceAsStream("static/lightsideTrainingResult/trainingData.xml")));
// Predict Result Data
Recipe recipeToPredict = currentLightsideHelper.loadNewDocumentsFromCSV(sections); // DocumentList & Recipe Created
currentLightsideHelper.predictLabels(recipeToPredict, newRecipe);
} catch (FileNotFoundException e) {
e.printStackTrace();
} catch (IOException e) {
e.printStackTrace();
}
}
I have a LightSideService class as a summary class of the LightSIDE functions:
public class LightSideService {
// Extract Features Parameters
final String featureTableName = "1Grams";
final int featureThreshold = 2;
final String featureAnnotation = "Code";
final Type featureType = Type.NOMINAL;
// Build Models Parameters
final String trainingResultName = "Bayes_1Grams";
// Predict Labels Parameters
final String predictionColumnName = featureAnnotation + "_Prediction";
final boolean showMaxScore = false;
final boolean showDists = true;
final boolean overwrite = false;
final boolean useEvaluation = false;
public DocumentListTableModel model = new DocumentListTableModel(null);
public Map<String, Serializable> validationSettings = new TreeMap<String, Serializable>();
public Map<FeaturePlugin, Boolean> featurePlugins = new HashMap<FeaturePlugin, Boolean>();
public Map<LearningPlugin, Boolean> learningPlugins = new HashMap<LearningPlugin, Boolean>();
public Collection<ModelMetricPlugin> modelEvaluationPlugins = new ArrayList<ModelMetricPlugin>();
public Map<WrapperPlugin, Boolean> wrapperPlugins = new HashMap<WrapperPlugin, Boolean>();
// Initialize Data ==================================================
public void initSIDEPlugin() {
SIDEPlugin[] featureExtractors = PluginManager.getSIDEPluginArrayByType("feature_hit_extractor");
boolean selected = true;
for (SIDEPlugin fe : featureExtractors) {
featurePlugins.put((FeaturePlugin) fe, selected);
selected = false;
}
SIDEPlugin[] learners = PluginManager.getSIDEPluginArrayByType("model_builder");
for (SIDEPlugin le : learners) {
learningPlugins.put((LearningPlugin) le, true);
}
SIDEPlugin[] tableEvaluations = PluginManager.getSIDEPluginArrayByType("model_evaluation");
for (SIDEPlugin fe : tableEvaluations) {
modelEvaluationPlugins.add((ModelMetricPlugin) fe);
}
SIDEPlugin[] wrappers = PluginManager.getSIDEPluginArrayByType("learning_wrapper");
for (SIDEPlugin wr : wrappers) {
wrapperPlugins.put((WrapperPlugin) wr, false);
}
}
//Used to Train Models, adjust parameters according to model
public void initValidationSettings(Recipe currentRecipe) {
validationSettings.put("testRecipe", currentRecipe);
validationSettings.put("testSet", currentRecipe.getDocumentList());
validationSettings.put("annotation", "Age");
validationSettings.put("type", "CV");
validationSettings.put("foldMethod", "AUTO");
validationSettings.put("numFolds", 10);
validationSettings.put("source", "RANDOM");
validationSettings.put("test", "true");
}
// Load CSV Doc ==================================================
public Recipe loadNewDocumentsFromCSV(String filePath) {
DocumentList testDocs;
testDocs = chooseDocumentList(filePath);
if (testDocs != null) {
testDocs.guessTextAndAnnotationColumns();
Recipe currentRecipe = Recipe.fetchRecipe();
currentRecipe.setDocumentList(testDocs);
return currentRecipe;
}
return null;
}
public Recipe loadNewDocumentsFromCSV(String[] rootCauseList) {
DocumentList testDocs;
testDocs = chooseDocumentList(rootCauseList);
if (testDocs != null) {
testDocs.guessTextAndAnnotationColumns();
Recipe currentRecipe = Recipe.fetchRecipe();
currentRecipe.setDocumentList(testDocs);
return currentRecipe;
}
return null;
}
protected DocumentList chooseDocumentList(String filePath) {
TreeSet<String> docNames = new TreeSet<String>();
docNames.add(filePath);
try {
DocumentList testDocs;
Charset encoding = Charset.forName("UTF-8");
{
testDocs = ImportController.makeDocumentList(docNames, encoding);
}
return testDocs;
} catch (FileNotFoundException e) {
e.printStackTrace();
} catch (Exception e) {
e.printStackTrace();
}
return null;
}
protected DocumentList chooseDocumentList(String[] rootCauseList) {
try {
DocumentList testDocs;
testDocs = new DocumentList();
testDocs.setName("TestData.csv");
List<String> codes = new ArrayList();
List<String> roots = new ArrayList();
for (String s : rootCauseList) {
codes.add("");
roots.add((s != null) ? s : "");
}
testDocs.addAnnotation("Code", codes, false);
testDocs.addAnnotation("Root Cause Failure Description", roots, false);
return testDocs;
} catch (Exception e) {
e.printStackTrace();
}
return null;
}
// Save/Load XML ==================================================
public void saveRecipeToXml(Recipe currentRecipe, String filePath) {
File f = new File(filePath);
try {
ConverterControl.writeToXML(f, currentRecipe);
} catch (Exception e) {
e.printStackTrace();
}
}
public Recipe loadRecipeFromXml(String filePath) throws FileNotFoundException, IOException {
Recipe currentRecipe = ConverterControl.loadRecipe(filePath);
return currentRecipe;
}
// Extract Features ==================================================
public Recipe prepareBuildFeatureTable(Recipe currentRecipe) {
// Add Feature Plugins
Collection<FeaturePlugin> plugins = new TreeSet<FeaturePlugin>();
for (FeaturePlugin plugin : featurePlugins.keySet()) {
String pluginString = plugin.toString();
if (pluginString == "Basic Features" || pluginString == "Character N-Grams") {
plugins.add(plugin);
}
}
// Generate Plugin into Recipe
currentRecipe = Recipe.addPluginsToRecipe(currentRecipe, plugins);
// Setup Plugin configurations
OrderedPluginMap currentOrderedPluginMap = currentRecipe.getExtractors();
for (SIDEPlugin plugin : currentOrderedPluginMap.keySet()) {
String pluginString = plugin.toString();
Map<String, String> currentConfigurations = currentOrderedPluginMap.get(plugin);
if (pluginString == "Basic Features") {
for (String s : currentConfigurations.keySet()) {
if (s == "Unigrams" || s == "Bigrams" || s == "Trigrams" ||
s == "Count Occurences" || s == "Normalize N-Gram Counts" ||
s == "Stem N-Grams" || s == "Skip Stopwords in N-Grams") {
currentConfigurations.put(s, "true");
} else {
currentConfigurations.put(s, "false");
}
}
} else if (pluginString == "Character N-Grams") {
for (String s : currentConfigurations.keySet()) {
if (s == "Include Punctuation") {
currentConfigurations.put(s, "true");
} else if (s == "minGram") {
currentConfigurations.put(s, "3");
} else if (s == "maxGram") {
currentConfigurations.put(s, "4");
}
}
currentConfigurations.put("Extract Only Within Words", "true");
}
}
// Build FeatureTable
currentRecipe = buildFeatureTable(currentRecipe, featureTableName, featureThreshold, featureAnnotation, featureType);
return currentRecipe;
}
protected Recipe buildFeatureTable(Recipe currentRecipe, String name, int threshold, String annotation, Type type) {
FeaturePlugin activeExtractor = null;
try {
Collection<FeatureHit> hits = new HashSet<FeatureHit>();
for (SIDEPlugin plug : currentRecipe.getExtractors().keySet()) {
activeExtractor = (FeaturePlugin) plug;
hits.addAll(activeExtractor.extractFeatureHits(currentRecipe.getDocumentList(), currentRecipe.getExtractors().get(plug)));
}
FeatureTable ft = new FeatureTable(currentRecipe.getDocumentList(), hits, threshold, annotation, type);
ft.setName(name);
currentRecipe.setFeatureTable(ft);
} catch (Exception e) {
System.err.println("Feature Extraction Failed");
e.printStackTrace();
}
return currentRecipe;
}
// Build Models ==================================================
public Recipe prepareBuildModel(Recipe currentRecipe) {
try {
// Get Learner Plugins
LearningPlugin learner = null;
for (LearningPlugin plugin : learningPlugins.keySet()) {
/* if (plugin.toString() == "Naive Bayes") */
if (plugin.toString() == "Logistic Regression") {
learner = plugin;
}
}
if (Boolean.TRUE.toString().equals(validationSettings.get("test"))) {
if (validationSettings.get("type").equals("CV")) {
validationSettings.put("testSet", currentRecipe.getDocumentList());
}
}
Map<String, String> settings = learner.generateConfigurationSettings();
currentRecipe = Recipe.addLearnerToRecipe(currentRecipe, learner, settings);
currentRecipe.setValidationSettings(new TreeMap<String, Serializable>(validationSettings));
for (WrapperPlugin wrap : wrapperPlugins.keySet()) {
if (wrapperPlugins.get(wrap)) {
currentRecipe.addWrapper(wrap, wrap.generateConfigurationSettings());
}
}
buildModel(currentRecipe, validationSettings);
} catch (Exception e) {
e.printStackTrace();
}
return currentRecipe;
}
protected void buildModel(Recipe currentRecipe,
Map<String, Serializable> validationSettings) {
try {
FeatureTable currentFeatureTable = currentRecipe.getTrainingTable();
if (currentRecipe != null) {
TrainingResult results = null;
/*
* if (validationSettings.get("type").equals("SUPPLY")) {
* DocumentList test = (DocumentList)
* validationSettings.get("testSet"); FeatureTable
* extractTestFeatures = prepareTestFeatureTable(currentRecipe,
* validationSettings, test);
* validationSettings.put("testFeatureTable",
* extractTestFeatures);
*
* // if we've already trained the exact same model, don't // do
* it again. Just evaluate. Recipe cached =
* checkForCachedModel(); if (cached != null) { results =
* evaluateUsingCachedModel(currentFeatureTable,
* extractTestFeatures, cached, currentRecipe); } }
*/
if (results == null) {
results = currentRecipe.getLearner().train(currentFeatureTable, currentRecipe.getLearnerSettings(), validationSettings, currentRecipe.getWrappers());
}
if (results != null) {
currentRecipe.setTrainingResult(results);
results.setName(trainingResultName);
currentRecipe.setLearnerSettings(currentRecipe.getLearner().generateConfigurationSettings());
currentRecipe.setValidationSettings(new TreeMap<String, Serializable>(validationSettings));
}
}
} catch (Exception e) {
e.printStackTrace();
}
}
protected static FeatureTable prepareTestFeatureTable(Recipe recipe, Map<String, Serializable> validationSettings, DocumentList test) {
prepareDocuments(recipe, validationSettings, test); // assigns classes, annotations.
Collection<FeatureHit> hits = new TreeSet<FeatureHit>();
OrderedPluginMap extractors = recipe.getExtractors();
for (SIDEPlugin plug : extractors.keySet()) {
Collection<FeatureHit> extractorHits = ((FeaturePlugin) plug).extractFeatureHits(test, extractors.get(plug));
hits.addAll(extractorHits);
}
FeatureTable originalTable = recipe.getTrainingTable();
FeatureTable ft = new FeatureTable(test, hits, 0, originalTable.getAnnotation(), originalTable.getClassValueType());
for (SIDEPlugin plug : recipe.getFilters().keySet()) {
ft = ((RestructurePlugin) plug).filterTestSet(originalTable, ft, recipe.getFilters().get(plug), recipe.getFilteredTable().getThreshold());
}
ft.reconcileFeatures(originalTable.getFeatureSet());
return ft;
}
protected static Map<String, Serializable> prepareDocuments(Recipe currentRecipe, Map<String, Serializable> validationSettings, DocumentList test) throws IllegalStateException {
DocumentList train = currentRecipe.getDocumentList();
try {
test.setCurrentAnnotation(currentRecipe.getTrainingTable().getAnnotation(), currentRecipe.getTrainingTable().getClassValueType());
test.setTextColumns(new HashSet<String>(train.getTextColumns()));
test.setDifferentiateTextColumns(train.getTextColumnsAreDifferentiated());
Collection<String> trainColumns = train.allAnnotations().keySet();
Collection<String> testColumns = test.allAnnotations().keySet();
if (!testColumns.containsAll(trainColumns)) {
ArrayList<String> missing = new ArrayList<String>(trainColumns);
missing.removeAll(testColumns);
throw new java.lang.IllegalStateException("Test set annotations do not match training set.\nMissing columns: " + missing);
}
validationSettings.put("testSet", test);
} catch (Exception e) {
e.printStackTrace();
throw new java.lang.IllegalStateException("Could not prepare test set.\n" + e.getMessage(), e);
}
return validationSettings;
}
//Predict Labels ==================================================
public void predictLabels(Recipe recipeToPredict, Recipe currentRecipe) {
DocumentList newDocs = null;
DocumentList originalDocs;
if (useEvaluation) {
originalDocs = recipeToPredict.getTrainingResult().getEvaluationTable().getDocumentList();
TrainingResult results = currentRecipe.getTrainingResult();
List<String> predictions = (List<String>) results.getPredictions();
newDocs = addLabelsToDocs(predictionColumnName, showDists, overwrite, originalDocs, results, predictions, currentRecipe.getTrainingTable());
} else {
originalDocs = recipeToPredict.getDocumentList();
Predictor predictor = new Predictor(currentRecipe, predictionColumnName);
newDocs = predictor.predict(originalDocs, predictionColumnName, showDists, overwrite);
}
// Predict Labels result
model.setDocumentList(newDocs);
}
protected DocumentList addLabelsToDocs(final String name, final boolean showDists, final boolean overwrite, DocumentList docs, TrainingResult results, List<String> predictions, FeatureTable currentFeatureTable) {
Map<String, List<Double>> distributions = results.getDistributions();
DocumentList newDocs = docs.clone();
newDocs.addAnnotation(name, predictions, overwrite);
if (distributions != null) {
if (showDists) {
for (String label : currentFeatureTable.getLabelArray()) {
List<String> dist = new ArrayList<String>();
for (int i = 0; i < predictions.size(); i++) {
dist.add(String.format("%.3f", distributions.get(label).get(i)));
}
newDocs.addAnnotation(name + "_" + label + "_score", dist, overwrite);
}
}
}
return newDocs;
}
// ==================================================
}
David, it looks like the above replicates a lot of the functionality from the edu.cmu.side.recipe package. However, it doesn't look like your predictSectionType() method actually outputs the model's predictions anywhere.
If what you're trying to do is indeed to save predictions on new data using a trained model, check out the edu.cmu.side.recipe.Predictor class. It takes a trained model path as input and is used by the scripts/predict.sh convenience script, but you could repurpose its main() method if you needed to call it programmatically.
I hope this helps!
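For illustration only, here is a rough sketch of pulling the predicted labels back out after prediction, reusing the Predictor calls already present in LightSideService. It assumes DocumentList.allAnnotations() returns a map from annotation name to per-document values, which may differ in your LightSIDE version:
// Rough sketch only; allAnnotations() is assumed to map annotation name -> per-document values.
Predictor predictor = new Predictor(currentRecipe, predictionColumnName);
DocumentList predictedDocs = predictor.predict(recipeToPredict.getDocumentList(), predictionColumnName, showDists, overwrite);
List<String> predictedLabels = predictedDocs.allAnnotations().get(predictionColumnName);
for (int i = 0; i < predictedLabels.size(); i++) {
    System.out.println("section " + i + " -> " + predictedLabels.get(i));
}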

Java inner class new instance not being created

I have a Java class that is going to have a number of inner classes. This is done for organization and to keep things in a separate file.
public class PUCObjects
{
public static class PUCNewsItem
{
public String title;
public String summary;
public String body;
public String url;
public String imageUrl;
}
}
I am then trying to create a new instance of that inner class (doing this in another class that parses some remote XML), but for some reason it doesn't seem to get created:
public static ArrayList<PUCObjects.PUCNewsItem> getPUCNews() throws IOException {
String url = "http://api.puc.edu/news/list?key="+API_KEY+"&count=30";
InputStream is = downloadUrl(url);
XmlPullParserFactory pullParserFactory;
try {
pullParserFactory = XmlPullParserFactory.newInstance();
XmlPullParser parser = pullParserFactory.newPullParser();
parser.setInput(is, null);
ArrayList<PUCObjects.PUCNewsItem> items = null;
int eventType = parser.getEventType();
PUCObjects.PUCNewsItem item = null;
Log.d("Debug: ", "Start: "+url);
while (eventType != XmlPullParser.END_DOCUMENT){
String name = null;
switch (eventType){
case XmlPullParser.START_DOCUMENT:
items = new ArrayList<PUCObjects.PUCNewsItem>();
break;
case XmlPullParser.START_TAG:
name = parser.getName();
//Log.d("Start Tag Name: ", parser.getName()+" === "+name);
if (name == "item"){
Log.d("Debug: ", "Item");
item = new PUCObjects.PUCNewsItem();
} else if (item != null){
Log.d("Debug: ", "Item is not NULL 2");
if (name == "title"){
Log.d("Title: ", parser.nextText());
item.title = parser.nextText();
} else if (name == "summary"){
item.summary = parser.nextText();
} else if (name == "body_text"){
item.body = parser.nextText();
}
}
break;
case XmlPullParser.END_TAG:
name = parser.getName();
if (name.equalsIgnoreCase("item") && item != null) {
Log.d("Debug: ", "ADD ITEM");
items.add(item);
}
break;
}//end switch
eventType = parser.next();
}//end while
Log.d("Debug: ", "Done");
return items;
} catch (XmlPullParserException e) {
e.printStackTrace();
} catch (IOException e) {
// TODO Auto-generated catch block
e.printStackTrace();
}
return null;
}//end
I am trying to create the object with item = new PUCObjects.PUCNewsItem(); but it always seems to be null.
Is there a reason why this object isn't getting created?
The problem is string comparison. Your if statement never evaluates to true because of the == check:
if (name == "item"){
You need to use the equals() method instead of == when comparing objects/Strings. Read this thread for more information on equals() vs ==.
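For example, the START_TAG branch with equals() (literal first, so a null name cannot throw). Note that the original also calls parser.nextText() twice for the title (once inside Log.d), which advances the parser and loses the value, so call it once and reuse the result:
case XmlPullParser.START_TAG:
    name = parser.getName();
    if ("item".equals(name)) {
        item = new PUCObjects.PUCNewsItem();
    } else if (item != null) {
        if ("title".equals(name)) {
            item.title = parser.nextText();   // read once, then log item.title if needed
        } else if ("summary".equals(name)) {
            item.summary = parser.nextText();
        } else if ("body_text".equals(name)) {
            item.body = parser.nextText();
        }
    }
    break;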

Load very heavy stream with GSON

I'm trying to read a very heavy JSON document (more than 6000 objects) and store it in a hash map so I can insert it into my database later.
The problem is that I hit an OutOfMemoryError because of the heavy JSON. The GSON library is supposed to save me from this situation, but it does not!
Any ideas?
public Map<String,String> readJsonStream(InputStream in) throws IOException
{
JsonReader reader = new JsonReader(new InputStreamReader(in, "UTF-8"));
Map<String,String> contentMap = new HashMap<String,String>();
Gson mGson = new Gson();
contentMap = mGson.fromJson(reader, contentMap.getClass());
reader.close();
return contentMap;
}
From my experience, yes, you can use Google GSON to stream JSON data. This is an example of how to do it:
APIModel result = new APIModel();
try {
HttpResponse response;
HttpClient myClient = new DefaultHttpClient();
HttpPost myConnection = new HttpPost(APIParam.API_001_PRESENT(
serial_id, api_key));
try {
response = myClient.execute(myConnection);
Reader streamReader = new InputStreamReader(response
.getEntity().getContent());
JsonReader reader = new JsonReader(streamReader);
reader.beginObject();
while (reader.hasNext()) {
String name = reader.nextName();
if (name.equals("result")) {
if ("NG".equals(reader.nextString())) {
result.setResult(Util.API_001_RESULT_NG);
break;
}
} else if (name.equals("items")) {
result = readItemsArray(reader);
} else {
reader.skipValue(); // avoid some unhandle events
}
}
reader.endObject();
reader.close();
} catch (Exception e) {
e.printStackTrace();
result.setResult(Util.API_001_RESULT_NG);
}
} catch (Exception e) {
e.printStackTrace();
result.setResult(Util.API_001_RESULT_NG);
}
readItemsArray function :
// read items array
private APIModel readItemsArray(JsonReader reader) throws IOException {
APIModel result = new APIModel();
String item_name, file_name, data;
result.setResult(Util.API_001_RESULT_OK);
reader.beginArray();
while (reader.hasNext()) {
item_name = "";
file_name = "";
data = "";
reader.beginObject();
while (reader.hasNext()) {
String name = reader.nextName();
if (name.equals("name")) {
item_name = reader.nextString();
} else if (name.equals("file")) {
file_name = reader.nextString();
} else if (name.equals("data")) {
data = reader.nextString();
} else {
reader.skipValue();
}
}
reader.endObject();
result.populateModel("null", item_name, file_name, data);
}
reader.endArray();
return result;
}
API Model Class :
public class APIModel {
private int result;
private String error_title;
private String error_message;
private ArrayList<String> type;
private ArrayList<String> item_name;
private ArrayList<String> file_name;
private ArrayList<String> data;
public APIModel() {
result = -1;
error_title = "";
error_message = "";
setType(new ArrayList<String>());
setItem_name(new ArrayList<String>());
setFile_name(new ArrayList<String>());
setData(new ArrayList<String>());
}
public void populateModel(String type, String item_name, String file_name, String data) {
this.type.add(type);
this.item_name.add(item_name);
this.file_name.add(file_name);
this.data.add(data);
}
public int getResult() {
return result;
}
public void setResult(int result) {
this.result = result;
}
public String getError_title() {
return error_title;
}
public void setError_title(String error_title) {
this.error_title = error_title;
}
public String getError_message() {
return error_message;
}
public void setError_message(String error_message) {
this.error_message = error_message;
}
public ArrayList<String> getType() {
return type;
}
public void setType(ArrayList<String> type) {
this.type = type;
}
public ArrayList<String> getItem_name() {
return item_name;
}
public void setItem_name(ArrayList<String> item_name) {
this.item_name = item_name;
}
public ArrayList<String> getFile_name() {
return file_name;
}
public void setFile_name(ArrayList<String> file_name) {
this.file_name = file_name;
}
public ArrayList<String> getData() {
return data;
}
public void setData(ArrayList<String> data) {
this.data = data;
}
}
Before I used the streaming API from Google GSON I also got OOM errors, because the JSON data I receive is very big (many images and sounds in Base64 encoding), but with GSON streaming I can overcome that error because it reads the data token by token instead of all at once. The Jackson JSON library also has a streaming API, and using it is almost the same as my GSON implementation. I hope my answer can help you, and if you have another question about it feel free to ask in the comments :)
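Applied to the original readJsonStream() method (a flat JSON object of string key/value pairs), a minimal streaming version with Gson's JsonReader could look like the sketch below; it assumes the payload really is a single object whose values are strings:
public Map<String, String> readJsonStream(InputStream in) throws IOException {
    // Stream one key/value pair at a time instead of materializing the whole document tree.
    JsonReader reader = new JsonReader(new InputStreamReader(in, "UTF-8"));
    Map<String, String> contentMap = new HashMap<String, String>();
    try {
        reader.beginObject();
        while (reader.hasNext()) {
            String key = reader.nextName();
            contentMap.put(key, reader.nextString());
        }
        reader.endObject();
    } finally {
        reader.close();
    }
    return contentMap;
}
If the values themselves are large (for example Base64 blobs), even this map can exhaust memory; in that case insert each pair into the database inside the loop and discard it, instead of collecting everything first.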

XML SAXParser reformat if else Java/Android

I have the following problem:
I am using an XML SAXParser to parse an XML file, create classes dynamically, and set their properties.
I have written working code that creates 4 classes and sets their properties, but the code is one big conditional chain (if/else if/else) and it is very difficult to read.
I would like to parse the XML so I can create 15 different classes, so the code is getting very big.
The exact question is: how do I refactor the if/else if/else into more readable code? I've searched around for a while and found approaches such as using a map or the command pattern, but I don't understand how to apply them (a sketch of the map-based approach follows after the code below).
This is the code I'm currently using, and it works:
public class XmlParserSax extends DefaultHandler {
List<Fragment> fragments = null;
String atType = null;
String typeObject;
String currentelement = null;
String atColor = null;
RouteFragment route = null;
ChapterFragment chapter = null;
FirstFragment first = null;
ExecuteFragment execute = null;
StringBuilder textBuilder;
public XmlParserSax() {
fragments = new ArrayList<Fragment>();
try {
/**
* Create a new instance of the SAX parser
**/
SAXParserFactory saxPF = SAXParserFactory.newInstance();
SAXParser sp = saxPF.newSAXParser();
XMLReader xr = sp.getXMLReader();
/**
* Create the Handler to handle each of the XML tags.
**/
String file = "assets/test.xml";
InputStream in = this.getClass().getClassLoader()
.getResourceAsStream(file);
xr.setContentHandler(this);
xr.parse(new InputSource(in));
} catch (Exception e) {
System.out.println(e);
}
}
@Override
public void startElement(String uri, String localName, String qName,
Attributes attributes) throws SAXException {
atColor = attributes.getValue("color");
atType = attributes.getValue("type");
currentelement = localName;
textBuilder = new StringBuilder();
if (localName.equalsIgnoreCase("template")) {
if (atType.equalsIgnoreCase("route")) {
route = new RouteFragment();
typeObject = "route";
} else if (atType.equalsIgnoreCase("chapter")) {
chapter = new ChapterFragment();
typeObject = "chapter";
} else if (atType.equalsIgnoreCase("first")) {
first = new FirstFragment();
typeObject = "first";
} else if (atType.equalsIgnoreCase("execute")) {
execute = new ExecuteFragment();
typeObject = "execute";
}
} else if (localName.equalsIgnoreCase("number")) {
if (typeObject.equalsIgnoreCase("chapter")) {
chapter.setNumberTextcolor("#" + atColor);
}
} else if (localName.equalsIgnoreCase("maxnumber")) {
if (typeObject.equalsIgnoreCase("chapter")) {
chapter.setMaxNumberColor("#" + atColor);
}
} else if (localName.equalsIgnoreCase("title")) {
if (typeObject.equalsIgnoreCase("chapter")) {
chapter.setTitleColor("#" + atColor);
} else if (typeObject.equalsIgnoreCase("first")) {
first.setTitleColor("#" + atColor);
}
} else if (localName.equalsIgnoreCase("subtitle")) {
if (typeObject.equalsIgnoreCase("first")) {
first.setSubtitleColor("#" + atColor);
}
} else if (localName.equalsIgnoreCase("text")) {
if (typeObject.equalsIgnoreCase("execute")) {
execute.setTextColor("#" + atColor);
}
}
}
@Override
public void endElement(String uri, String localName, String qName)
throws SAXException {
String text = textBuilder.toString();
if (localName.equalsIgnoreCase("template")) {
if (typeObject.equalsIgnoreCase("route")) {
fragments.add(route); // add the new route fragment to the list
} else if (typeObject.equalsIgnoreCase("chapter")) {
fragments.add(chapter); // add the new chapter fragment to the list
} else if (typeObject.equalsIgnoreCase("first")) {
fragments.add(first);
} else if (typeObject.equalsIgnoreCase("execute")) {
fragments.add(execute);
}
} else if (localName.equalsIgnoreCase("text")) {
if (typeObject.equalsIgnoreCase("route")) {
// route.setOmschrijving(text);
} else if (typeObject.equalsIgnoreCase("execute")) {
execute.setText(text);
}
} else if (localName.equalsIgnoreCase("background")) {
if (typeObject.equalsIgnoreCase("route")) {
// route.setKleur("#" + text);
} else if (typeObject.equalsIgnoreCase("chapter")) {
chapter.setBackgroundColor("#" + text);
} else if (typeObject.equalsIgnoreCase("first")) {
first.setBackgroundColor("#" + text);
} else if (typeObject.equalsIgnoreCase("execute")) {
execute.setBackgroundColor("#" + text);
}
} else if (localName.equalsIgnoreCase("number")) {
if (typeObject.equalsIgnoreCase("chapter")) {
chapter.setNumber(text);
}
} else if (localName.equalsIgnoreCase("maxnumber")) {
if (typeObject.equalsIgnoreCase("chapter")) {
chapter.setMaxNumber(text);
}
} else if (localName.equalsIgnoreCase("title")) {
if (typeObject.equalsIgnoreCase("chapter")) {
chapter.setTitle(text);
} else if (typeObject.equalsIgnoreCase("first")) {
first.setTitle(text);
}
} else if (localName.equalsIgnoreCase("subtitle")) {
if (typeObject.equalsIgnoreCase("first")) {
first.setSubtitle(text);
}
} else if (localName.equalsIgnoreCase("square")) {
if (typeObject.equalsIgnoreCase("execute")) {
execute.setBorderColor("#" + text);
}
}
}
public List<Fragment> getList() {
return fragments;
}
@Override
public void characters(char[] ch, int start, int length)
throws SAXException {
textBuilder.append(ch, start, length);
}
}
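For illustration, here is a rough sketch of the map-based dispatch mentioned in the question, using a tiny handler interface so each tag maps to one action; ElementHandler and the field names are made up for the sketch and would live inside XmlParserSax:
// Hypothetical helper interface; one implementation per tag keeps endElement() flat.
interface ElementHandler {
    void handle(String text);
}

private final Map<String, ElementHandler> chapterEndHandlers = new HashMap<String, ElementHandler>();

private void registerChapterHandlers() {
    chapterEndHandlers.put("number", new ElementHandler() {
        public void handle(String text) { chapter.setNumber(text); }
    });
    chapterEndHandlers.put("maxnumber", new ElementHandler() {
        public void handle(String text) { chapter.setMaxNumber(text); }
    });
    chapterEndHandlers.put("title", new ElementHandler() {
        public void handle(String text) { chapter.setTitle(text); }
    });
    chapterEndHandlers.put("background", new ElementHandler() {
        public void handle(String text) { chapter.setBackgroundColor("#" + text); }
    });
}

// endElement() for the "chapter" type then collapses to a single lookup:
// ElementHandler handler = chapterEndHandlers.get(localName.toLowerCase());
// if (handler != null) { handler.handle(text); }
A similar map (or one map per typeObject) replaces the other branches; the command pattern is essentially the same idea with named handler classes instead of anonymous ones.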
There is another way of doing this: using StartElementListener and EndTextElementListener from the android.sax package.
First define your root element:
RootElement root = new RootElement("root");
Define your child elements
Element nodeA = root.getChild("nodeA");
Element nodeB = root.getChild("nodeB");
Element nodeC = root.getChild("nodeC");
Now set the listeners
root.setStartElementListener(new StartElementListener() {
public void start(Attributes attributes) {
foundElement = true;// tells you that you are parsing the intended xml
}
});
nodeA.setEndTextElementListener(new EndTextElementListener() {
public void end(String body) {
//populate your pojo
}
});
This way you can do away with all those if-else statements and booleans, but you have to live with the N number of listeners.
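To actually run the parse with these listeners, the android.sax handler is passed to android.util.Xml.parse(); a minimal sketch, assuming the same InputStream in as in the question and the root element defined above:
try {
    // Xml.parse drives the SAX events; the listeners registered on root/nodeA fire as elements are read.
    Xml.parse(in, Xml.Encoding.UTF_8, root.getContentHandler());
} catch (IOException e) {
    e.printStackTrace();
} catch (SAXException e) {
    e.printStackTrace();
}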
