I'm using an embedded Neo4j instance in my Spring Boot project (I'm using Spring JPA and Neo4j separately; I'm not using the Spring Boot Neo4j integration), and I want to visualise the graph I build using the Neo4j browser that I downloaded from here, with the addition of a custom init.coffee that allows it to display images inside the nodes.
This is the code I use to build my graph. The buildGraph function is executed when a request is received by one of my RestControllers.
(I include my entire business logic implementation because it may help in spotting relationships/nodes that are being created or handled in the wrong way.)
@Component
public class GraphBuilder
{
private String dbPath;
private int numberOFConnectionsForKeyIndividuals;
private String neo4jBoltAddress;
@Autowired
PersonService personService;
private final GraphDatabaseService graphDb;
public GraphBuilder(@Value("${dbPath}") String dbPath,
@Value("${neo4jBoltAddress}") String neo4jBoltAddress,
@Value("${numberOFConnectionsForKeyIndividuals}") int numberOFConnectionsForKeyIndividuals)
{
GraphDatabaseSettings.BoltConnector bolt = GraphDatabaseSettings.boltConnector( "0" );
this.dbPath = dbPath;
this.neo4jBoltAddress = neo4jBoltAddress;
this.numberOFConnectionsForKeyIndividuals = numberOFConnectionsForKeyIndividuals;
graphDb = new GraphDatabaseFactory()
.newEmbeddedDatabaseBuilder( new File(dbPath) )
.setConfig( bolt.type, "BOLT" )
.setConfig( bolt.enabled, "true" )
.setConfig( bolt.address, neo4jBoltAddress )
.newGraphDatabase();
registerShutdownHook( graphDb );
}
public void buildGraph()
{
List<Long> AsIDs = new ArrayList<>();
Map<Long,Long> personIdToNodeMap = new HashMap<>();
Map<String,List<Long>> nameToId = new HashMap<>();
Map<Long,List<Association>> associationsMap = new HashMap<>();
try ( Transaction tx = graphDb.beginTx() )
{
Schema schema = graphDb.schema();
for(Person person : personService.findAllPersons())
{
Node personNode = graphDb.createNode(new Label() {
@Override
public String name() {
return "Person";
}
});
//mapping persons to their respective nodes
personIdToNodeMap.put(person.getPersonId(),personNode.getId());
//mapping names to the ids of the persons
if(nameToId.get(person.getName()) == null)
{
nameToId.put(person.getName(), new ArrayList<>());
}
nameToId.get(person.getName()).add(person.getPersonId());
personNode.setProperty("Name", person.getName());
for(int a = 0 ; a < person.getAliases().size() ; a++)
{
personNode.setProperty("Alias " + a+1, person.getAliases().get(a).getAlias());
}
personNode.setProperty("Additional Name Information", person.getAdditionalNameInformation() != null ? person.getAdditionalNameInformation() : "");
personNode.setProperty("Id", person.getPersonId());
personNode.setProperty("Date of Birth", person.getDob() != null ? person.getDob() : "");
for(int f = 0 ; f < person.getFacebook().size() ; f++)
{
personNode.setProperty("Facebook " + f+1, person.getFacebook().get(f).getFacebookPage() + " (" + person.getFacebook().get(f).getAdditionalFacebookPageInformation() + ")");
}
personNode.setProperty("Additional Information", person.getInfo() != null ? person.getInfo() : "");
personNode.setProperty("image_url","http://localhost:8888/files/"+person.getPictureFilePath());
personNode.setProperty("Node Type", "Person");
if(person.getAssociations().size() > numberOFConnectionsForKeyIndividuals)
{
personNode.setProperty("Key_Individual","Yes");
}
for(A A : person.getAs())
{
Node ANode = graphDb.createNode(new Label() {
@Override
public String name() {
return "A";
}
});
ANode.setProperty("A", A.getA());
//TODO elaborate more on the A with additional properties
ANode.setProperty("Node Type", "A");
personNode.createRelationshipTo( ANode, EdgeTypes.HAS_DONE );
ANode.setProperty("image_url","http://localhost:8888/images/A.png");
AsIDs.add(ANode.getId());
}
for(Association association : person.getAssociations())
{
if(associationsMap.get(person.getPersonId()) == null)
{
associationsMap.put(person.getPersonId(), new ArrayList<>());
}
associationsMap.get(person.getPersonId()).add(association);
}
}
//Validating and building the association edges
//iterating through the nodes
for(Long personFromId : associationsMap.keySet())
{
//iterating through the associations registered for the node
for(Association associationFrom : associationsMap.get(personFromId))
{
String personNameFrom = associationFrom.getPersonNameFrom();
String personNameTo = associationFrom.getPersonNameTo();
//iterating through the persons whose name matches the other end of the association
if(nameToId.get(personNameTo) != null)
{
for(Long personToId : nameToId.get(personNameTo))
{
//iterating through the associations of the person at the other end of the association
if(associationsMap.get(personToId) != null)
{
List<Association> associationsToRemove = new ArrayList<>();
for(Association associationTo : associationsMap.get(personToId))
{
if(associationTo.getPersonNameTo().equals(personNameFrom) && nameToId.get(personNameFrom).contains(personFromId))
{
if(nameToId.get(personNameFrom).size() == 1)
{
Relationship relationship = graphDb.getNodeById(personIdToNodeMap.get(personFromId))
.createRelationshipTo( graphDb.getNodeById(personIdToNodeMap.get(personToId)), EdgeTypes.ASSOCIATES_WITH );
if(associationFrom.getType() != null)
{
relationship.setProperty("Association Type",associationFrom.getType());
}
associationsToRemove.add(associationTo);
}
else
{
boolean alreadyConnected = false;
for(Relationship rel : graphDb.getNodeById(personIdToNodeMap.get(personFromId)).getRelationships())
{
if( ( rel.getOtherNode(graphDb.getNodeById(personIdToNodeMap.get(personFromId))).
equals(graphDb.getNodeById(personIdToNodeMap.get(personToId))) ) )
{
alreadyConnected = true;
break;
}
}
if(!alreadyConnected)
{
Relationship relationship = graphDb.getNodeById(personIdToNodeMap.get(personFromId))
.createRelationshipTo( graphDb.getNodeById(personIdToNodeMap.get(personToId)), EdgeTypes.PROBABLY_ASSOCIATES_WITH );
if(associationFrom.getType() != null)
{
relationship.setProperty("Association Type",associationFrom.getType());
}
}
// associationsToRemove.add(associationTo);
}
}
}
associationsMap.get(personToId).removeAll(associationsToRemove);
}
}
}
}
}
tx.success();
}
Map<Long,List<String>> AToNamesMap = new HashMap<>();
//detecting names referred in the A's description
try(Transaction txAs = graphDb.beginTx() )
{
for(Long id : AsIDs)
{
Node ANode = graphDb.getNodeById(id);
String A = (String) ANode.getProperty("A");
for(String name : nameToId.keySet())
{
if(A.contains(name)) {
if(AToNamesMap.get(id) == null)
{
AToNamesMap.put(id,new ArrayList<>());
}
AToNamesMap.get(id).add(name);
}
}
}
List<Long> groupedAs = new ArrayList<>();
for(Long id : AsIDs)
{
if(AToNamesMap.get(id)!= null && AToNamesMap.get(id).size() > 1)
{
for(Long otherAID : AToNamesMap.keySet())
{
if(!id.equals(otherAID) && !groupedAs.contains(otherAID) && !groupedAs.contains(id))
{
if(compareNamesLists(AToNamesMap.get(id), AToNamesMap.get(otherAID)))
{
Relationship rel = graphDb.getNodeById(otherAID).getSingleRelationship(EdgeTypes.HAS_DONE,Direction.INCOMING);
Node otherPersonNode = rel.getStartNode();
if(nameToId.get(otherPersonNode.getProperty("Name")) != null && nameToId.get(otherPersonNode.getProperty("Name")).size() > 1)
{
otherPersonNode.createRelationshipTo(graphDb.getNodeById(id), EdgeTypes.HAS_PROBABLY_DONE);
}
else
{
otherPersonNode.createRelationshipTo(graphDb.getNodeById(id), EdgeTypes.HAS_DONE);
}
rel.delete();
graphDb.getNodeById(otherAID).delete();
groupedAs.add(otherAID);
}
}
}
}
groupedAs.add(id);
}
txAs.success();
}
}
private static void registerShutdownHook( final GraphDatabaseService graphDb )
{
// Registers a shutdown hook for the Neo4j instance so that it
// shuts down nicely when the VM exits (even if you "Ctrl-C" the
// running application).
Runtime.getRuntime().addShutdownHook( new Thread()
{
@Override
public void run()
{
graphDb.shutdown();
}
} );
}
}
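For completeness, the REST entry point that triggers the build is roughly of this shape (an illustrative sketch; the controller and mapping names here are not my actual ones):
@RestController
public class GraphController {

    @Autowired
    private GraphBuilder graphBuilder;

    // Rebuilds the graph whenever this endpoint is called
    @RequestMapping("/graph/build")
    public String build() {
        graphBuilder.buildGraph();
        return "graph built";
    }
}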
When I open the browser and connect it to the Bolt endpoint that I expose for my embedded Neo4j, the browser manages to show all the nodes in my graph database (currently fewer than 100) and then freezes, taking the entire system down with it (MacBook Pro 2016, 16 GB). This happens roughly 3 out of 5 times.
I know that the way I handle my transactions is not ideal, but as I said, all of this processing happens before the Neo4j browser starts.
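When I say the transactions are not ideal, I mean that everything is created inside one huge transaction. A batched variant would look roughly like this (just a sketch with an assumed batch size, not what I actually run; Label.label is the Neo4j 3.x helper, the anonymous Label class from above would work just as well):

// Sketch: commit every batchSize persons instead of building everything in one transaction.
int batchSize = 500; // assumed value
Iterator<Person> it = personService.findAllPersons().iterator();
while (it.hasNext()) {
    try (Transaction tx = graphDb.beginTx()) {
        for (int i = 0; i < batchSize && it.hasNext(); i++) {
            Person person = it.next();
            Node personNode = graphDb.createNode(Label.label("Person"));
            personNode.setProperty("Name", person.getName());
            // ... remaining properties and relationships exactly as in buildGraph() above ...
        }
        tx.success();
    }
}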
Can you advise me on how to solve this issue?
Can you see anything wrong in my code (a connection left open, etc.)? Is this a known issue with the Neo4j browser?
I have set up the LightSIDE plugin and it runs properly, but I don't know why I can't save my data to an empty file. This is the simple structure I made.
Activity is the list of data that needs to be categorized.
I have 3 categories, and each of them has its own type.
I have already defined each category with a specific list of words. For example: Food ({Sushi, Food, Japan}, {Cap Jay, Food, Chinese}, {Jog, Sport, Running}, ...)
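(In code I essentially keep those category definitions as a map from the category name to its word tuples; a rough sketch, not my actual classes:)

// Rough sketch of the category word lists; names and structure are illustrative.
Map<String, List<List<String>>> categories = new HashMap<>();
categories.put("Food", Arrays.asList(
        Arrays.asList("Sushi", "Food", "Japan"),
        Arrays.asList("Cap Jay", "Food", "Chinese")));
// ... the other categories are defined the same way ...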
And this is how I save my prediction with LightSIDE.
public void predictSectionType(String[] sections, List<String> activityList) {
LightSideService currentLightsideHelper = new LightSideService();
Recipe newRecipe;
// Initialize SIDEPlugin
currentLightsideHelper.initSIDEPlugin();
try {
// Load Recipe with Extracted Features & Trained Models
ClassLoader myClassLoader = getClass().getClassLoader();
newRecipe = ConverterControl.readFromXML(new InputStreamReader(myClassLoader.getResourceAsStream("static/lightsideTrainingResult/trainingData.xml")));
// Predict Result Data
Recipe recipeToPredict = currentLightsideHelper.loadNewDocumentsFromCSV(sections); // DocumentList & Recipe Created
currentLightsideHelper.predictLabels(recipeToPredict, newRecipe);
} catch (FileNotFoundException e) {
e.printStackTrace();
} catch (IOException e) {
e.printStackTrace();
}
}
I have class of LightSideService as Summary Class of LightSIDE function.
public class LightSideService {
// Extract Features Parameters
final String featureTableName = "1Grams";
final int featureThreshold = 2;
final String featureAnnotation = "Code";
final Type featureType = Type.NOMINAL;
// Build Models Parameters
final String trainingResultName = "Bayes_1Grams";
// Predict Labels Parameters
final String predictionColumnName = featureAnnotation + "_Prediction";
final boolean showMaxScore = false;
final boolean showDists = true;
final boolean overwrite = false;
final boolean useEvaluation = false;
public DocumentListTableModel model = new DocumentListTableModel(null);
public Map<String, Serializable> validationSettings = new TreeMap<String, Serializable>();
public Map<FeaturePlugin, Boolean> featurePlugins = new HashMap<FeaturePlugin, Boolean>();
public Map<LearningPlugin, Boolean> learningPlugins = new HashMap<LearningPlugin, Boolean>();
public Collection<ModelMetricPlugin> modelEvaluationPlugins = new ArrayList<ModelMetricPlugin>();
public Map<WrapperPlugin, Boolean> wrapperPlugins = new HashMap<WrapperPlugin, Boolean>();
// Initialize Data ==================================================
public void initSIDEPlugin() {
SIDEPlugin[] featureExtractors = PluginManager.getSIDEPluginArrayByType("feature_hit_extractor");
boolean selected = true;
for (SIDEPlugin fe : featureExtractors) {
featurePlugins.put((FeaturePlugin) fe, selected);
selected = false;
}
SIDEPlugin[] learners = PluginManager.getSIDEPluginArrayByType("model_builder");
for (SIDEPlugin le : learners) {
learningPlugins.put((LearningPlugin) le, true);
}
SIDEPlugin[] tableEvaluations = PluginManager.getSIDEPluginArrayByType("model_evaluation");
for (SIDEPlugin fe : tableEvaluations) {
modelEvaluationPlugins.add((ModelMetricPlugin) fe);
}
SIDEPlugin[] wrappers = PluginManager.getSIDEPluginArrayByType("learning_wrapper");
for (SIDEPlugin wr : wrappers) {
wrapperPlugins.put((WrapperPlugin) wr, false);
}
}
//Used to Train Models, adjust parameters according to model
public void initValidationSettings(Recipe currentRecipe) {
validationSettings.put("testRecipe", currentRecipe);
validationSettings.put("testSet", currentRecipe.getDocumentList());
validationSettings.put("annotation", "Age");
validationSettings.put("type", "CV");
validationSettings.put("foldMethod", "AUTO");
validationSettings.put("numFolds", 10);
validationSettings.put("source", "RANDOM");
validationSettings.put("test", "true");
}
// Load CSV Doc ==================================================
public Recipe loadNewDocumentsFromCSV(String filePath) {
DocumentList testDocs;
testDocs = chooseDocumentList(filePath);
if (testDocs != null) {
testDocs.guessTextAndAnnotationColumns();
Recipe currentRecipe = Recipe.fetchRecipe();
currentRecipe.setDocumentList(testDocs);
return currentRecipe;
}
return null;
}
public Recipe loadNewDocumentsFromCSV(String[] rootCauseList) {
DocumentList testDocs;
testDocs = chooseDocumentList(rootCauseList);
if (testDocs != null) {
testDocs.guessTextAndAnnotationColumns();
Recipe currentRecipe = Recipe.fetchRecipe();
currentRecipe.setDocumentList(testDocs);
return currentRecipe;
}
return null;
}
protected DocumentList chooseDocumentList(String filePath) {
TreeSet<String> docNames = new TreeSet<String>();
docNames.add(filePath);
try {
DocumentList testDocs;
Charset encoding = Charset.forName("UTF-8");
{
testDocs = ImportController.makeDocumentList(docNames, encoding);
}
return testDocs;
} catch (FileNotFoundException e) {
e.printStackTrace();
} catch (Exception e) {
e.printStackTrace();
}
return null;
}
protected DocumentList chooseDocumentList(String[] rootCauseList) {
try {
DocumentList testDocs;
testDocs = new DocumentList();
testDocs.setName("TestData.csv");
List<String> codes = new ArrayList<>();
List<String> roots = new ArrayList<>();
for (String s : rootCauseList) {
codes.add("");
roots.add((s != null) ? s : "");
}
testDocs.addAnnotation("Code", codes, false);
testDocs.addAnnotation("Root Cause Failure Description", roots, false);
return testDocs;
} catch (Exception e) {
e.printStackTrace();
}
return null;
}
// Save/Load XML ==================================================
public void saveRecipeToXml(Recipe currentRecipe, String filePath) {
File f = new File(filePath);
try {
ConverterControl.writeToXML(f, currentRecipe);
} catch (Exception e) {
e.printStackTrace();
}
}
public Recipe loadRecipeFromXml(String filePath) throws FileNotFoundException, IOException {
Recipe currentRecipe = ConverterControl.loadRecipe(filePath);
return currentRecipe;
}
// Extract Features ==================================================
public Recipe prepareBuildFeatureTable(Recipe currentRecipe) {
// Add Feature Plugins
Collection<FeaturePlugin> plugins = new TreeSet<FeaturePlugin>();
for (FeaturePlugin plugin : featurePlugins.keySet()) {
String pluginString = plugin.toString();
if (pluginString == "Basic Features" || pluginString == "Character N-Grams") {
plugins.add(plugin);
}
}
// Generate Plugin into Recipe
currentRecipe = Recipe.addPluginsToRecipe(currentRecipe, plugins);
// Setup Plugin configurations
OrderedPluginMap currentOrderedPluginMap = currentRecipe.getExtractors();
for (SIDEPlugin plugin : currentOrderedPluginMap.keySet()) {
String pluginString = plugin.toString();
Map<String, String> currentConfigurations = currentOrderedPluginMap.get(plugin);
if (pluginString == "Basic Features") {
for (String s : currentConfigurations.keySet()) {
if (s == "Unigrams" || s == "Bigrams" || s == "Trigrams" ||
s == "Count Occurences" || s == "Normalize N-Gram Counts" ||
s == "Stem N-Grams" || s == "Skip Stopwords in N-Grams") {
currentConfigurations.put(s, "true");
} else {
currentConfigurations.put(s, "false");
}
}
} else if (pluginString == "Character N-Grams") {
for (String s : currentConfigurations.keySet()) {
if (s == "Include Punctuation") {
currentConfigurations.put(s, "true");
} else if (s == "minGram") {
currentConfigurations.put(s, "3");
} else if (s == "maxGram") {
currentConfigurations.put(s, "4");
}
}
currentConfigurations.put("Extract Only Within Words", "true");
}
}
// Build FeatureTable
currentRecipe = buildFeatureTable(currentRecipe, featureTableName, featureThreshold, featureAnnotation, featureType);
return currentRecipe;
}
protected Recipe buildFeatureTable(Recipe currentRecipe, String name, int threshold, String annotation, Type type) {
FeaturePlugin activeExtractor = null;
try {
Collection<FeatureHit> hits = new HashSet<FeatureHit>();
for (SIDEPlugin plug : currentRecipe.getExtractors().keySet()) {
activeExtractor = (FeaturePlugin) plug;
hits.addAll(activeExtractor.extractFeatureHits(currentRecipe.getDocumentList(), currentRecipe.getExtractors().get(plug)));
}
FeatureTable ft = new FeatureTable(currentRecipe.getDocumentList(), hits, threshold, annotation, type);
ft.setName(name);
currentRecipe.setFeatureTable(ft);
} catch (Exception e) {
System.err.println("Feature Extraction Failed");
e.printStackTrace();
}
return currentRecipe;
}
// Build Models ==================================================
public Recipe prepareBuildModel(Recipe currentRecipe) {
try {
// Get Learner Plugins
LearningPlugin learner = null;
for (LearningPlugin plugin : learningPlugins.keySet()) {
/* if (plugin.toString() == "Naive Bayes") */
if (plugin.toString() == "Logistic Regression") {
learner = plugin;
}
}
if (Boolean.TRUE.toString().equals(validationSettings.get("test"))) {
if (validationSettings.get("type").equals("CV")) {
validationSettings.put("testSet", currentRecipe.getDocumentList());
}
}
Map<String, String> settings = learner.generateConfigurationSettings();
currentRecipe = Recipe.addLearnerToRecipe(currentRecipe, learner, settings);
currentRecipe.setValidationSettings(new TreeMap<String, Serializable>(validationSettings));
for (WrapperPlugin wrap : wrapperPlugins.keySet()) {
if (wrapperPlugins.get(wrap)) {
currentRecipe.addWrapper(wrap, wrap.generateConfigurationSettings());
}
}
buildModel(currentRecipe, validationSettings);
} catch (Exception e) {
e.printStackTrace();
}
return currentRecipe;
}
protected void buildModel(Recipe currentRecipe,
Map<String, Serializable> validationSettings) {
try {
FeatureTable currentFeatureTable = currentRecipe.getTrainingTable();
if (currentRecipe != null) {
TrainingResult results = null;
/*
* if (validationSettings.get("type").equals("SUPPLY")) {
* DocumentList test = (DocumentList)
* validationSettings.get("testSet"); FeatureTable
* extractTestFeatures = prepareTestFeatureTable(currentRecipe,
* validationSettings, test);
* validationSettings.put("testFeatureTable",
* extractTestFeatures);
*
* // if we've already trained the exact same model, don't // do
* it again. Just evaluate. Recipe cached =
* checkForCachedModel(); if (cached != null) { results =
* evaluateUsingCachedModel(currentFeatureTable,
* extractTestFeatures, cached, currentRecipe); } }
*/
if (results == null) {
results = currentRecipe.getLearner().train(currentFeatureTable, currentRecipe.getLearnerSettings(), validationSettings, currentRecipe.getWrappers());
}
if (results != null) {
currentRecipe.setTrainingResult(results);
results.setName(trainingResultName);
currentRecipe.setLearnerSettings(currentRecipe.getLearner().generateConfigurationSettings());
currentRecipe.setValidationSettings(new TreeMap<String, Serializable>(validationSettings));
}
}
} catch (Exception e) {
e.printStackTrace();
}
}
protected static FeatureTable prepareTestFeatureTable(Recipe recipe, Map<String, Serializable> validationSettings, DocumentList test) {
prepareDocuments(recipe, validationSettings, test); // assigns classes, annotations.
Collection<FeatureHit> hits = new TreeSet<FeatureHit>();
OrderedPluginMap extractors = recipe.getExtractors();
for (SIDEPlugin plug : extractors.keySet()) {
Collection<FeatureHit> extractorHits = ((FeaturePlugin) plug).extractFeatureHits(test, extractors.get(plug));
hits.addAll(extractorHits);
}
FeatureTable originalTable = recipe.getTrainingTable();
FeatureTable ft = new FeatureTable(test, hits, 0, originalTable.getAnnotation(), originalTable.getClassValueType());
for (SIDEPlugin plug : recipe.getFilters().keySet()) {
ft = ((RestructurePlugin) plug).filterTestSet(originalTable, ft, recipe.getFilters().get(plug), recipe.getFilteredTable().getThreshold());
}
ft.reconcileFeatures(originalTable.getFeatureSet());
return ft;
}
protected static Map<String, Serializable> prepareDocuments(Recipe currentRecipe, Map<String, Serializable> validationSettings, DocumentList test) throws IllegalStateException {
DocumentList train = currentRecipe.getDocumentList();
try {
test.setCurrentAnnotation(currentRecipe.getTrainingTable().getAnnotation(), currentRecipe.getTrainingTable().getClassValueType());
test.setTextColumns(new HashSet<String>(train.getTextColumns()));
test.setDifferentiateTextColumns(train.getTextColumnsAreDifferentiated());
Collection<String> trainColumns = train.allAnnotations().keySet();
Collection<String> testColumns = test.allAnnotations().keySet();
if (!testColumns.containsAll(trainColumns)) {
ArrayList<String> missing = new ArrayList<String>(trainColumns);
missing.removeAll(testColumns);
throw new java.lang.IllegalStateException("Test set annotations do not match training set.\nMissing columns: " + missing);
}
validationSettings.put("testSet", test);
} catch (Exception e) {
e.printStackTrace();
throw new java.lang.IllegalStateException("Could not prepare test set.\n" + e.getMessage(), e);
}
return validationSettings;
}
//Predict Labels ==================================================
public void predictLabels(Recipe recipeToPredict, Recipe currentRecipe) {
DocumentList newDocs = null;
DocumentList originalDocs;
if (useEvaluation) {
originalDocs = recipeToPredict.getTrainingResult().getEvaluationTable().getDocumentList();
TrainingResult results = currentRecipe.getTrainingResult();
List<String> predictions = (List<String>) results.getPredictions();
newDocs = addLabelsToDocs(predictionColumnName, showDists, overwrite, originalDocs, results, predictions, currentRecipe.getTrainingTable());
} else {
originalDocs = recipeToPredict.getDocumentList();
Predictor predictor = new Predictor(currentRecipe, predictionColumnName);
newDocs = predictor.predict(originalDocs, predictionColumnName, showDists, overwrite);
}
// Predict Labels result
model.setDocumentList(newDocs);
}
protected DocumentList addLabelsToDocs(final String name, final boolean showDists, final boolean overwrite, DocumentList docs, TrainingResult results, List<String> predictions, FeatureTable currentFeatureTable) {
Map<String, List<Double>> distributions = results.getDistributions();
DocumentList newDocs = docs.clone();
newDocs.addAnnotation(name, predictions, overwrite);
if (distributions != null) {
if (showDists) {
for (String label : currentFeatureTable.getLabelArray()) {
List<String> dist = new ArrayList<String>();
for (int i = 0; i < predictions.size(); i++) {
dist.add(String.format("%.3f", distributions.get(label).get(i)));
}
newDocs.addAnnotation(name + "_" + label + "_score", dist, overwrite);
}
}
}
return newDocs;
}
// ==================================================
}
David, it looks like the above replicates a lot of the functionality from the edu.cmu.side.recipe package. However, it doesn't look like your predictSectionType() method actually outputs the model's predictions anywhere.
If what you're trying to do is indeed to save predictions on new data using a trained model, check out the edu.cmu.side.recipe.Predictor class. It takes a trained model path as input. It's used by the scripts/predict.sh convenience script, but you could repurpose its main method if you needed to call it programmatically.
I hope this helps!
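As a rough sketch of what I mean, using only the calls that already appear in your LightSideService (the output step at the end is the part you'd still have to fill in):

// Sketch: predict and actually keep the labelled documents instead of only
// pushing them into the table model.
// (Inside the same try/catch you already have in predictSectionType().)
Recipe trained = ConverterControl.readFromXML(new InputStreamReader(
        getClass().getClassLoader().getResourceAsStream("static/lightsideTrainingResult/trainingData.xml")));
Recipe toPredict = currentLightsideHelper.loadNewDocumentsFromCSV(sections);
Predictor predictor = new Predictor(trained, "Code_Prediction");
DocumentList labelled = predictor.predict(toPredict.getDocumentList(), "Code_Prediction", true, false);
// 'labelled' now carries the prediction annotation; write it out here
// (for example via the same ConverterControl helpers you use for recipes)
// rather than discarding it.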
I am trying to implement paging in an already running application. It works like MongoDB, where you get a query with a bunch of parameters such as
searchConditions, limit (how many docs you want), skip (skip this many docs)
and so on,
and the response comes back as JSON. My task is to generate the previous and next links for the query, i.e. the previous page and the next page for that query.
I came up with the method below:
public Paging generateLink(RequestFilter filter) {
int prev_skip;
int prev_limit;
String pagePrev = "";
String pageNext = "";
int next_skip;
int next_limit;
String searchCondition = JsonUtils.toSimpleJsonString(filter.getSearch());
String url = "http://isgswmdi1n1.nam.nsroot.net:7014/account-search/rest/api/v1/pmm";
//properties.getProperty("paging.link");
System.out.println(url);
Paging pagingOption = new Paging();
Result result = new Result();
int count = luceneSearchService.searchCount(filter);
try {
if (count > 1) {
if (filter.getSkip() > 0) {
prev_skip = Math.max((filter.getSkip() - 1), 0);
prev_limit = filter.getLimit();
pagePrev = url + "?search=" + searchCondition + "&skip=" + prev_skip + "$limit=" + prev_limit;
} else{
result.setPaging(null);
return null;
}
if (count > (filter.getLimit() + filter.getSkip())) {
next_skip = (filter.getSkip() + filter.getLimit());
next_limit = filter.getLimit();
pageNext = url + "?search=" + searchCondition + "&skip=" + next_skip + "$limit=" + next_limit;
} else{
result.setPaging(null);
return null;
}
pagingOption.setPagePrev(pagePrev);
pagingOption.setPageNext(pageNext);
result.setPaging(pagingOption);
}
return pagingOption;
} catch (NullPointerException n)
{
n.printStackTrace();
return pagingOption;
}
}
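(One thing I am aware of: the raw JSON search condition goes into the URL unescaped. If that turns out to matter, a minimal sketch of encoding it with the standard java.net.URLEncoder, nothing project-specific, would be:)

// Sketch: encode the JSON search condition before appending it to the links.
String encodedSearch;
try {
    encodedSearch = URLEncoder.encode(searchCondition, "UTF-8");
} catch (UnsupportedEncodingException e) {
    encodedSearch = searchCondition; // UTF-8 is always available, so this should not happen
}
// then build pagePrev / pageNext with encodedSearch instead of searchCondition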
The call to the generateLink method:
return Result.ok(resultRow, generateLink(filter));
The ok response method in the Result class:
public static Result ok(List<ResultRow> resultRows, Paging paging) {
if (paging != null) {
return new Result(resultRows, 200, "Completed", paging);
} else
return new Result(resultRows, 200, "Completed");
}
The underlying Paging class:
public class Paging implements Serializable {
public String getPagePrev() {
return pagePrev;
}
public void setPagePrev(String pagePrev) {
this.pagePrev = pagePrev;
}
#JsonProperty("pagePrev")
private String pagePrev;
public String getPageNext() {
return pageNext;
}
public void setPageNext(String pageNext) {
this.pageNext = pageNext;
}
#JsonProperty("pageNext")
private String pageNext;
}
But my test method is failing:
@Test
public void search_allFilterParamsAreDefined_hp() throws Exception {
// given
Map<String, Serializable> searchConditions = singletonMap("shortName", "GO");
List<String> returnFields = asList("shortname", "gfcid");
String returnFieldsStr = returnFields.stream().collect(joining(","));
RequestFilter filter = RequestFilter.create(searchConditions, returnFieldsStr, 5, 10, true);
int resultSize = 20;
List<ResultRow> searchResult = getSearchResult(resultSize);
when(luceneSearchService.search(filter)).thenReturn(searchResult);
when(luceneSearchService.searchCount(filter)).thenReturn(resultSize);
// do
Result result = searchCoordinator.search(filter);
// verify
assertEquals(searchResult, result.getRows());
assertReturnFields(returnFields, result.getRows());
assertNotNull(result.getPaging());
Map<String, String> nextParams = getQueryParams(result.getPaging().getPageNext());
assertParam(nextParams, "search", toSimpleJsonString(searchConditions));
assertParam(nextParams, "skip", "15");
assertParam(nextParams, "limit", "10");
}
I have created a folder using the superuser and granted read-only access to the folder for the application user.
When I try to query all accessible folders (nt:folder), the properties list comes back empty.
Partial code to reproduce:
Creating the folder:
public Node createFolder(Session adminSession) {
try {
Node parentNode = adminSession.getNode("/MyCompany/CommonFolder");
if(!parentNode.hasNode("T1")){
Node node = parentNode.addNode("T1", "nt:folder");
node.addMixin("et:folderProperties");
node.setProperty("et:folderName", "T1");
node.addMixin("rep:AccessControllable");
adminSession.save();
return node;
}else {
System.out.println("Node already exists");
}
} catch (RepositoryException e) {
e.printStackTrace();
}
return null;
}
Sharing to the user (principal-based):
accessControlManager = (JackrabbitAccessControlManager) adminSession.getAccessControlManager();
accessControlPolicy = accessControlManager.getApplicablePolicies(userPrincipal);
// for ex., principal is appuser1
if(accessControlPolicy != null && accessControlPolicy.length > 0) {
accessControlList = (JackrabbitAccessControlList) accessControlPolicy[0];
}else {
accessControlPolicy = accessControlManager.getPolicies(userPrincipal);
accessControlList = (JackrabbitAccessControlList) accessControlPolicy[0];
}
ValueFactory valueFactory = adminSession.getValueFactory();
// Tried all combinations, even granting jcr:all
Privilege[] readPrivilege = new javax.jcr.security.Privilege[] {
accessControlManager.privilegeFromName(
javax.jcr.security.Privilege.JCR_READ),
accessControlManager.privilegeFromName(
javax.jcr.security.Privilege.JCR_NODE_TYPE_MANAGEMENT),
accessControlManager.privilegeFromName(
javax.jcr.security.Privilege.JCR_READ_ACCESS_CONTROL)};
Map<String, Value> restrictions = new HashMap<String, Value>();
restrictions.put("rep:nodePath", valueFactory.createValue("/MyCompany/CommonFolder/T1",
PropertyType.PATH));
restrictions.put("rep:glob", valueFactory.createValue(""));
accessControlList.addEntry(userPrincipal, readPrivilege, true, restrictions);
accessControlManager.setPolicy(accessControlList.getPath(), accessControlList);
adminSession.save();
Printing all accessible folders for the user:
public void printAllFolders(Session userSession) {
QueryManager queryManager;
try {
queryManager = userSession.getWorkspace().getQueryManager();
String sql = "SELECT * FROM [nt:folder]";
Query query= queryManager.createQuery(sql, Query.JCR_SQL2);
QueryResult result = query.execute();
NodeIterator nodeIterator = result.getNodes();
System.out.println("Printing all applicable folders");
while(nodeIterator.hasNext()) {
Node node = nodeIterator.nextNode();
System.out.println("Folder Name:" + node.getName() + "; path: " + node.getPath());
PropertyIterator pIterator = node.getProperties();
while (pIterator.hasNext()){ //Returning empty for path "/MyCompany/CommonFolder/T1"
Property property = pIterator.nextProperty();
if (property.getDefinition().isMultiple()) {
Value[] values = property.getValues();
for(Value v11: values) {
QValueValue value = (QValueValue)v11;
System.out.println(String.format("Multi-valued property for node:
'%s' - %s has values",node.getName(),
property.getName() ,value.getString()));
}
} else {
QValueValue value = (QValueValue) property.getValue();
String strValue = value.getString();
System.out.println(String.format("property for node: '%s' - %s has value
%s",node.getName(),property.getName(),strValue));
}
}
}
} catch (RepositoryException e) {
e.printStackTrace();
}
}
I am using Jackrabbit 2.6.0 and JCR 2.0.
Node child = cl.addNode("ONE");
child.setProperty("message", ("CL Child" + i));
session.save();
PropertyIterator iter = child.getProperties();
System.out.println("Size" + iter.getSize());
while (iter.hasNext()) {
PropertyImpl key = (PropertyImpl) iter.next();
String value = key.getString();
System.out.println("------------->" + key);
System.out.println("------------->" + value);
}
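For debugging, a check like the following (standard JCR 2.0 API, with the same path as above) should show which privileges are actually effective for the application user's session; I would expect jcr:read to be reported there:

// Sketch: inspect the effective privileges of the application user on the shared folder.
try {
    AccessControlManager acm = userSession.getAccessControlManager();
    String path = "/MyCompany/CommonFolder/T1";
    boolean canRead = acm.hasPrivileges(path, new Privilege[] {
            acm.privilegeFromName(Privilege.JCR_READ) });
    System.out.println("jcr:read on " + path + ": " + canRead);
    for (Privilege p : acm.getPrivileges(path)) {
        System.out.println("Effective privilege: " + p.getName());
    }
} catch (RepositoryException e) {
    e.printStackTrace();
}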
I am writing an app for Android that grabs metadata from SHOUTcast MP3 streams. I am using a pretty nifty class I found online that I slightly modified, but I am still having 2 problems.
1) I have to continuously ping the server to update the metadata using a TimerTask. I am not fond of this approach but it was all I could think of.
2) There is a metric tonne of garbage collection while my app is running. Removing the TimerTask got rid of the garbage collection issue so I am not sure if I am just doing it wrong or if this is normal.
Here is the class I am using:
public class IcyStreamMeta {
protected URL streamUrl;
private Map<String, String> metadata;
private boolean isError;
public IcyStreamMeta(URL streamUrl) {
setStreamUrl(streamUrl);
isError = false;
}
/**
* Get artist using stream's title
*
* @return String
* @throws IOException
*/
public String getArtist() throws IOException {
Map<String, String> data = getMetadata();
if (!data.containsKey("StreamTitle"))
return "";
try {
String streamTitle = data.get("StreamTitle");
String title = streamTitle.substring(0, streamTitle.indexOf("-"));
return title.trim();
}catch (StringIndexOutOfBoundsException e) {
return "";
}
}
/**
* Get title using stream's title
*
* @return String
* @throws IOException
*/
public String getTitle() throws IOException {
Map<String, String> data = getMetadata();
if (!data.containsKey("StreamTitle"))
return "";
try {
String streamTitle = data.get("StreamTitle");
String artist = streamTitle.substring(streamTitle.indexOf("-")+1);
return artist.trim();
} catch (StringIndexOutOfBoundsException e) {
return "";
}
}
public Map<String, String> getMetadata() throws IOException {
if (metadata == null) {
refreshMeta();
}
return metadata;
}
public void refreshMeta() throws IOException {
retrieveMetadata();
}
private void retrieveMetadata() throws IOException {
URLConnection con = streamUrl.openConnection();
con.setRequestProperty("Icy-MetaData", "1");
con.setRequestProperty("Connection", "close");
//con.setRequestProperty("Accept", null);
con.connect();
int metaDataOffset = 0;
Map<String, List<String>> headers = con.getHeaderFields();
InputStream stream = con.getInputStream();
if (headers.containsKey("icy-metaint")) {
// Headers are sent via HTTP
metaDataOffset = Integer.parseInt(headers.get("icy-metaint").get(0));
} else {
// Headers are sent within a stream
StringBuilder strHeaders = new StringBuilder();
char c;
int raw;
// read() returns an int so that EOF (-1) can be detected before casting to char
while ((raw = stream.read()) != -1) {
c = (char) raw;
strHeaders.append(c);
if (strHeaders.length() > 5 && (strHeaders.substring((strHeaders.length() - 4), strHeaders.length()).equals("\r\n\r\n"))) {
// end of headers
break;
}
}
// Match headers to get metadata offset within a stream
Pattern p = Pattern.compile("\\r\\n(icy-metaint):\\s*(.*)\\r\\n");
Matcher m = p.matcher(strHeaders.toString());
if (m.find()) {
metaDataOffset = Integer.parseInt(m.group(2));
}
}
// In case no data was sent
if (metaDataOffset == 0) {
isError = true;
return;
}
// Read metadata
int b;
int count = 0;
int metaDataLength = 4080; // 4080 is the max length
boolean inData = false;
StringBuilder metaData = new StringBuilder();
// Stream position should be either at the beginning or right after headers
while ((b = stream.read()) != -1) {
count++;
// Length of the metadata
if (count == metaDataOffset + 1) {
metaDataLength = b * 16;
}
if (count > metaDataOffset + 1 && count < (metaDataOffset + metaDataLength)) {
inData = true;
} else {
inData = false;
}
if (inData) {
if (b != 0) {
metaData.append((char)b);
}
}
if (count > (metaDataOffset + metaDataLength)) {
break;
}
}
// Set the data
metadata = IcyStreamMeta.parseMetadata(metaData.toString());
// Close
stream.close();
}
public boolean isError() {
return isError;
}
public URL getStreamUrl() {
return streamUrl;
}
public void setStreamUrl(URL streamUrl) {
this.metadata = null;
this.streamUrl = streamUrl;
this.isError = false;
}
public static Map<String, String> parseMetadata(String metaString) {
Map<String, String> metadata = new HashMap<String, String>();
String[] metaParts = metaString.split(";");
Pattern p = Pattern.compile("^([a-zA-Z]+)=\\'([^\\']*)\\'$");
Matcher m;
for (int i = 0; i < metaParts.length; i++) {
m = p.matcher(metaParts[i]);
if (m.find()) {
metadata.put((String)m.group(1), (String)m.group(2));
}
}
return metadata;
}
}
And here is my timer:
private void getMeta() {
timer.schedule(new TimerTask() {
public void run() {
try {
icy = new IcyStreamMeta(new URL(stationUrl));
runOnUiThread(new Runnable() {
public void run() {
try {
artist.setText(icy.getArtist());
title.setText(icy.getTitle());
} catch (IOException e) {
e.printStackTrace();
} catch (StringIndexOutOfBoundsException e) {
e.printStackTrace();
}
}
});
} catch (MalformedURLException e) {
e.printStackTrace();
}
}
},0,5000);
}
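One variant I have been considering to cut down the per-tick allocations (a sketch only, reusing the methods of the class above; the method name is just illustrative) is to keep a single IcyStreamMeta instance alive and call refreshMeta() on every tick instead of constructing a new object every 5 seconds:

private void getMetaReusingInstance() throws MalformedURLException {
    // One instance for the lifetime of the activity, refreshed on each tick.
    final IcyStreamMeta meta = new IcyStreamMeta(new URL(stationUrl));
    timer.schedule(new TimerTask() {
        public void run() {
            try {
                meta.refreshMeta(); // re-reads the stream's metadata block
                runOnUiThread(new Runnable() {
                    public void run() {
                        try {
                            artist.setText(meta.getArtist());
                            title.setText(meta.getTitle());
                        } catch (IOException e) {
                            e.printStackTrace();
                        }
                    }
                });
            } catch (IOException e) {
                e.printStackTrace();
            }
        }
    }, 0, 5000);
}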
Much appreciation for any assistance!
I've replaced the IcyStreamMeta class in my program and am now getting the metadata from the 7.html file that is part of the SHOUTcast spec. Far less data usage, so I feel it is a better option.
I am still using the TimerTask, which is acceptable. There is practically no GC any more and I am happy with using 7.html and a little regex. :)
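For anyone curious, the 7.html approach boils down to fetching one tiny page and pulling the last comma-separated field out of its body. A rough sketch (host and port are placeholders, and the exact field layout can vary between SHOUTcast servers, so treat it as an assumption):

// Sketch: read http://host:port/7.html and extract the current song title.
// 7.html usually returns a single line such as:
// <HTML><meta ...><body>listeners,status,peak,max,unique,bitrate,Artist - Title</body></html>
try {
    URL sevenHtml = new URL("http://" + host + ":" + port + "/7.html");
    URLConnection con = sevenHtml.openConnection();
    con.setRequestProperty("User-Agent", "Mozilla/5.0"); // some servers are picky about the user agent
    BufferedReader in = new BufferedReader(new InputStreamReader(con.getInputStream()));
    StringBuilder sb = new StringBuilder();
    String line;
    while ((line = in.readLine()) != null) {
        sb.append(line);
    }
    in.close();
    Matcher m = Pattern.compile("<body>(.*)</body>", Pattern.CASE_INSENSITIVE).matcher(sb.toString());
    if (m.find()) {
        // The song title is the seventh field and may itself contain commas,
        // so limit the split to seven parts.
        String[] fields = m.group(1).split(",", 7);
        String songTitle = fields.length == 7 ? fields[6] : "";
        // split songTitle into artist/title on " - " as before
    }
} catch (IOException e) {
    e.printStackTrace();
}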