Find all cycles in undirected graph - java

I am using Algorithms, 4th Edition to polish up my graph theory a bit. The book comes with a lot of code for graph processing.
Currently, I am stuck on the following problem: how do I find all cycles in an undirected graph?
I was looking to modify the existing code for cycle detection to do that.
Here is the important part:
private void dfs(Graph G, int u, int v) {
    marked[v] = true;
    for (int w : G.adj(v)) {
        // short circuit if cycle already found
        if (cycle != null) return;
        if (!marked[w]) {
            edgeTo[w] = v;
            dfs(G, v, w);
        }
        // check for cycle (but disregard reverse of edge leading to v)
        else if (w != u) {
            cycle = new Stack<Integer>();
            for (int x = v; x != w; x = edgeTo[x]) {
                cycle.push(x);
            }
            cycle.push(w);
            cycle.push(v);
        }
    }
}
Now, if I were to find ALL cycles, I would remove the line that returns when a cycle is found and instead store each cycle as it is created. The part I cannot figure out is: when does the algorithm stop? How can I be sure I have found all cycles?
Can the above code even be modified in a way to allow me to find all cycles?

Cycle detection is much easier than finding all cycles. Cycle detection can be done in linear time using a DFS like the one you've posted, but the number of cycles in a graph can be exponential, ruling out a polynomial-time algorithm altogether. If you don't see how this could be possible, consider this graph:
1 -- 2
| / |
| / |
3 -- 4
There are three distinct cycles, but a DFS would find only two back-edges.
As such, modifying your algorithm to find all cycles will take a fair bit more work than simply changing a line or two. Instead, you have to find a set of base cycles, then combine them to form the set of all cycles. You can find an implementation of an algorithm that does this in this question.
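For illustration, here is a minimal sketch (not the book's code) of the first step: collecting the base (fundamental) cycles, one per back edge found during DFS over an adjacency-list graph. The remaining cycles can then be produced by combining the edge sets of these base cycles with symmetric difference (XOR). The class and field names below are just for this sketch.
import java.util.*;

class FundamentalCycles {
    private final List<List<Integer>> adj;   // adjacency lists, vertices 0..n-1
    private final int[] parent;              // DFS tree parent pointers
    private final boolean[] marked;
    final List<List<Integer>> cycles = new ArrayList<>();  // base cycles

    FundamentalCycles(List<List<Integer>> adj) {
        this.adj = adj;
        int n = adj.size();
        parent = new int[n];
        marked = new boolean[n];
        Arrays.fill(parent, -1);
        for (int s = 0; s < n; s++)
            if (!marked[s]) dfs(s, -1);
    }

    private void dfs(int v, int from) {
        marked[v] = true;
        for (int w : adj.get(v)) {
            if (!marked[w]) {
                parent[w] = v;
                dfs(w, v);
            } else if (w != from && isAncestor(w, v)) {
                // back edge v-w closes exactly one fundamental cycle
                List<Integer> cycle = new ArrayList<>();
                for (int x = v; x != w; x = parent[x]) cycle.add(x);
                cycle.add(w);
                cycles.add(cycle);
            }
        }
    }

    // true if a lies on the DFS-tree path from v up to the root
    private boolean isAncestor(int a, int v) {
        for (int x = v; x != -1; x = parent[x])
            if (x == a) return true;
        return false;
    }
}
In the 4-vertex example above, this collects two base cycles (one per back edge); XOR-ing their edge sets yields the third, outer cycle.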

/**
* In this program we create a list of edges which is an ordered pair of two
* integers representing two vertices.
*
* We iterate through each edge and apply Union Find algorithm to detect
* cycle.
*
* This code has been tested and gives correct results for all inputs.
*/
package com.divyanshu.ds.disjointSet;
import java.util.HashMap;
/**
* @author Divyanshu
* DisjointSet is a data structure with three operations :
* makeSet, union and findSet
*
* Algorithms Used : Union by rank and path compression for detecting cycles
* in an undirected graph.
*/
public class DisjontSet {
HashMap<Long, Node> map = new HashMap<>();
class Node {
long data;
Node parent;
int rank;
}
public void makeSet(long data) {
Node node = new Node();
node.data = data;
node.parent = node;
node.rank = 0;
map.put(data, node);
}
public void union(long firstSet,
long secondSet) {
Node firstNode = map.get(firstSet);
Node secondNode = map.get(secondSet);
Node firstParent = findSet(firstNode);
Node secondParent = findSet(secondNode);
if (firstParent.data == secondParent.data) {
return;
}
if (firstParent.rank >= secondParent.rank) {
firstParent.rank = (firstParent.rank == secondParent.rank) ? firstParent.rank + 1 : firstParent.rank;
secondParent.parent = firstParent;
} else {
firstParent.parent = secondParent;
}
}
public long findSet(long data) {
return findSet(map.get(data)).data;
}
private Node findSet(Node node) {
if (node.parent == node) {
return node;
}
node.parent = findSet(node.parent);
return node.parent;
}
}
=============================================================================
package com.divyanshu.ds.client;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Random;
import com.divyanshu.ds.disjointSet.DisjontSet;
import com.divyanshu.ds.disjointSet.Edge;
public class DisjointSetClient {
public static void main(String[] args) {
int edgeCount = 4;
int vertexCount = 12;
List<Edge> graph = generateGraph(edgeCount, vertexCount);
System.out.println("Generated Graph : ");
System.out.println(graph);
DisjontSet disjontSet = getDisjointSet(graph);
Boolean isGraphCyclic = isGraphCyclic(graph, disjontSet);
System.out.println("Graph contains cycle : " + isGraphCyclic);
}
private static Boolean isGraphCyclic(List<Edge> graph,
DisjontSet disjontSet) {
Boolean isGraphCyclic = false;
for (Edge edge : graph) {
if (edge.getFirstVertex() != edge.getSecondVertex()) {
Long first = disjontSet.findSet(edge.getFirstVertex());
Long second = disjontSet.findSet(edge.getSecondVertex());
if (first.equals(second)) {
isGraphCyclic = true;
break;
} else {
disjontSet.union(first, second);
}
}
}
return isGraphCyclic;
}
private static DisjontSet getDisjointSet(List<Edge> graph) {
DisjontSet disjontSet = new DisjontSet();
for (Edge edge : graph) {
disjontSet.makeSet(edge.getFirstVertex());
disjontSet.makeSet(edge.getSecondVertex());
}
return disjontSet;
}
private static List<Edge> generateGraph(int edgeCount,
int vertexCount) {
List<Edge> graph = new ArrayList<>();
HashSet<Edge> edgeSet = new HashSet<>();
Random random = new Random();
for (int j = 0; j < vertexCount; j++) {
int first = random.nextInt(edgeCount);
int second = random.nextInt(edgeCount);
if (first != second) {
edgeSet.add(new Edge(first, second));
} else {
j--;
}
}
for (Edge edge : edgeSet) {
graph.add(edge);
}
return graph;
}
}
===================================================================
/**
*
*/
package com.divyanshu.ds.disjointSet;
/**
* @author Divyanshu
*
*/
public class Edge {
private long firstVertex;
private long secondVertex;
public Edge(long firstVertex,
long secondVertex) {
this.firstVertex = firstVertex;
this.secondVertex = secondVertex;
}
public long getFirstVertex() {
return firstVertex;
}
public void setFirstVertex(long firstVertex) {
this.firstVertex = firstVertex;
}
public long getSecondVertex() {
return secondVertex;
}
public void setSecondVertex(long secondVertex) {
this.secondVertex = secondVertex;
}
@Override
public String toString() {
return "(" + firstVertex + "," + secondVertex + ")";
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + (int) (firstVertex ^ (firstVertex >>> 32));
result = prime * result + (int) (secondVertex ^ (secondVertex >>> 32));
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
Edge other = (Edge) obj;
if (firstVertex != other.firstVertex)
return false;
if (secondVertex != other.secondVertex)
return false;
return true;
}
}

Related

A* algorithm completes but returns suboptimal path

The algorithm:
Uses a PQ that supports change priority operations.
Assume all the ADTs work correctly.
Example problem: Find the shortest path of operations to get from integer x to integer y using the following operations and weights: add/subtract 1 (weight 1), multiply/divide by 2 (weight 5), square (weight 10).
Tested against other types of graphs and inputs: sometimes it finds the shortest path, sometimes it finds a suboptimal one, and sometimes it times out.
import java.util.*;
/**
* Represents a graph of vertices.
*/
public interface AStarGraph<Vertex> {
List<WeightedEdge<Vertex>> neighbors(Vertex v);
double estimatedDistanceToGoal(Vertex s, Vertex goal);
}
public interface ShortestPathsSolver<Vertex> {
SolverOutcome outcome();
List<Vertex> solution();
double solutionWeight();
int numStatesExplored();
double explorationTime();
}
public interface ExtrinsicMinPQ<T> {
/* Inserts an item with the given priority value. */
void add(T item, double priority);
/* Returns true if the PQ contains the given item. */
boolean contains(T item);
/* Returns the minimum item. */
T getSmallest();
/* Removes and returns the minimum item. */
T removeSmallest();
/* Changes the priority of the given item. Behavior undefined if the item doesn't exist. */
void changePriority(T item, double priority);
/* Returns the number of items in the PQ. */
int size();
}
public enum SolverOutcome {
SOLVED, TIMEOUT, UNSOLVABLE
}
public class ArrayHeapMinPQ<T> implements ExtrinsicMinPQ<T> {
private ArrayList<PriorityNode> heap;
int count;
private HashMap<T, PriorityNode> items;
public ArrayHeapMinPQ() {
heap = new ArrayList<>();
heap.add(0, new PriorityNode(null, Double.NEGATIVE_INFINITY));
count = 0; // For convenient math
items = new HashMap<>();
}
@Override
public void add(T item, double priority) {
if(contains(item)){
throw new IllegalArgumentException();
}
PriorityNode pn = new PriorityNode(item, priority);
if(count == 0){
heap.add(1, pn);
count++;
}else{
heap.add(count+1, pn);
swim(count+1);
count++;
}
items.put(item, pn);
}
private void swap(int i, int j){
Collections.swap(heap, i, j);
}
private void swim(int i){
while(i > 1){
PriorityNode cur = heap.get(i);
if((cur.compareTo(heap.get(i/2)) >= 0)){
break;
}
swap(i, i/2);
i = i/2;
}
}
private void sink(int k){
while (2*k <= size()-1) {
if(2*k+1 <= size()-1) {
if (heap.get(2 * k).compareTo(heap.get(2 * k + 1)) < 0) {
if (heap.get(k).compareTo(heap.get(2 * k)) > 0) {
swap(k, 2 * k);
k = 2 * k;
continue;
}
} else if(heap.get(k).compareTo(heap.get(2*k+1)) > 0){
swap(2*k+1, k);
k = 2*k +1;
continue;}
}
else if (heap.get(k).compareTo(heap.get(2 * k)) > 0) {
swap(k, 2 * k);
k = 2 * k;
continue;}
break;
}
}
@Override
public int size(){
return heap.size();
}
public PriorityNode getRoot(){
return heap.get(1);
}
@Override
public boolean contains(T item) {
return items.containsKey(item);}
@Override
public T getSmallest() {
if(heap.size() == 0){
throw new NoSuchElementException();
}
return getRoot().getItem();
}
@Override
public T removeSmallest() {
if(heap.size() == 1){
throw new NoSuchElementException();
}
T item = heap.get(1).item;
swap(1, size()-1);
heap.remove(size()-1);
if(size() > 1){
sink(1);
}
items.remove(item);
count--;
return item;
}
public boolean isEmpty(){
return heap.size() == 1;
}
public int getCount() {
return count;
}
@Override
public void changePriority(T T, double priority) {
if(heap.size() == 0){
throw new NoSuchElementException();
}
PriorityNode tochange = items.get(T);
double prioritysearch = tochange.getPriority();
double currprior = getRoot().getPriority();
int left = 0;
int right = 0;
if((prioritysearch != currprior)){
if (currprior > prioritysearch){
add(T, priority);
return;
}
left = heapTraverse(prioritysearch, tochange, 2);
right = heapTraverse(prioritysearch, tochange, 3);
}
if(left == -1 && right == -1){
throw new NoSuchElementException();
}
else if(left > 0){
PriorityNode toChange = heap.get(left);
toChange.setPriority(priority);
if(priority < heap.get(left/2).getPriority()){
swim(left);
}else
sink(left);
}
else {
PriorityNode toChange = heap.get(right);
toChange.setPriority(priority);
if (priority < heap.get(right / 2).getPriority()) {
swim(right);
} else
sink(right);
}
}
private int heapTraverse(double priority, PriorityNode node, int index){
if(index > heap.size()-1){
return -1;
}
PriorityNode curr = heap.get(index);
double currprior = curr.getPriority();
if(currprior == priority && node.equals(curr)){
return index;
} else if(currprior > priority){
return -1;
}else{
if(heapTraverse(priority, node, index*2) == -1){
return heapTraverse(priority, node, index*2 +1);}
else {return heapTraverse(priority, node, index*2);}
}
}
private class PriorityNode implements Comparable<PriorityNode> {
private T item;
private double priority;
PriorityNode(T e, double p) {
this.item = e;
this.priority = p;
}
T getItem() {
return item;
}
double getPriority() {
return priority;
}
void setPriority(double priority) {
this.priority = priority;
}
@Override
public int compareTo(PriorityNode other) {
if (other == null) {
return -1;
}
return Double.compare(this.getPriority(), other.getPriority());
}
@Override
public boolean equals(Object o) {
if( o == null) {
throw new NullPointerException();
}
if (o.getClass() != this.getClass()) {
return false;
} else {
return ((PriorityNode) o).getItem().equals(getItem());
}
}
@Override
public int hashCode() {
return item.hashCode();
}
}
public class WeightedEdge<Vertex> {
private Vertex v;
private Vertex w;
private double weight;
public WeightedEdge(Vertex v, Vertex w, double weight) {
this.v = v;
this.w = w;
this.weight = weight;
}
public Vertex from() {
return v;
}
public Vertex to() {
return w;
}
public double weight() {
return weight;
}
}
public class SolutionPrinter {
/** Summarizes the result of the search made by this solver without actually
* printing the solution itself (if any).
*/
public static <Vertex> void summarizeOutcome(ShortestPathsSolver<Vertex> solver) {
summarizeSolution(solver, "", false);
}
/** Summarizes the result of the search made by this solver and also
* prints each vertex of the solution, connected by the given delimiter,
* e.g. delimiter = "," would return all states separated by commas.
*/
public static <Vertex> void summarizeSolution(ShortestPathsSolver<Vertex> solver,
String delimiter) {
summarizeSolution(solver, delimiter, true);
}
private static <Vertex> String solutionString(ShortestPathsSolver<Vertex> solver,
String delimiter) {
List<String> solutionVertices = new ArrayList<>();
for (Vertex v : solver.solution()) {
solutionVertices.add(v.toString());
}
return String.join(delimiter, solutionVertices);
}
private static <Vertex> void summarizeSolution(ShortestPathsSolver<Vertex> solver,
String delimiter, boolean printSolution) {
System.out.println("Total states explored in " + solver.explorationTime()
+ "s: " + solver.numStatesExplored());
if (solver.outcome() == SolverOutcome.SOLVED) {
List<Vertex> solution = solver.solution();
System.out.println("Search was successful.");
System.out.println("Solution was of length " + solution.size()
+ ", and had total weight " + solver.solutionWeight() + ":");
if (printSolution) {
System.out.println(solutionString(solver, delimiter));
}
} else if (solver.outcome() == SolverOutcome.TIMEOUT) {
System.out.println("Search timed out, considered " + solver.numStatesExplored()
+ " vertices before timing out.");
} else { // (solver.outcome() == SolverOutcome.UNSOLVABLE)
System.out.println("Search determined that the goal is unreachable from source.");
}
}
}
public class AStarSolver<Vertex> implements ShortestPathsSolver<Vertex> {
private final AStarGraph<Vertex> graph;
private Vertex source;
private Vertex dest;
private SolverOutcome result;
private HashMap<Vertex, Double> distTo = new HashMap<>();
private ArrayHeapMinPQ<Vertex> fringe = new ArrayHeapMinPQ<>();
private HashMap<Vertex, WeightedEdge<Vertex>> edgeTo = new HashMap<>(); // records which edge was used to get to this vertex
private double solutionweight;
private List<Vertex> solution;
private ArrayList<Vertex> marked = new ArrayList<>();
private double timetosolve;
private int numofstates = 0;
public AStarSolver(AStarGraph<Vertex> input, Vertex start, Vertex end, double timeout ){
graph = input;
source = start;
dest = end;
if(start.equals(end)){
solutionweight = 0;
solution = List.of(start);
result = SolverOutcome.SOLVED;
numofstates = 0;
timetosolve = 0;
return;
}
fringe.add(start, 0.0);
distTo.put(start, 0.0);
while (!fringe.isEmpty()) {
Vertex src = fringe.removeSmallest();
numofstates++;
marked.add(src);
List<WeightedEdge<Vertex>> neighbors = graph.neighbors(src);
for(WeightedEdge<Vertex> e: neighbors){
double heur = graph.estimatedDistanceToGoal(e.to(), dest);
if ((heur == Double.POSITIVE_INFINITY || marked.contains(e.to())) && !e.to().equals(dest)) {
continue;
}
double distFr = distTo.get(e.from()) + e.weight();
if(!distTo.containsKey(e.to())){
distTo.put(e.to(), distFr);
}
if (!fringe.contains(e.to())) {
fringe.add(e.to(), distFr + heur);
edgeTo.put(e.to(), e);
}
if (distTo.get(e.to()) > distFr) {
fringe.changePriority(e.to(), heur + distFr);
edgeTo.put(e.to(), e);
distTo.put(e.to(), distFr);
}
if (e.to().equals(dest)) {
solutionweight = distTo.get(e.to());
solution = pathTracer(e);
timetosolve = sw.elapsedTime();
result = SolverOutcome.SOLVED;
return;
}
}
if (timeout < sw.elapsedTime()){
result = SolverOutcome.TIMEOUT;
return;
}
}
result = SolverOutcome.UNSOLVABLE;
solution = List.of();
}
private List<Vertex> pathTracer(WeightedEdge<Vertex> e) {
ArrayList<Vertex> path = new ArrayList<>();
path.add(e.to());
path.add(e.from());
while (!e.from().equals(source)) {
e = edgeTo.get(e.from());
path.add(e.from());
}
Collections.reverse(path);
return path;
}
@Override
public SolverOutcome outcome() {
return result;
}
@Override
public List solution() {
return solution;
}
@Override
public double solutionWeight() {
return solutionweight;
}
@Override
public int numStatesExplored() {
return numofstates;
}
@Override
public double explorationTime() {
return timetosolve;
}
public class IntegerHopGraph implements AStarGraph<Integer> {
@Override
public List<WeightedEdge<Integer>> neighbors(Integer v) {
ArrayList<WeightedEdge<Integer>> neighbors = new ArrayList<>();
neighbors.add(new WeightedEdge<>(v, v * v, 10));
neighbors.add(new WeightedEdge<>(v, v * 2, 5));
neighbors.add(new WeightedEdge<>(v, v / 2, 5));
neighbors.add(new WeightedEdge<>(v, v - 1, 1));
neighbors.add(new WeightedEdge<>(v, v + 1, 1));
return neighbors;
}
@Override
public double estimatedDistanceToGoal(Integer s, Integer goal) {
// possibly fun challenge: Try to find an admissible heuristic that
// speeds up your search. This is tough!
return 0;
}
}
public class DemoIntegerHopPuzzleSolution {
public static void main(String[] args) {
int start = 17;
int goal = 111;
IntegerHopGraph ahg = new IntegerHopGraph();
ShortestPathsSolver<Integer> solver = new AStarSolver<>(ahg, start, goal, 10);
SolutionPrinter.summarizeSolution(solver, " => ");
}
}
}
To get from x = 17 to y = 111, the algorithm gives this result:
Total states explored in 0.019s: 110
Search was successful.
Solution was of length 7, and had total weight 19.0:
17 => 16 => 15 => 225 => 224 => 223 => 111
The correct result should be:
Total states explored in 0.018s: 338 <--- may vary.
Search was successful.
Solution was of length 6, and had total weight 18.0:
17 => 16 => 15 => 225 => 112 => 111
Thanks for all the help guys, but I figured it out. My algorithm terminates prematurely: it stops when it first sees the goal, not when the goal is on top of the heap, as it should.
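For reference, here is a sketch of the main loop with the goal test in the usual place: the check happens when a vertex is dequeued from the priority queue, not when an edge into the goal is relaxed. It reuses the field names from the question (fringe, distTo, edgeTo, sw, timeout), so it is only a shape sketch, not a drop-in replacement.
while (!fringe.isEmpty()) {
    Vertex v = fringe.removeSmallest();
    numofstates++;
    if (v.equals(dest)) {                  // goal reached the top of the heap => path is optimal
        solutionweight = distTo.get(v);
        solution = pathTracer(edgeTo.get(v));
        timetosolve = sw.elapsedTime();
        result = SolverOutcome.SOLVED;
        return;
    }
    for (WeightedEdge<Vertex> e : graph.neighbors(v)) {
        double distFr = distTo.get(v) + e.weight();
        if (!distTo.containsKey(e.to()) || distFr < distTo.get(e.to())) {
            distTo.put(e.to(), distFr);
            edgeTo.put(e.to(), e);
            double priority = distFr + graph.estimatedDistanceToGoal(e.to(), dest);
            if (fringe.contains(e.to())) {
                fringe.changePriority(e.to(), priority);
            } else {
                fringe.add(e.to(), priority);
            }
        }
    }
    if (timeout < sw.elapsedTime()) {      // sw is the stopwatch elided from the question
        result = SolverOutcome.TIMEOUT;
        return;
    }
}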

BFS traversal of all paths in graph using adjacency list

I am currently trying to traverse all paths from source to destination in a graph represented with adjacency lists. I have been trying to do it in a BFS way. I am getting only one path. How do I get it to print the other paths as well? Thanks for the help.
public class AllPossiblePaths {
static int v;
static ArrayList<Integer> adj[];
public AllPossiblePaths(int v) {
this.v = v;
adj = new ArrayList[v];
for (int i = 0; i < v; i++) {
adj[i] = new ArrayList<>();
}
}
// add edge from u to v
public static void addEdge(int u, int v) {
adj[u].add(v);
}
public static void findpaths(int source, int destination) {
LinkedList<ArrayList<Integer>> q = new LinkedList<>();
boolean visited[] = new boolean[v];
LinkedList<Integer> queue = new LinkedList<Integer>();
queue.add(source);
visited[source] = true;
ArrayList<Integer> localPath = new ArrayList<>();
while (!queue.isEmpty()) {
// Dequeue a vertex from queue and print it
int src = queue.poll();
if (!localPath.contains(src)) {
localPath.add(src);
}
if (src == destination) {
System.out.println(localPath);
localPath.remove(localPath.size() - 1);
visited[src] = false;
}
Iterator<Integer> i = adj[src].listIterator();
while (i.hasNext()) {
int n = i.next();
if (!visited[n]) {
queue.add(n);
}
}
}
}
}
Using the following class you can run a BFS to find a single path (findPath) or find multiple paths (findAllpaths). See comments:
import java.util.ArrayList;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
public class AllPossiblePaths {
private boolean[] visited;
//keep track of nodes already included in a path
private boolean[] includedInPath;
private LinkedList<Integer> queue;
private int numberOfNodes;
private List<Integer>[] adj;
//to find a path you need to store the path that lead to it
private List<Integer>[] pathToNode;
public AllPossiblePaths(int numberOfNodes) {
this.numberOfNodes = numberOfNodes;
adj = new ArrayList[numberOfNodes];
pathToNode = new ArrayList[numberOfNodes];
for (int i = 0; i < numberOfNodes; i++) {
adj[i] = new ArrayList<>();
}
}
// add edge from u to v
public AllPossiblePaths addEdge(int from, int to) {
adj[from].add(to);
//unless unidirectional: //if a is connected to b
//than b should be connected to a
adj[to].add(from);
return this; //makes it convenient to add multiple edges
}
public void findPath(int source, int destination) {
System.out.println("------------Single path search---------------");
initializeSearch(source);
while (!queue.isEmpty()) {
// Dequeue a vertex from queue and print it
int src = queue.poll();
visited[src] = true;
if (src == destination) {
System.out.println("Path from "+source+" to "
+ destination+ " :- "+ pathToNode[src]);
break; //exit loop if target found
}
Iterator<Integer> i = adj[src].listIterator();
while (i.hasNext()) {
int n = i.next();
if (! visited[n] && ! queue.contains(n)) {
queue.add(n);
pathToNode[n].addAll(pathToNode[src]);
pathToNode[n].add(src);
}
}
}
}
public void findAllpaths(int source, int destination) {
System.out.println("-----------Multiple path search--------------");
includedInPath = new boolean[numberOfNodes];
initializeSearch(source);
int pathCounter = 0;
while(! allVisited() && !queue.isEmpty()) {
while (!queue.isEmpty()) {
// Dequeue a vertex from queue and print it
int src = queue.poll();
visited[src] = true;
if (src == destination) {
System.out.println("Path " + ++pathCounter + " from "+source+" to "
+ destination+ " :- "+ pathToNode[src]);
//mark nodes that are included in the path, so they will not be included
//in any other path
for(int i=1; i < pathToNode[src].size(); i++) {
includedInPath[pathToNode[src].get(i)] = true;
}
initializeSearch(source); //initialize before restarting
break; //exit loop if target found
}
Iterator<Integer> i = adj[src].listIterator();
while (i.hasNext()) {
int n = i.next();
if (! visited[n] && ! queue.contains(n)
&& ! includedInPath[n] /*ignore nodes already in a path*/) {
queue.add(n);
pathToNode[n].addAll(pathToNode[src]);
pathToNode[n].add(src);
}
}
}
}
}
private void initializeSearch(int source) {
queue = new LinkedList<>();
queue.add(source);
visited = new boolean[numberOfNodes];
for (int i = 0; i < numberOfNodes; i++) {
pathToNode[i]= new ArrayList<>();
}
}
private boolean allVisited() {
for( boolean b : visited) {
if(! b ) return false;
}
return true;
}
}
For testing it, consider this graph:
Run test:
public static void main(String[] args){
AllPossiblePaths app = new AllPossiblePaths(6);
app.addEdge(0, 4)
.addEdge(0, 1)
.addEdge(1, 2)
.addEdge(1, 4)
.addEdge(4, 3)
.addEdge(2, 3)
.addEdge(2, 5)
.addEdge(3, 5);
app.findPath(0,5);
app.findPath(5,0);
app.findAllpaths(0,5);
}
output:
Apparently it is impossible to retrieve all paths from a given source to a given terminal via Breadth-First search. Consider the following class of graphs.
For any nonnegative integer n, let
V := { v_1, ..., v_2n }                                    // inner vertices
     union { s, t }                                        // source and terminal
E := { {v_i, v_i+2} : i < 2n-2 }                           // horizontal edges
     union { {v_i, v_i+3} : i < 2n-3, i is odd }           // cross edges from top to bottom
     union { {v_i, v_i+3} : i < 2n-3, i is even }          // cross edges from bottom to top
     union { {s, v_1}, {s, v_2}, {t, v_2n-1}, {t, v_2n} }  // edges at source and terminal
Informally, the graph consists of two rows of vertices with n columns each; to the left there is a source node and to the right there is a terminal node. On a path from s to t, you can choose at each column to stay in the current row or to switch to the other row.
In total, there are 2^n different paths from s to t, as for each column there are two possibilities to choose the row.
On the other hand, Breadth-First search yields a runtime bound which is polynomial in the encoding length of the graph; this means that Breadth-First search, in general, cannot generate all possible paths from a given source to a given terminal. Furthermore, if the graph contains a cycle, the number of paths might be infinite via repetition of the cycle.
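If the goal is to enumerate the (finitely many) simple paths anyway, the usual tool is a depth-first backtracking search rather than BFS. A minimal sketch over an adjacency-list graph (the class and method names are just for this sketch):
import java.util.*;

class AllSimplePaths {
    // Prints every simple path from src to dst; runtime is proportional to the
    // number of paths, which may be exponential in the size of the graph.
    static void allPaths(List<List<Integer>> adj, int src, int dst) {
        dfs(adj, src, dst, new boolean[adj.size()], new ArrayDeque<>());
    }

    private static void dfs(List<List<Integer>> adj, int v, int dst,
                            boolean[] onPath, Deque<Integer> path) {
        path.addLast(v);
        onPath[v] = true;
        if (v == dst) {
            System.out.println(path);
        } else {
            for (int w : adj.get(v))
                if (!onPath[w]) dfs(adj, w, dst, onPath, path);
        }
        onPath[v] = false;   // backtrack so the vertex can appear on other paths
        path.removeLast();
    }
}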

Interesting system.exit() behaviour and break behaviour - what is happening?

I am trying to implement BFS as follows:
package search;
import java.io.BufferedWriter;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.LinkedList;
import java.util.Queue;
import common.MyNode;
import common.Parser;
public class BFS extends Search {
Queue<MyNode> frontier;
Queue<MyNode> visited;
ArrayList<MyNode> route;
Parser p;
boolean found = false;
private int toWriteInt = 0;
private double distance;
public BFS(MyNode startNode, MyNode goalNode, Parser p) {
super(startNode, goalNode, p);
frontier = new LinkedList<MyNode>();
visited = new LinkedList<MyNode>();
this.p = p;
route = new ArrayList<MyNode>();
}
public void Search() {
visited.add(this.getStartNode());
if (isGoal(this.getStartNode())) {
System.out.println("goal found at start");
goalFound();
}
ArrayList<MyNode> successors = this.getStartNode().getSuccessors();
for (int i = 0; i < successors.size(); i++) {
successors.get(i).setParent(this.getStartNode());
if (!(visited.contains(successors.get(i)))
&& !(frontier.contains(successors.get(i)))) {
if (isGoal(successors.get(i))) {
visited.add(successors.get(i));
System.out.println("goal found at start successor");
goalFound();
break;
} else {
frontier.add(successors.get(i));
}
}
}
while (!frontier.isEmpty()) {
MyNode current = frontier.poll();
ArrayList<MyNode> currentSuccessors = current.getSuccessors();
visited.add(current);
for (int i = 0; i < currentSuccessors.size(); i++) {
if (!(visited.contains(currentSuccessors.get(i)))
&& !(frontier.contains(currentSuccessors.get(i)))) {
currentSuccessors.get(i).setParent(current);
if (isGoal(currentSuccessors.get(i))) {
visited.add(currentSuccessors.get(i));
System.out.println("goal found in loop");
goalFound();
break;
} else {
frontier.add(currentSuccessors.get(i));
}
}
}
}
}
private boolean isGoal(MyNode toCheck) {
boolean goal = false;
if (toCheck.equals(this.getGoalNode())) {
goal = true;
}
return goal;
}
private void goalFound() {
System.out.println("goal found with " + visited.size());
printRoute();
System.exit(0);
}
This all works fine. The printRoute() method is as follows:
public void printRoute() {
MyNode i = this.getGoalNode();
while (i.getParent() != null) {
System.out.println(i.getId());
distance = distance
+ Search.distFrom(i.getLat(), i.getLon(), i.getParent()
.getLat(), i.getParent().getLon());
i = i.getParent();
}
System.out.println(this.startNode.getId());
System.out.println("completed routing");
System.out.println("path length of " + distance + "km");
}
}
Once again, this works fine and prints the correct route. However, if I remove the System.exit(0), the code does not work: the while loop of printRoute() simply prints forever, printing the same two nodes (i and i.getParent()) over and over. I find this really strange, as the method must have completed for my System.exit to be hit.
Can anyone tell me why this is? It's very problematic to have to call System.exit, as I would like to embed BFS in my code, and this means I cannot.
thanks,
MJB
edit: It was suggested below that returning would fix my issue. Below is an A* search that exhibits the same behaviour and only works if I call System.exit, even though I return:
package search;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Stack;
import common.MyNode;
import common.Parser;
public class AStar extends Search {
private ArrayList<MyNode> openList; // This is basically our frontier
private ArrayList<MyNode> closedList;// This is the visited set.
public AStar(MyNode startNode, MyNode goalNode, Parser p) {
super(startNode, goalNode, p);
openList = new ArrayList<MyNode>();
closedList = new ArrayList<MyNode>();
}
public Stack<MyNode> Search() {
Stack<MyNode> toReturn = new Stack<MyNode>();
openList.add(this.getStartNode());
while (!openList.isEmpty()) {
Collections.sort(openList);
MyNode q = openList.remove(0);
openList.remove(q);
// generate q's 8 successors and set their parents to q
// if successor is the goal, stop the search
if (isGoal(q)) {
System.out.println("search completed- goal found");
MyNode i = this.getGoalNode();
while (i.getParent() != null) {
toReturn.add(i);
i = q.getParent();
}
return toReturn;
}
closedList.add(q);
ArrayList<MyNode> successors = q.getSuccessors();
for (MyNode node : successors) {
if (closedList.contains(node)) {
continue;
}
node.setParent(q);
/*
* successor.g = q.g + distance between successor and q
* successor.h = distance from goal to successor
* successor.f=successor.g + successor.h
*/
double g = q.getG()
+ Search.distFrom(q.getLat(), q.getLon(),
node.getLat(), node.getLon());
double h = Search.distFrom(this.getGoalNode().getLat(), this
.getGoalNode().getLon(), q.getLat(), q.getLon());
node.setG(g);
node.setH(h);
node.setF(g + h);
// if a node with the same position as successor is in the OPEN
// list
// has a lower f than successor, skip this successor
int openIndex = openList.indexOf(node);
int closedIndex = closedList.indexOf(node);
if (openIndex > -1) {
if (openList.get(openIndex).compareTo(node) == -1)
continue;
}
// if a node with the same position as successor is in the
// CLOSED list
// which has a lower f than successor, skip this successor
if (closedIndex > -1) {
if (closedList.get(closedIndex).compareTo(node) == -1)
continue;
}
if (openIndex > -1)
openList.remove(openIndex);
Collections.sort(openList);
if (closedIndex > -1)
closedList.remove(closedIndex);
openList.add(node);
Collections.sort(openList);
}
closedList.add(0, q);
}
return toReturn;
}
edit: as requested, main method:
public static void main(String[] args) {
AStar search = new AStar(parse.getAllNodes().get(new Long("21295291")),
parse.getAllNodes().get(new Long("319561075")), parse);
search.Search();
}
These nodes have the following attributes:
node [id=21295291, lat=52.4737031, lon=-1.8747258]
node [id=119126329, lat=52.4701337, lon=-1.8716235]
The code
goalFound();
break;
breaks only the inner (for) loop, but you also have the outer while loop. You can use 'return;' here instead of 'break' (or a labeled break).
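For completeness, a labeled break would look like this (a sketch reusing the question's names, not a full rewrite):
search:
while (!frontier.isEmpty()) {
    MyNode current = frontier.poll();
    visited.add(current);
    for (MyNode next : current.getSuccessors()) {
        if (!visited.contains(next) && !frontier.contains(next)) {
            next.setParent(current);
            if (isGoal(next)) {
                printRoute();    // no System.exit needed
                break search;    // exits the outer while loop, not just the for loop
            }
            frontier.add(next);
        }
    }
}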

Correct heap implementation in a priority queue

My issue is more semantic than functional, as the code does seem to implement the deQueue and enQueue functions correctly.
The reheapDown and reheapUp functions are being used incorrectly, and I believe the issue lies in my heap class:
package priqueue;
public class Hosheap{
private Patient[] elements;
private int numElements;
public Hosheap(int maxSize)
{
elements= new Patient[maxSize];
numElements=maxSize;
}
public void ReheapDown(int root,int bottom)
{
int maxChild;
int rightChild;
int leftChild;
leftChild=root*2+1;
rightChild=root*2+2;
if (leftChild<=bottom)
{
if(leftChild==bottom)
maxChild=leftChild;
else
{
if(elements[leftChild].getPriority() <= elements[rightChild].getPriority())
maxChild=rightChild;
else
maxChild=leftChild;
}
if(elements[root].getPriority()<elements[maxChild].getPriority())
{
Swap(root,maxChild);
ReheapDown(maxChild,bottom);
}
}
}
public void ReheapUp(int root,int bottom)
{
int parent;
if(bottom>root)
{
parent=(bottom-1)/2;
if(elements[parent].getPriority()<elements[bottom].getPriority())
{
Swap(parent,bottom);
ReheapUp(root,parent);
}
}
}
public void Swap(int Pos1, int Pos2)
{
Patient temp;
temp = elements[Pos1];
elements[Pos1]=elements[Pos2];
elements[Pos2]=temp;
}
public Patient getElement(int e)
{
return elements[e];
}
public void setElement(Patient p, int n)
{
elements[n]=p;
}
}
The idea is to rearrange a simple priority queue system so that when a patient object is removed, ReheapUp or ReheapDown correctly rearranges the queue, which the code does not accomplish. Should I also include the priority queue code, or is this already too lengthy?
I am using NetBeans IDE 6.0.1, if that helps.
Depending on your usage requirements, the answer relating to TreeSets will most probably do what you want.
However if you really need a queue, as opposed to a sorted collection, then the inbuilt PriorityQueue may be of use.
Not exactly answering your question, but with Java you may want to look into the built-in Collection classes. You can get priority-queue behavior by using a TreeSet (a type of ordered set) and making Patient comparable by priority (via Comparable or a custom Comparator). Depending on what you're trying to achieve, this may be preferable. It would look something like this:
In Patient.java ...
class Patient implements Comparable<Patient> {
...
public int compareTo(Patient other) {
return Integer.compare(getPriority(), other.getPriority());
}
Then in the place you want to use the queue
Set<Patient> queue = new TreeSet<Patient>();
queue.add(p1);
queue.add(p2);
//traverse in order of priority
for(Patient p : queue) {
doStuff();
}
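If you do need queue semantics (poll() removes the highest-priority patient, and equal priorities are allowed, which a TreeSet would silently collapse), the built-in java.util.PriorityQueue with a Comparator works too. A minimal sketch, assuming Patient has an int getPriority() and higher numbers mean higher priority:
import java.util.Comparator;
import java.util.PriorityQueue;

PriorityQueue<Patient> queue =
        new PriorityQueue<>(Comparator.comparingInt(Patient::getPriority).reversed());
queue.add(p1);
queue.add(p2);
while (!queue.isEmpty()) {
    Patient next = queue.poll();   // highest priority first
    doStuff(next);
}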
Here is a simple implementation of a priority heap. I coded it up pretty quickly, so it may have some flaws, but I have implemented the pushUp() and pushDown() logic.
import java.util.Random;
public class Heap {
private Double[] data;
private int lastItem;
public Heap(int initialSize) {
// to simplify child/parent math leave the first index empty
// and use a lastItem that gives us the size
data = new Double[initialSize];
lastItem = 0;
}
public void insert(Double d) {
// double size if needed
// should have a matching shrink but this is example code
if (lastItem + 1 >= data.length) {
Double[] doubled = new Double[data.length * 2];
System.arraycopy(data, 0, doubled, 0, data.length);
data = doubled;
}
data[lastItem + 1] = d;
lastItem++;
pushUp(lastItem);
}
public void pushDown(int index) {
if (lastItem > 1) {
int leftChildIndex = index * 2;
int rightChildIndex = leftChildIndex + 1;
// assume that neither child will dominate (in priority)
// the item at index
int indexToPromote = index;
// there may not be a left child
if (leftChildIndex <= lastItem) {
Double leftChild = data[leftChildIndex];
Double tmp = data[index];
if (tmp.compareTo(leftChild) < 0) {
indexToPromote = leftChildIndex;
}
// there might not be a right child
if (rightChildIndex <= lastItem) {
Double rightChild = data[rightChildIndex];
tmp = data[indexToPromote];
if (tmp.compareTo(rightChild) < 0) {
indexToPromote = rightChildIndex;
}
}
}
// did either child dominate the item at index
// if so swap and push down again
if (indexToPromote != index) {
swap(index, indexToPromote);
pushDown(indexToPromote);
}
}
}
public void pushUp(int index) {
if (index > 1) {
// equivalent to floor((double)index/2.0d);
// if item at index is greater than its parent
// push the item up until it finds a home
int parentIndex = index >>> 1;
Double parent = data[parentIndex];
Double item = data[index];
if (item.compareTo(parent) > 0) {
swap(parentIndex, index);
pushUp(parentIndex);
}
}
}
public Double removeTop() {
// assume size is zero then examine other cases
Double top = null;
if (lastItem > 1) {
// save the top item and take the bottom item and place it
// at the top the push the new top item down until it
// finds a home
top = data[1];
Double bottom = data[lastItem];
lastItem--;
data[1] = bottom;
pushDown(1);
} else if (lastItem == 1) {
top = data[1];
lastItem--;
}
return top;
}
public int size() {
return lastItem;
}
private void swap(int index1, int index2) {
Double temp = data[index1];
data[index1] = data[index2];
data[index2] = temp;
}
public static void main(String[] args) {
Heap heap = new Heap(4);
Random r = new Random();
for (int i = 0; i < 100000; i++) {
Double d = Double.valueOf(r.nextDouble() * 100.0d);
heap.insert(d);
}
double max = Double.MAX_VALUE;
while (heap.size() > 0) {
Double top = heap.removeTop();
if (top.doubleValue() > max) {
System.out.println("bad ordering...");
}
max = top.doubleValue();
System.out.println(max);
}
System.out.println("done...");
}
}

Where do I find a standard Trie based map implementation in Java? [closed]

Closed. This question does not meet Stack Overflow guidelines. It is not currently accepting answers.
We don’t allow questions seeking recommendations for books, tools, software libraries, and more. You can edit the question so it can be answered with facts and citations.
I have a Java program that stores a lot of mappings from Strings to various objects.
Right now, my options are either to rely on hashing (via HashMap) or on binary searches (via TreeMap). I am wondering if there is an efficient and standard trie-based map implementation in a popular and quality collections library?
I've written my own in the past, but I'd rather go with something standard, if available.
Quick clarification: While my question is general, in the current project I am dealing with a lot of data that is indexed by fully-qualified class name or method signature. Thus, there are many shared prefixes.
You might want to look at the Trie implementation that Limewire is contributing to Google Guava.
There is no trie data structure in the core Java libraries.
This may be because tries are usually designed to store character strings, while Java data structures are more general, usually holding any Object (defining equality and a hash operation), though they are sometimes limited to Comparable objects (defining an order). There's no common abstraction for "a sequence of symbols," although CharSequence is suitable for character strings, and I suppose you could do something with Iterable for other types of symbols.
Here's another point to consider: when trying to implement a conventional trie in Java, you are quickly confronted with the fact that Java supports Unicode. To have any sort of space efficiency, you have to restrict the strings in your trie to some subset of symbols, or abandon the conventional approach of storing child nodes in an array indexed by symbol. This might be another reason why tries are not considered general-purpose enough for inclusion in the core library, and something to watch out for if you implement your own or use a third-party library.
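To make that trade-off concrete, here are the two usual ways to store a node's children in Java (illustrative sketch only):
// (a) Array indexed by symbol: fast lookups, but only practical for a small,
//     fixed alphabet (here 'a'..'z'); a full char alphabet would need a
//     65,536-slot array in every node.
class ArrayTrieNode {
    ArrayTrieNode[] children = new ArrayTrieNode[26];
    boolean isWord;
}

// (b) Map keyed by symbol: handles arbitrary Unicode characters, at the cost
//     of per-entry object overhead and hashing.
class MapTrieNode {
    java.util.Map<Character, MapTrieNode> children = new java.util.HashMap<>();
    boolean isWord;
}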
Apache Commons Collections v4.0 now supports trie structures.
See the org.apache.commons.collections4.trie package info for more information. In particular, check the PatriciaTrie class:
Implementation of a PATRICIA Trie (Practical Algorithm to Retrieve Information Coded in Alphanumeric).
A PATRICIA Trie is a compressed Trie. Instead of storing all data at the edges of the Trie (and having empty internal nodes), PATRICIA stores data in every node. This allows for very efficient traversal, insert, delete, predecessor, successor, prefix, range, and select(Object) operations. All operations are performed at worst in O(K) time, where K is the number of bits in the largest item in the tree. In practice, operations actually take O(A(K)) time, where A(K) is the average number of bits of all items in the tree.
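A short usage sketch of PatriciaTrie from commons-collections4 (the keys and values here are just illustrative):
import java.util.SortedMap;
import org.apache.commons.collections4.trie.PatriciaTrie;

PatriciaTrie<Integer> trie = new PatriciaTrie<>();
trie.put("com.example.Foo", 1);
trie.put("com.example.Foo.bar()", 2);
trie.put("org.other.Baz", 3);

// Live view of all entries whose key starts with the given prefix:
SortedMap<String, Integer> matches = trie.prefixMap("com.example");
System.out.println(matches.keySet());   // [com.example.Foo, com.example.Foo.bar()]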
Also check out concurrent-trees. They support both Radix and Suffix trees and are designed for high concurrency environments.
I wrote and published a simple and fast implementation here.
What you need is org.apache.commons.collections.FastTreeMap, I think.
Below is a basic HashMap implementation of a Trie. Some people might find this useful...
class Trie {
HashMap<Character, HashMap> root;
public Trie() {
root = new HashMap<Character, HashMap>();
}
public void addWord(String word) {
HashMap<Character, HashMap> node = root;
for (int i = 0; i < word.length(); i++) {
Character currentLetter = word.charAt(i);
if (node.containsKey(currentLetter) == false) {
node.put(currentLetter, new HashMap<Character, HashMap>());
}
node = node.get(currentLetter);
}
}
public boolean containsPrefix(String word) {
HashMap<Character, HashMap> node = root;
for (int i = 0; i < word.length(); i++) {
Character currentLetter = word.charAt(i);
if (node.containsKey(currentLetter)) {
node = node.get(currentLetter);
} else {
return false;
}
}
return true;
}
}
Apache's commons collections:
org.apache.commons.collections4.trie.PatriciaTrie
You can try the Completely Java library, it features a PatriciaTrie implementation. The API is small and easy to get started, and it's available in the Maven central repository.
You might look at this TopCoder one as well (registration required...).
If you require a sorted map, then tries are worthwhile.
If you don't, then a hashmap is better.
A hashmap with string keys can be improved over the standard Java implementation:
Array hash map
If you're not worried about pulling in the Scala library, you can use this space-efficient implementation of a burst trie that I wrote.
https://github.com/nbauernfeind/scala-burst-trie
Here is my implementation; enjoy it via GitHub - MyTrie.java:
/* usage:
MyTrie trie = new MyTrie();
trie.insert("abcde");
trie.insert("abc");
trie.insert("sadas");
trie.insert("abc");
trie.insert("wqwqd");
System.out.println(trie.contains("abc"));
System.out.println(trie.contains("abcd"));
System.out.println(trie.contains("abcdefg"));
System.out.println(trie.contains("ab"));
System.out.println(trie.getWordCount("abc"));
System.out.println(trie.getAllDistinctWords());
*/
import java.util.*;
public class MyTrie {
private class Node {
public int[] next = new int[26];
public int wordCount;
public Node() {
for(int i=0;i<26;i++) {
next[i] = NULL;
}
wordCount = 0;
}
}
private int curr;
private Node[] nodes;
private List<String> allDistinctWords;
public final static int NULL = -1;
public MyTrie() {
nodes = new Node[100000];
nodes[0] = new Node();
curr = 1;
}
private int getIndex(char c) {
return (int)(c - 'a');
}
private void depthSearchWord(int x, String currWord) {
for(int i=0;i<26;i++) {
int p = nodes[x].next[i];
if(p != NULL) {
String word = currWord + (char)(i + 'a');
if(nodes[p].wordCount > 0) {
allDistinctWords.add(word);
}
depthSearchWord(p, word);
}
}
}
public List<String> getAllDistinctWords() {
allDistinctWords = new ArrayList<String>();
depthSearchWord(0, "");
return allDistinctWords;
}
public int getWordCount(String str) {
int len = str.length();
int p = 0;
for(int i=0;i<len;i++) {
int j = getIndex(str.charAt(i));
if(nodes[p].next[j] == NULL) {
return 0;
}
p = nodes[p].next[j];
}
return nodes[p].wordCount;
}
public boolean contains(String str) {
int len = str.length();
int p = 0;
for(int i=0;i<len;i++) {
int j = getIndex(str.charAt(i));
if(nodes[p].next[j] == NULL) {
return false;
}
p = nodes[p].next[j];
}
return nodes[p].wordCount > 0;
}
public void insert(String str) {
int len = str.length();
int p = 0;
for(int i=0;i<len;i++) {
int j = getIndex(str.charAt(i));
if(nodes[p].next[j] == NULL) {
nodes[curr] = new Node();
nodes[p].next[j] = curr;
curr++;
}
p = nodes[p].next[j];
}
nodes[p].wordCount++;
}
}
I have just tried my own concurrent trie implementation, but it is not based on characters; it is based on hash codes. Still, we can use it by having a Map of Maps for each character's hash code.
You can test it using the code at https://github.com/skanagavelu/TrieHashMap/blob/master/src/TrieMapPerformanceTest.java and
https://github.com/skanagavelu/TrieHashMap/blob/master/src/TrieMapValidationTest.java
import java.util.concurrent.atomic.AtomicReferenceArray;
public class TrieMap {
public static int SIZEOFEDGE = 4;
public static int OSIZE = 5000;
}
abstract class Node {
public Node getLink(String key, int hash, int level){
throw new UnsupportedOperationException();
}
public Node createLink(int hash, int level, String key, String val) {
throw new UnsupportedOperationException();
}
public Node removeLink(String key, int hash, int level){
throw new UnsupportedOperationException();
}
}
class Vertex extends Node {
String key;
volatile String val;
volatile Vertex next;
public Vertex(String key, String val) {
this.key = key;
this.val = val;
}
@Override
public boolean equals(Object obj) {
Vertex v = (Vertex) obj;
return this.key.equals(v.key);
}
@Override
public int hashCode() {
return key.hashCode();
}
@Override
public String toString() {
return key +"#"+key.hashCode();
}
}
class Edge extends Node {
volatile AtomicReferenceArray<Node> array; //This is needed to ensure array elements are volatile
public Edge(int size) {
array = new AtomicReferenceArray<Node>(8);
}
@Override
public Node getLink(String key, int hash, int level){
int index = Base10ToBaseX.getBaseXValueOnAtLevel(Base10ToBaseX.Base.BASE8, hash, level);
Node returnVal = array.get(index);
for(;;) {
if(returnVal == null) {
return null;
}
else if((returnVal instanceof Vertex)) {
Vertex node = (Vertex) returnVal;
for(;node != null; node = node.next) {
if(node.key.equals(key)) {
return node;
}
}
return null;
} else { //instanceof Edge
level = level + 1;
index = Base10ToBaseX.getBaseXValueOnAtLevel(Base10ToBaseX.Base.BASE8, hash, level);
Edge e = (Edge) returnVal;
returnVal = e.array.get(index);
}
}
}
@Override
public Node createLink(int hash, int level, String key, String val) { //Remove size
for(;;) { //Repeat the work on the current node, since some other thread modified this node
int index = Base10ToBaseX.getBaseXValueOnAtLevel(Base10ToBaseX.Base.BASE8, hash, level);
Node nodeAtIndex = array.get(index);
if ( nodeAtIndex == null) {
Vertex newV = new Vertex(key, val);
boolean result = array.compareAndSet(index, null, newV);
if(result == Boolean.TRUE) {
return newV;
}
//continue; since new node is inserted by other thread, hence repeat it.
}
else if(nodeAtIndex instanceof Vertex) {
Vertex vrtexAtIndex = (Vertex) nodeAtIndex;
int newIndex = Base10ToBaseX.getBaseXValueOnAtLevel(Base10ToBaseX.Base.BASE8, vrtexAtIndex.hashCode(), level+1);
int newIndex1 = Base10ToBaseX.getBaseXValueOnAtLevel(Base10ToBaseX.Base.BASE8, hash, level+1);
Edge edge = new Edge(Base10ToBaseX.Base.BASE8.getLevelZeroMask()+1);
if(newIndex != newIndex1) {
Vertex newV = new Vertex(key, val);
edge.array.set(newIndex, vrtexAtIndex);
edge.array.set(newIndex1, newV);
boolean result = array.compareAndSet(index, vrtexAtIndex, edge); //REPLACE vertex to edge
if(result == Boolean.TRUE) {
return newV;
}
//continue; since vrtexAtIndex may be removed or changed to Edge already.
} else if(vrtexAtIndex.key.hashCode() == hash) {//vrtex.hash == hash) { HERE newIndex == newIndex1
synchronized (vrtexAtIndex) {
boolean result = array.compareAndSet(index, vrtexAtIndex, vrtexAtIndex); //Double check this vertex is not removed.
if(result == Boolean.TRUE) {
Vertex prevV = vrtexAtIndex;
for(;vrtexAtIndex != null; vrtexAtIndex = vrtexAtIndex.next) {
prevV = vrtexAtIndex; // prevV is used to handle when vrtexAtIndex reached NULL
if(vrtexAtIndex.key.equals(key)){
vrtexAtIndex.val = val;
return vrtexAtIndex;
}
}
Vertex newV = new Vertex(key, val);
prevV.next = newV; // Within SYNCHRONIZATION since prevV.next may be added with some other.
return newV;
}
//Continue; vrtexAtIndex got changed
}
} else { //HERE newIndex == newIndex1 BUT vrtex.hash != hash
edge.array.set(newIndex, vrtexAtIndex);
boolean result = array.compareAndSet(index, vrtexAtIndex, edge); //REPLACE vertex to edge
if(result == Boolean.TRUE) {
return edge.createLink(hash, (level + 1), key, val);
}
}
}
else { //instanceof Edge
return nodeAtIndex.createLink(hash, (level + 1), key, val);
}
}
}
@Override
public Node removeLink(String key, int hash, int level){
for(;;) {
int index = Base10ToBaseX.getBaseXValueOnAtLevel(Base10ToBaseX.Base.BASE8, hash, level);
Node returnVal = array.get(index);
if(returnVal == null) {
return null;
}
else if((returnVal instanceof Vertex)) {
synchronized (returnVal) {
Vertex node = (Vertex) returnVal;
if(node.next == null) {
if(node.key.equals(key)) {
boolean result = array.compareAndSet(index, node, null);
if(result == Boolean.TRUE) {
return node;
}
continue; //Vertex may be changed to Edge
}
return null; //Nothing found; This is not the same vertex we are looking for. Here hashcode is same but key is different.
} else {
if(node.key.equals(key)) { //Removing the first node in the link
boolean result = array.compareAndSet(index, node, node.next);
if(result == Boolean.TRUE) {
return node;
}
continue; //Vertex(node) may be changed to Edge, so try again.
}
Vertex prevV = node; // prevV is used to handle when vrtexAtIndex is found and to be removed from its previous
node = node.next;
for(;node != null; prevV = node, node = node.next) {
if(node.key.equals(key)) {
prevV.next = node.next; //Removing other than first node in the link
return node;
}
}
return null; //Nothing found in the linked list.
}
}
} else { //instanceof Edge
return returnVal.removeLink(key, hash, (level + 1));
}
}
}
}
class Base10ToBaseX {
public static enum Base {
/**
* Integer is represented in 32 bit in 32 bit machine.
* There we can split this integer no of bits into multiples of 1,2,4,8,16 bits
*/
BASE2(1,1,32), BASE4(3,2,16), BASE8(7,3,11)/* OCTAL*/, /*BASE10(3,2),*/
BASE16(15, 4, 8){
public String getFormattedValue(int val){
switch(val) {
case 10:
return "A";
case 11:
return "B";
case 12:
return "C";
case 13:
return "D";
case 14:
return "E";
case 15:
return "F";
default:
return "" + val;
}
}
}, /*BASE32(31,5,1),*/ BASE256(255, 8, 4), /*BASE512(511,9),*/ Base65536(65535, 16, 2);
private int LEVEL_0_MASK;
private int LEVEL_1_ROTATION;
private int MAX_ROTATION;
Base(int levelZeroMask, int levelOneRotation, int maxPossibleRotation) {
this.LEVEL_0_MASK = levelZeroMask;
this.LEVEL_1_ROTATION = levelOneRotation;
this.MAX_ROTATION = maxPossibleRotation;
}
int getLevelZeroMask(){
return LEVEL_0_MASK;
}
int getLevelOneRotation(){
return LEVEL_1_ROTATION;
}
int getMaxRotation(){
return MAX_ROTATION;
}
String getFormattedValue(int val){
return "" + val;
}
}
public static int getBaseXValueOnAtLevel(Base base, int on, int level) {
if(level > base.getMaxRotation() || level < 1) {
return 0; //INVALID Input
}
int rotation = base.getLevelOneRotation();
int mask = base.getLevelZeroMask();
if(level > 1) {
rotation = (level-1) * rotation;
mask = mask << rotation;
} else {
rotation = 0;
}
return (on & mask) >>> rotation;
}
}
