CmisObjectNotFoundException when trying to access my Alfresco repository - java

I'm new to CMIS and Alfresco, and I get this error when I try to connect to my Alfresco repository using the AtomPub binding. I have no idea about the source of my problem. Is it a missing feature? Is it my credentials?
When I installed it, I chose only:
- Alfresco community
- Solr4
What should I do if I want to use web services? Should I install a specific plugin in my Alfresco?
I got this error:
SLF4J: Class path contains multiple SLF4J bindings.
SLF4J: Found binding in [jar:file:/C:/Users/ME%2ME/.m2/repository/org/slf4j/slf4j-simple/1.7.9/slf4j-simple-1.7.9.jar!/org/slf4j/impl/StaticLoggerBinder.class]
SLF4J: Found binding in [jar:file:/C:/Users/ME%2ME/.m2/repository/ch/qos/logback/logback-classic/1.1.3/logback-classic-1.1.3.jar!/org/slf4j/impl/StaticLoggerBinder.class]
SLF4J: See http://www.slf4j.org/codes.html#multiple_bindings for an explanation.
SLF4J: Actual binding is of type [org.slf4j.impl.SimpleLoggerFactory]
Exception in thread "main" org.apache.chemistry.opencmis.commons.exceptions.CmisObjectNotFoundException: Introuvable
at org.apache.chemistry.opencmis.client.bindings.spi.atompub.AbstractAtomPubService.convertStatusCode(AbstractAtomPubService.java:499)
at org.apache.chemistry.opencmis.client.bindings.spi.atompub.AbstractAtomPubService.read(AbstractAtomPubService.java:701)
at org.apache.chemistry.opencmis.client.bindings.spi.atompub.AbstractAtomPubService.getRepositoriesInternal(AbstractAtomPubService.java:873)
at org.apache.chemistry.opencmis.client.bindings.spi.atompub.RepositoryServiceImpl.getRepositoryInfos(RepositoryServiceImpl.java:66)
at org.apache.chemistry.opencmis.client.bindings.impl.RepositoryServiceImpl.getRepositoryInfos(RepositoryServiceImpl.java:92)
at org.apache.chemistry.opencmis.client.runtime.SessionFactoryImpl.getRepositories(SessionFactoryImpl.java:120)
at org.apache.chemistry.opencmis.client.runtime.SessionFactoryImpl.getRepositories(SessionFactoryImpl.java:107)
at fr.omb.TestOMB.connect(TestOMB.java:160)
at fr.omb.TestOMB.main(TestOMB.java:35)
My code:
package fr.omb;
import java.io.ByteArrayInputStream;
import java.io.UnsupportedEncodingException;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import org.apache.chemistry.opencmis.client.api.CmisObject;
import org.apache.chemistry.opencmis.client.api.Document;
import org.apache.chemistry.opencmis.client.api.Folder;
import org.apache.chemistry.opencmis.client.api.Repository;
import org.apache.chemistry.opencmis.client.api.Session;
import org.apache.chemistry.opencmis.client.api.SessionFactory;
import org.apache.chemistry.opencmis.client.runtime.SessionFactoryImpl;
import org.apache.chemistry.opencmis.commons.PropertyIds;
import org.apache.chemistry.opencmis.commons.SessionParameter;
import org.apache.chemistry.opencmis.commons.data.ContentStream;
import org.apache.chemistry.opencmis.commons.enums.BaseTypeId;
import org.apache.chemistry.opencmis.commons.enums.BindingType;
import org.apache.chemistry.opencmis.commons.enums.UnfileObject;
import org.apache.chemistry.opencmis.commons.enums.VersioningState;
import org.apache.chemistry.opencmis.commons.exceptions.CmisObjectNotFoundException;
import org.apache.commons.lang3.StringUtils;
public class TestOMB {
private static Session session;
private static final String ALFRESCO_ATOMPUB_URL = "http://localhost:8080/alfresco/service/cmis";
private static final String TEST_FOLDER_NAME = "chemistryTestFolder";
private static final String TEST_DOCUMENT_NAME_1 = "chemistryTest1.txt";
private static final String TEST_DOCUMENT_NAME_2 = "chemistryTest2.txt";
public static void main(String[] args) {
Folder root = connect();
cleanup(root, TEST_FOLDER_NAME);
Folder newFolder = createFolder(root, TEST_FOLDER_NAME);
createDocument(newFolder, TEST_DOCUMENT_NAME_1);
createDocument(newFolder, TEST_DOCUMENT_NAME_2);
System.out.println("+++ List Folder +++");
listFolder(0, newFolder);
deleteDocument(newFolder, "/" + TEST_DOCUMENT_NAME_2);
System.out.println("+++ List Folder +++");
listFolder(0, newFolder);
}
/**
* Clean up test folder before executing test
*
* @param target
* @param delFolderName
*/
private static void cleanup(Folder target, String delFolderName) {
try {
CmisObject object = session.getObjectByPath(target.getPath() + delFolderName);
Folder delFolder = (Folder) object;
delFolder.deleteTree(true, UnfileObject.DELETE, true);
} catch (CmisObjectNotFoundException e) {
System.err.println("No need to clean up.");
}
}
/**
*
* @param target
*/
private static void listFolder(int depth, Folder target) {
String indent = StringUtils.repeat("\t", depth);
for (Iterator<CmisObject> it = target.getChildren().iterator(); it.hasNext();) {
CmisObject o = it.next();
if (BaseTypeId.CMIS_DOCUMENT.equals(o.getBaseTypeId())) {
System.out.println(indent + "[Docment] " + o.getName());
} else if (BaseTypeId.CMIS_FOLDER.equals(o.getBaseTypeId())) {
System.out.println(indent + "[Folder] " + o.getName());
listFolder(depth + 1, (Folder) o); // do not mutate depth, or sibling entries would be indented incorrectly
}
}
}
/**
* Delete test document
*
* @param target
* @param delDocName
*/
private static void deleteDocument(Folder target, String delDocName) {
try {
CmisObject object = session.getObjectByPath(target.getPath() + delDocName);
Document delDoc = (Document) object;
delDoc.delete(true);
} catch (CmisObjectNotFoundException e) {
System.err.println("Document is not found: " + delDocName);
}
}
/**
* Create test document with content
*
* @param target
* @param newDocName
*/
private static void createDocument(Folder target, String newDocName) {
Map<String, String> props = new HashMap<String, String>();
props.put(PropertyIds.OBJECT_TYPE_ID, "cmis:document");
props.put(PropertyIds.NAME, newDocName);
System.out.println("This is a test document: " + newDocName);
String content = "aegif Mind Share Leader Generating New Paradigms by aegif corporation.";
byte[] buf = null;
try {
buf = content.getBytes("UTF-8");
} catch (UnsupportedEncodingException e) {
e.printStackTrace();
}
ByteArrayInputStream input = new ByteArrayInputStream(buf);
ContentStream contentStream = session.getObjectFactory().createContentStream(newDocName, buf.length,
"text/plain; charset=UTF-8", input);
target.createDocument(props, contentStream, VersioningState.MAJOR);
}
/**
* Create test folder directly under target folder
*
* @param target
* @param newFolderName
* @return newly created folder
*/
private static Folder createFolder(Folder target, String newFolderName) {
Map<String, String> props = new HashMap<String, String>();
props.put(PropertyIds.OBJECT_TYPE_ID, "cmis:folder");
props.put(PropertyIds.NAME, newFolderName);
Folder newFolder = target.createFolder(props);
return newFolder;
}
/**
* Connect to alfresco repository
*
* @return root folder object
*/
private static Folder connect() {
SessionFactory sessionFactory = SessionFactoryImpl.newInstance();
Map<String, String> parameters = new HashMap<String, String>();
// User credentials.
parameters.put(SessionParameter.USER, "myuser");
parameters.put(SessionParameter.PASSWORD, "mypassword");
// Connection settings.
parameters.put(SessionParameter.BINDING_TYPE, BindingType.ATOMPUB.value());
parameters.put(SessionParameter.ATOMPUB_URL, ALFRESCO_ATOMPUB_URL);
parameters.put(SessionParameter.AUTH_HTTP_BASIC, "true");
parameters.put(SessionParameter.COOKIES, "true");
parameters.put(SessionParameter.OBJECT_FACTORY_CLASS,
"org.alfresco.cmis.client.impl.AlfrescoObjectFactoryImpl");
// Create session.
// Alfresco only provides one repository.
Repository repository = sessionFactory.getRepositories(parameters).get(0);
session = repository.createSession(); // assign the static field; declaring a local variable here would leave it null
return session.getRootFolder();
}
}

I found the solution: it's because of the Alfresco version. Since 4.x, the AtomPub URL is http://localhost:8080/alfresco/cmisatom.
https://community.alfresco.com/docs/DOC-5527-cmis
For Alfresco 3.x: http://[host]:[port]/alfresco/service/cmis
For Alfresco 4.0.x, Alfresco 4.1.x and Alfresco 4.2.a-c: http://[host]:[port]/alfresco/cmisatom
For Alfresco 4.2.d-f, Alfresco 5.0 and Alfresco 5.1: http://[host]:[port]/alfresco/api/-default-/public/cmis/versions/1.0/atom
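For reference, a minimal sketch of what the session setup in connect() would look like against Alfresco 4.x; only the AtomPub URL changes compared to the code above:
// Same OpenCMIS session setup as above, pointing at the 4.x AtomPub endpoint.
Map<String, String> parameters = new HashMap<String, String>();
parameters.put(SessionParameter.USER, "myuser");
parameters.put(SessionParameter.PASSWORD, "mypassword");
parameters.put(SessionParameter.BINDING_TYPE, BindingType.ATOMPUB.value());
parameters.put(SessionParameter.ATOMPUB_URL, "http://localhost:8080/alfresco/cmisatom");
Repository repository = SessionFactoryImpl.newInstance().getRepositories(parameters).get(0);
Session session = repository.createSession();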

Exception in thread "main" java.lang.NoClassDefFoundError: org/jsoup/Jsoup

I copied a simple web crawler from the internet and then started to run the application in a test class. Every time I try to run the application I get an Exception in thread "main" java.lang.NoClassDefFoundError: org/jsoup/Jsoup error. I first imported the jsoup JAR as an external JAR into a library, because I needed it for the HTTP stuff.
Error messages:
Exception in thread "main" java.lang.NoClassDefFoundError: org/jsoup/Jsoup
at com.copiedcrawler.SpiderLeg.crawl(SpiderLeg.java:35)
at com.copiedcrawler.Spider.search(Spider.java:40)
at com.copiedcrawler.SpiderTest.main(SpiderTest.java:9)
Caused by: java.lang.ClassNotFoundException: org.jsoup.Jsoup
at java.base/jdk.internal.loader.BuiltinClassLoader.loadClass(BuiltinClassLoader.java:602)
at java.base/jdk.internal.loader.ClassLoaders$AppClassLoader.loadClass(ClassLoaders.java:178)
at java.base/java.lang.ClassLoader.loadClass(ClassLoader.java:522)
... 3 more
Spider class
package com.copiedcrawler;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Set;
public class Spider
{
private static final int MAX_PAGES_TO_SEARCH = 10;
private Set<String> pagesVisited = new HashSet<String>();
private List<String> pagesToVisit = new LinkedList<String>();
public void search(String url, String searchWord)
{
while(this.pagesVisited.size() < MAX_PAGES_TO_SEARCH)
{
String currentUrl;
SpiderLeg leg = new SpiderLeg();
if(this.pagesToVisit.isEmpty())
{
currentUrl = url;
this.pagesVisited.add(url);
}
else
{
currentUrl = this.nextUrl();
}
leg.crawl(currentUrl); // Lots of stuff happening here. Look at the crawl method in
// SpiderLeg
boolean success = leg.searchForWord(searchWord);
if(success)
{
System.out.println(String.format("**Success** Word %s found at %s", searchWord, currentUrl));
break;
}
this.pagesToVisit.addAll(leg.getLinks());
}
System.out.println("\n**Done** Visited " + this.pagesVisited.size() + " web page(s)");
}
/**
* Returns the next URL to visit (in the order that they were found). We also do a check to make
* sure this method doesn't return a URL that has already been visited.
*
* @return
*/
private String nextUrl()
{
String nextUrl;
do
{
nextUrl = this.pagesToVisit.remove(0);
} while(this.pagesVisited.contains(nextUrl));
this.pagesVisited.add(nextUrl);
return nextUrl;
}
}
SpiderLeg class
package com.copiedcrawler;
import java.io.IOException;
import java.util.LinkedList;
import java.util.List;
import org.jsoup.Connection;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;
public class SpiderLeg
{
// We'll use a fake USER_AGENT so the web server thinks the robot is a normal web browser.
private static final String USER_AGENT =
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/13.0.782.112 Safari/535.1";
private List<String> links = new LinkedList<String>();
private Document htmlDocument;
/**
* This performs all the work. It makes an HTTP request, checks the response, and then gathers
* up all the links on the page. Perform a searchForWord after the successful crawl
*
* @param url
* - The URL to visit
* @return whether or not the crawl was successful
*/
public boolean crawl(String url)
{
try
{
Connection connection = Jsoup.connect(url).userAgent(USER_AGENT);
Document htmlDocument = connection.get();
this.htmlDocument = htmlDocument;
if(connection.response().statusCode() == 200) // 200 is the HTTP OK status code
// indicating that everything is great.
{
System.out.println("\n**Visiting** Received web page at " + url);
}
if(!connection.response().contentType().contains("text/html"))
{
System.out.println("**Failure** Retrieved something other than HTML");
return false;
}
Elements linksOnPage = htmlDocument.select("a[href]");
System.out.println("Found (" + linksOnPage.size() + ") links");
for(Element link : linksOnPage)
{
this.links.add(link.absUrl("href"));
}
return true;
}
catch(IOException ioe)
{
// We were not successful in our HTTP request
return false;
}
}
/**
* Performs a search on the body of on the HTML document that is retrieved. This method should
* only be called after a successful crawl.
*
* @param searchWord
* - The word or string to look for
* @return whether or not the word was found
*/
public boolean searchForWord(String searchWord)
{
// Defensive coding. This method should only be used after a successful crawl.
if(this.htmlDocument == null)
{
System.out.println("ERROR! Call crawl() before performing analysis on the document");
return false;
}
System.out.println("Searching for the word " + searchWord + "...");
String bodyText = this.htmlDocument.body().text();
return bodyText.toLowerCase().contains(searchWord.toLowerCase());
}
public List<String> getLinks()
{
return this.links;
}
}
SpiderTest class
package com.copiedcrawler;
public class SpiderTest {
public static void main(String[] args) {
// TODO Auto-generated method stub
Spider s1 = new Spider();
s1.search("https://www.w3schools.com/html/", "html");
}
}
Based on the stack trace, you are running the Java program from the command line and you forgot to add jsoup to the classpath. Try running
java -cp classes:libs/jsoup.jar com.copiedcrawler.SpiderTest
Where classes is the directory containing your compiled classes and libs is the folder with the libraries.
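Note that the classpath separator is platform-specific: the colon above works on Linux/macOS, while on Windows it is a semicolon:
java -cp "classes;libs/jsoup.jar" com.copiedcrawler.SpiderTest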
You might have added the jsoup JAR file to the Modulepath. You need to add the JAR file to the Classpath instead.
Follow the steps below:
- Remove the jsoup JAR from the libraries.
- Project -> Build Path -> Configure Build Path -> Libraries -> Classpath -> Add External JARs.
- Apply and Close.
- Re-run the project.
Now it should work.
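Alternatively, if your project really is a modular (Java 9+) project and you want to keep jsoup on the Modulepath, your module descriptor must require it. A minimal sketch, assuming a jsoup version recent enough to declare the automatic module name org.jsoup (the module name com.copiedcrawler below is an assumption):
// module-info.java (sketch)
module com.copiedcrawler {
    requires org.jsoup; // jsoup's automatic module name
}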

I have a problem with the Java SSHClient class; specifically, the Expect method does not return data as expected. What could be happening?

I have used the Java SSHClient class to connect to a Juniper router. When I enter the command in a PuTTY console it returns the result without problem, but when doing it from Java code it does not save, or does not print, what is returned by the command. Could you tell me what to do?
I am sharing the SSH class that I am using, so you can reproduce the problem:
package com.mycompany;
import static net.sf.expectit.matcher.Matchers.contains;
import java.io.IOException;
import java.util.concurrent.TimeUnit;
import net.schmizz.sshj.SSHClient;
import net.schmizz.sshj.connection.channel.direct.Session;
import net.schmizz.sshj.connection.channel.direct.Session.Shell;
import net.schmizz.sshj.transport.verification.PromiscuousVerifier;
import net.sf.expectit.Expect;
import net.sf.expectit.ExpectBuilder;
public class sshj {
/**
* Launch Commands to Provisioning
*
* @param host
* @param port
* @param user
* @param password
* @param commands
* @param commandsSplitBy
* @param expectSplitBy
* @param timeoutInSeconds
* @return result
*/
public static String launchCommands(String host, Integer port, String user, String password, String commands,
String commandsSplitBy, String expectSplitBy, Integer timeoutInSeconds) {
String result = "";
StringBuilder wholeBuffer = new StringBuilder();
SSHClient mSSSHClient = new SSHClient();
mSSSHClient.addHostKeyVerifier(new PromiscuousVerifier());
Session mSession = null;
Shell mShell = null;
Expect mExpect = null;
String[] splitCommands = commands.split(commandsSplitBy);
try {
mSSSHClient.connect(host, port);
mSSSHClient.authPassword(user, password);
mSession = mSSSHClient.startSession();
mShell = mSession.startShell();
mExpect = new ExpectBuilder()
.withOutput(mShell.getOutputStream())
.withInputs(mShell.getInputStream())
.withEchoInput(wholeBuffer)
.withEchoOutput(wholeBuffer)
// .withEchoInput(System.out)
// .withEchoOutput(System.err)
.withExceptionOnFailure()
.withTimeout(timeoutInSeconds, TimeUnit.SECONDS).build();
// When expectSplitBy is equals to ""
if ("".equalsIgnoreCase(expectSplitBy)) {
for (String commandExpect : splitCommands) {
mExpect.sendLine(commandExpect);
}
} else { // When expectSplitBy is not equals to ""
for (String commandExpect : splitCommands) {
String[] commandExpectSplit = commandExpect.split(expectSplitBy);
mExpect.sendLine(commandExpectSplit[0]);
mExpect.expect(contains(commandExpectSplit[1]));
}
}
mShell.close();
mSession.close();
mSSSHClient.disconnect();
result = wholeBuffer.toString();
} catch (IOException e) {
result = wholeBuffer.toString().concat(" The Exception is> ").concat(e.toString());
e.printStackTrace();
try {
mExpect.sendLine("exit");
mShell.close();
mSession.close();
mSSSHClient.disconnect();
} catch (IOException e1) {
e1.printStackTrace();
}
}
return result;
}
}
I found the problem: the command sent contains some strange characters at the beginning that look like blank space. Once I eliminated them, the class executed without problem.
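In case it is useful to others, here is a minimal sketch of how such characters could be stripped before sending, assuming they are invisible Unicode control/format characters (the sanitize helper below is hypothetical, not part of the class above):
// Hypothetical helper: remove invisible control/format characters and
// surrounding whitespace before handing a command to Expect.
private static String sanitize(String command) {
    // \p{C} matches Unicode control and format characters
    return command.replaceAll("\\p{C}", "").trim();
}
// Usage inside launchCommands:
mExpect.sendLine(sanitize(commandExpectSplit[0]));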
I tagged mule because that is where I'm working with this class, which I know is from a third party; that's why I shared all the code, so you could run tests. I am very sorry if you did not find my question clear.

Extension of `MavenProjectWizard` possible?

What I want: I have an editor plug-in for my custom DSL. I want to offer the user a project wizard to set up a new DSL project. Normally these projects are Maven projects, so I want to support setting up the project directly as a Maven project. To do that I want to extend the class MavenProjectWizard from the package org.eclipse.m2e.core.ui.internal.wizards and then add another wizard page with the details regarding the DSL project.
What I have: This is my current attempt:
/*
* Copyright (c) 2017 RWTH Aachen. All rights reserved.
*
* http://www.se-rwth.de/
*/
package de.se_rwth.transformationeditor.wizard;
import java.io.IOException;
import java.io.InputStream;
import java.net.URL;
import org.eclipse.core.resources.IFolder;
import org.eclipse.core.resources.IProject;
import org.eclipse.core.resources.IResource;
import org.eclipse.core.resources.IWorkspaceRoot;
import org.eclipse.core.resources.ResourcesPlugin;
import org.eclipse.core.runtime.CoreException;
import org.eclipse.jface.dialogs.MessageDialog;
import org.eclipse.jface.viewers.IStructuredSelection;
import org.eclipse.jface.wizard.WizardPage;
import org.eclipse.m2e.core.ui.internal.wizards.MavenProjectWizard;
import org.eclipse.ui.IWorkbench;
import org.eclipse.ui.wizards.newresource.BasicNewResourceWizard;
import jline.internal.Log;
/**
* Offers a wizard to create a new Transformation project
*
* @author (last commit) $Philipp Nolte$
* @version $Revision$, $12.1.2017$
* @since 0.0.3
*/
#SuppressWarnings("restriction")
public class CDProjectWizard extends MavenProjectWizard{
protected CDWizardPageOne one;
protected WizardPage currentPage;
private IWorkbench workbench;
public CDProjectWizard() {
super();
}
/**
* @see org.eclipse.jface.wizard.IWizard#addPages()
*/
@Override
public void addPages() {
one = new CDWizardPageOne();
addPage(one);
super.addPages();
}
/**
* @see org.eclipse.ui.wizards.newresource.BasicNewProjectResourceWizard#init(org.eclipse.ui.IWorkbench,
* org.eclipse.jface.viewers.IStructuredSelection)
*/
@Override
public void init(IWorkbench workbench, IStructuredSelection selection) {
super.init(workbench, selection);
this.workbench = workbench;
}
/**
* @see org.eclipse.jface.wizard.IWizard#getWindowTitle()
*/
@Override
public String getWindowTitle() {
return "New Class Diagram Transformation Project";
}
/**
* Creates a new Transformation project with the project name given in the
* wizard. Inside this project a new folder named "Transformations" is
* created. If a project with the same name already exists, an error window is
* shown. In addition to that, checks if the user wants to create CD or MA example
* files and creates them if wanted.
*
* @see org.eclipse.jface.wizard.IWizard#performFinish()
*/
@Override
public boolean performFinish() {
if (this.canFinish()) {
// Create new project
String projectName = this.one.getProjectNameText();
IWorkspaceRoot root = ResourcesPlugin.getWorkspace().getRoot();
IProject project = root.getProject(projectName);
try {
if (!project.exists()) {
project.create(null);
}
else {
project.refreshLocal(IResource.DEPTH_INFINITE, null);
MessageDialog.openError(this.workbench.getActiveWorkbenchWindow().getShell(),
"Project creation error", "Project with this name already exists");
return false;
}
if (!project.isOpen()) {
project.open(null);
}
String transformationFolderName = "Transformations";
// Check subfolder name
if (!one.getRootFolderText().isEmpty()) {
transformationFolderName = one.getRootFolderText();
}
IFolder binFolder = project.getFolder(transformationFolderName);
if (!binFolder.exists()) {
one.createNewFolder(binFolder, false, true, null);
// Checks if user wants to create a CD example file
if (one.createCDExampleFile()) {
InputStream demoFileContents = null;
try {
// If the user wants to, an example file is created
URL url = new URL("platform:/plugin/cdtrans-editor/src/main/resources/exampleFiles/RefactorCDs");
InputStream inputStream = url.openConnection().getInputStream();
binFolder.getFile("RefactorCDs.cdtr").create(inputStream, true, null);
}
catch (IOException e) {
Log.error("TransProjectWizard: Error while creating Demo file", e);
MessageDialog.openError(this.workbench.getActiveWorkbenchWindow().getShell(),
"Example file creation error", "There was an error while creating the example file");
}
finally {
if (demoFileContents != null) {
try {
demoFileContents.close();
}
catch (IOException e) {
Log.error("TransProjectWizard: Error while closing file stream", e);
}
}
}
}
BasicNewResourceWizard.selectAndReveal(binFolder, this.workbench.getActiveWorkbenchWindow());
}
}
catch (CoreException e) {
Log.error("TransProjectWizard: Error while creating new Project", e);
}
}
return true;
}
}
But if I start this, a runtime error occurs when I try to open the wizard.
I already tried to export the plug-in and install it in a fresh Eclipse installation, where the m2e package is installed, but with the same result.
Any thoughts on how to fix it?

Google drive API does not list all my files

I'm using a pretty simple sample to retrieve my files on Google Drive. There are two weird things:
- The first time I run the sample, Google says "the application would like to access your files blahblah". Well, I confirmed. But the second time, it says "the application would like to have offline access"... why?
- Probably (but I'm not sure) it's after I confirm it the second time that I only get the files that I uploaded with this application.
The code is:
package com.google.api.services.samples.drive.cmdline;
import com.google.api.client.auth.oauth2.Credential;
import com.google.api.client.extensions.java6.auth.oauth2.AuthorizationCodeInstalledApp;
import com.google.api.client.extensions.jetty.auth.oauth2.LocalServerReceiver;
import com.google.api.client.googleapis.GoogleUtils;
import com.google.api.client.googleapis.auth.oauth2.GoogleAuthorizationCodeFlow;
import com.google.api.client.googleapis.auth.oauth2.GoogleClientSecrets;
import com.google.api.client.googleapis.media.MediaHttpDownloader;
import com.google.api.client.googleapis.media.MediaHttpUploader;
import com.google.api.client.http.FileContent;
import com.google.api.client.http.GenericUrl;
import com.google.api.client.http.HttpTransport;
import com.google.api.client.http.javanet.NetHttpTransport;
import com.google.api.client.json.JsonFactory;
import com.google.api.client.json.jackson2.JacksonFactory;
import com.google.api.client.util.Preconditions;
import com.google.api.client.util.store.DataStoreFactory;
import com.google.api.client.util.store.FileDataStoreFactory;
import com.google.api.services.drive.Drive;
import com.google.api.services.drive.DriveScopes;
import com.google.api.services.drive.model.File;
import com.google.api.services.drive.model.FileList;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.OutputStream;
import java.net.InetSocketAddress;
import java.net.Proxy;
import java.util.Collections;
import java.util.List;
/**
* A sample application that runs multiple requests against the Drive API. The requests this sample
* makes are:
* <ul>
* <li>Does a resumable media upload</li>
* <li>Updates the uploaded file by renaming it</li>
* <li>Does a resumable media download</li>
* <li>Does a direct media upload</li>
* <li>Does a direct media download</li>
* </ul>
*
* @author rmistry@google.com (Ravi Mistry)
*/
public class DriveSample {
/**
* Be sure to specify the name of your application. If the application name is {@code null} or
* blank, the application will log a warning. Suggested format is "MyCompany-ProductName/1.0".
*/
private static final String APPLICATION_NAME = "testing";
private static final String UPLOAD_FILE_PATH = "c:\\Users\\bnie\\workspace\\Servers\\.project";
private static final String DIR_FOR_DOWNLOADS = "c:\\temp";
private static final java.io.File UPLOAD_FILE = new java.io.File(UPLOAD_FILE_PATH);
/** Directory to store user credentials. */
private static final java.io.File DATA_STORE_DIR =
new java.io.File(System.getProperty("user.home"), ".store/drive_sample");
/**
* Global instance of the {@link DataStoreFactory}. The best practice is to make it a single
* globally shared instance across your application.
*/
private static FileDataStoreFactory dataStoreFactory;
/** Global instance of the HTTP transport. */
private static HttpTransport httpTransport;
/** Global instance of the JSON factory. */
private static final JsonFactory JSON_FACTORY = JacksonFactory.getDefaultInstance();
/** Global Drive API client. */
private static Drive drive;
/** Authorizes the installed application to access user's protected data. */
private static Credential authorize() throws Exception {
// load client secrets
GoogleClientSecrets clientSecrets = GoogleClientSecrets.load(JSON_FACTORY,
new InputStreamReader(DriveSample.class.getResourceAsStream("/client_secrets.json")));
if (clientSecrets.getDetails().getClientId().startsWith("Enter")
|| clientSecrets.getDetails().getClientSecret().startsWith("Enter ")) {
System.out.println(
"Enter Client ID and Secret from https://code.google.com/apis/console/?api=drive "
+ "into drive-cmdline-sample/src/main/resources/client_secrets.json");
System.exit(1);
}
// set up authorization code flow
GoogleAuthorizationCodeFlow flow = new GoogleAuthorizationCodeFlow.Builder(httpTransport,
JSON_FACTORY, clientSecrets, Collections.singleton(DriveScopes.DRIVE_FILE))
.setDataStoreFactory(dataStoreFactory).build();
// authorize
return new AuthorizationCodeInstalledApp(flow, new LocalServerReceiver()).authorize("user");
}
public static void main(String[] args) {
Preconditions.checkArgument(
!UPLOAD_FILE_PATH.startsWith("Enter ") && !DIR_FOR_DOWNLOADS.startsWith("Enter "),
"Please enter the upload file path and download directory in %s", DriveSample.class);
try {
// httpTransport = GoogleNetHttpTransport.newTrustedTransport();
NetHttpTransport.Builder builder = new NetHttpTransport.Builder();
builder.trustCertificates(GoogleUtils.getCertificateTrustStore());
httpTransport = builder.build();
dataStoreFactory = new FileDataStoreFactory(DATA_STORE_DIR);
// authorization
Credential credential = authorize();
// set up the global Drive instance
drive = new Drive.Builder(httpTransport, JSON_FACTORY, credential)
.setApplicationName(APPLICATION_NAME).build();
FileList l = drive.files().list().setMaxResults(10).execute();
List<File> files = l.getItems();
if (files == null || files.size() == 0) {
System.out.println("No files found.");
} else {
System.out.println("Files:");
for (File file : files) {
System.out.printf("%s (%s)\n", file.getTitle(), file.getId());
}
}
// run commands
// View.header1("Starting Resumable Media Upload");
// File uploadedFile = uploadFile(false);
//
// View.header1("Updating Uploaded File Name");
// File updatedFile = updateFileWithTestSuffix(uploadedFile.getId());
//
// View.header1("Starting Resumable Media Download");
// downloadFile(false, updatedFile);
//
// View.header1("Starting Simple Media Upload");
// uploadedFile = uploadFile(true);
//
// View.header1("Starting Simple Media Download");
// downloadFile(true, uploadedFile);
//
// View.header1("Success!");
return;
} catch (IOException e) {
System.err.println(e.getMessage());
} catch (Throwable t) {
t.printStackTrace();
}
System.exit(1);
}
/** Uploads a file using either resumable or direct media upload. */
private static File uploadFile(boolean useDirectUpload) throws IOException {
File fileMetadata = new File();
fileMetadata.setTitle(UPLOAD_FILE.getName());
FileContent mediaContent = new FileContent("image/jpeg", UPLOAD_FILE);
Drive.Files.Insert insert = drive.files().insert(fileMetadata, mediaContent);
MediaHttpUploader uploader = insert.getMediaHttpUploader();
uploader.setDirectUploadEnabled(useDirectUpload);
uploader.setProgressListener(new FileUploadProgressListener());
return insert.execute();
}
/** Updates the name of the uploaded file to have a "drivetest-" prefix. */
private static File updateFileWithTestSuffix(String id) throws IOException {
File fileMetadata = new File();
fileMetadata.setTitle("drivetest-" + UPLOAD_FILE.getName());
Drive.Files.Update update = drive.files().update(id, fileMetadata);
return update.execute();
}
/** Downloads a file using either resumable or direct media download. */
private static void downloadFile(boolean useDirectDownload, File uploadedFile)
throws IOException {
// create parent directory (if necessary)
java.io.File parentDir = new java.io.File(DIR_FOR_DOWNLOADS);
if (!parentDir.exists() && !parentDir.mkdirs()) {
throw new IOException("Unable to create parent directory");
}
OutputStream out = new FileOutputStream(new java.io.File(parentDir, uploadedFile.getTitle()));
MediaHttpDownloader downloader =
new MediaHttpDownloader(httpTransport, drive.getRequestFactory().getInitializer());
downloader.setDirectDownloadEnabled(useDirectDownload);
downloader.setProgressListener(new FileDownloadProgressListener());
downloader.download(new GenericUrl(uploadedFile.getDownloadUrl()), out);
}
}
Try changing the scope that you used: use DriveScopes.DRIVE instead of DriveScopes.DRIVE_FILE. The DRIVE_FILE scope only grants access to files created or opened by your application, which is why you only see the files you uploaded with it. For more information about scopes, check this documentation.
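Concretely, in the authorize() method above, that means changing the scope passed to the flow builder, as in the sketch below; you may also need to delete the stored credential under ~/.store/drive_sample so that the broader scope is granted on the next run:
// Same flow as in the question, with the broader DRIVE scope.
GoogleAuthorizationCodeFlow flow = new GoogleAuthorizationCodeFlow.Builder(httpTransport,
    JSON_FACTORY, clientSecrets, Collections.singleton(DriveScopes.DRIVE))
    .setDataStoreFactory(dataStoreFactory).build();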
Also check this related SO question.

Error importing classes for RDF crawler

I'm using an RDF crawler, in which I have a class with the following imports:
import edu.unika.aifb.rdf.crawler.*;
import com.hp.hpl.jena.rdf.model.*;
import com.hp.hpl.jena.util.FileManager;
These imports are flagged as errors; I tried it with the Jena packages I had attached, but that did not make any difference.
Update:
Full SampleCrawl.java class content:
import java.util.*;
import edu.unika.aifb.rdf.crawler.*;
/**
* Call this class with 3 arguments - URL to crawl to,
* depth and time in seconds
*/
public class SampleCrawl {
/**
* @param uRI
* @param hf
* @param depth
* @param time
*/
#SuppressWarnings("rawtypes")
public SampleCrawl(Vector uRI, Vector hf, int depth, int time){
// Initialize Crawling parameters
CrawlConsole c = new CrawlConsole(uRI,hf,depth,time);
// get an ontology file from its local location
// (OPTIONAL)
c.setLocalNamespace("http://www.daml.org/2000/10/daml-ont","c:\\temp\\rdf\\schemas\\daml-ont.rdf");
// set all the paths to get all the results
c.setLogPath("c:\\temp\\crawllog.xml");
c.setCachePath("c:\\temp\\crawlcache.txt");
c.setModelPath("c:\\temp\\crawlmodel.rdf");
try{
// crawl and get RDF model
c.start();
// This writes all three result files out
c.writeResults();
}catch(Exception e){
}
}
/**
* @param args
* @throws Exception
*/
@SuppressWarnings({ "rawtypes", "unchecked" })
public static void main(String[] args) throws Exception {
if (args.length != 3) {
System.err.println("Usage: java -cp [JARs] SampleCrawl [URL] [depth:int] [time:int]");
System.exit(0);
}
Vector uris = new Vector();
uris.add(args[0]);
// no host filtering - crawl to all hosts
Vector hostfilter = null;
/* You may want to do something else to enable host filtering:
* Vector hostfilter = new Vector();
* hostfilter.add("http://www.w3.org");
*/
int depth = 2;
int time = 60;
try {
depth = Integer.parseInt(args[1]);
time = Integer.parseInt(args[2]);
}
catch (Exception e) {
System.err.println("Illegal argument types:");
System.err.println("Argument list: URI:String depth:int time(s):int");
System.exit(0);
}
new SampleCrawl(uris,hostfilter,depth,time);
}
}
Question:
How do I fix the error that occurs on the line import edu.unika.aifb.rdf.crawler.*;?
I googled the package that you're trying to import, and it appears that you're using Kaon. Assuming that's so, you have made an error in your import declaration. You have:
import edu.unika.aifb.rdf.crawler.*;
whereas the download available on SourceForge would require:
import edu.unika.aifb.rdf.rdfcrawler.*;
As an aside, it would be helpful if you would include information, such as "I'm trying to use Kaon's rdfcrawler from ..." in your question. Otherwise, we have to try to guess important details in your setup.
