I am working on true SSO in a Java application running on Windows 10. My application already has Kerberos auth using Java's GSS-API, but that does not work on any modern Windows, especially with strict security policies and domain users, so I would like to replace the current authorization system with Waffle with minimal impact on the overall app design - which I think should be possible if I can get a KerberosTicket instance somehow. I am struggling to write this functionality; so far I have managed to request some token, but I don't really know what this token is, and it does not match the Kerberos ticket format. Here is my code (actually mostly found online):
public byte[] getServiceTicketSSPI() {
    final String securityPackage = "Kerberos";
    final String targetName = "<disclosed>";
    IWindowsCredentialsHandle clientCredentials = null;
    WindowsSecurityContextImpl clientContext = null;
    final String currentUser = WindowsAccountImpl.getCurrentUsername();
    try {
        clientCredentials = WindowsCredentialsHandleImpl.getCurrent(securityPackage);
        clientCredentials.initialize();
        // initial client security context
        clientContext = new WindowsSecurityContextImpl();
        clientContext.setPrincipalName(currentUser);
        clientContext.setCredentialsHandle(clientCredentials);
        clientContext.setSecurityPackage(securityPackage);
        final Sspi.SecBufferDesc continueToken = null;
        do {
            if (debug) // debug is a boolean flag of the enclosing class
                System.out.println("Using target name: " + targetName);
            clientContext.initialize(clientContext.getHandle(), continueToken, targetName);
        } while (clientContext.isContinue());
        return clientContext.getToken();
    } finally {
        if (clientContext != null)
            clientContext.dispose();
        if (clientCredentials != null)
            clientCredentials.dispose();
    }
}
To be fair, I am not even sure if SSPI allows me to see the real ticket at all. Am I even going in the right direction with this snippet? I would be really happy to see any clues as to what I should do. It would be perfect to have a KerberosTicket instance in the end.
Below are the steps to do single sign-on using Waffle for a standalone Java client, without a server:
Create the client credentials.
Get the service ticket using initialize of WindowsSecurityContextImpl (which wraps SSPI's InitializeSecurityContext).
Get the WindowsIdentity using acceptSecurityToken of WindowsAuthProviderImpl.
Original link: https://exceptionshub.com/getting-kerberos-service-ticket-using-waffle-in-java.html
For client-server SSO, you should follow https://code.dblock.org/2010/04/08/pure-java-waffle.html. The code below shows the standalone Java SSO using Kerberos.
import com.sun.jna.platform.win32.Sspi;
import waffle.windows.auth.IWindowsCredentialsHandle;
import waffle.windows.auth.IWindowsIdentity;
import waffle.windows.auth.IWindowsSecurityContext;
import waffle.windows.auth.impl.WindowsAccountImpl;
import waffle.windows.auth.impl.WindowsAuthProviderImpl;
import waffle.windows.auth.impl.WindowsCredentialsHandleImpl;
import waffle.windows.auth.impl.WindowsSecurityContextImpl;
public class KerberosSingleSignOn {

    public static void main(String[] args) {
        try {
            System.out.println(getWindowsIdentity().getFqn());
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    public static IWindowsIdentity getWindowsIdentity() throws Exception {
        try {
            byte[] kerberosToken = getServiceTicketSSPI();
            WindowsAuthProviderImpl provider = new WindowsAuthProviderImpl();
            // Accept the token on the "server" side of the same process to obtain the identity.
            IWindowsSecurityContext securityContext = provider
                    .acceptSecurityToken("client-connection", kerberosToken, "Kerberos");
            return securityContext.getIdentity();
        } catch (Exception e) {
            throw new Exception("Failed to process Kerberos token", e);
        }
    }

    public static byte[] getServiceTicketSSPI() throws Exception {
        final String securityPackage = "Kerberos";
        IWindowsCredentialsHandle clientCredentials = null;
        WindowsSecurityContextImpl clientContext = null;
        final String currentUser = WindowsAccountImpl.getCurrentUsername();
        try {
            clientCredentials = WindowsCredentialsHandleImpl.getCurrent(securityPackage);
            clientCredentials.initialize();
            // initial client security context
            clientContext = new WindowsSecurityContextImpl();
            clientContext.setCredentialsHandle(clientCredentials.getHandle());
            /* OR, depending on the Waffle version:
            clientContext.setCredentialsHandle(clientCredentials);
            */
            clientContext.setSecurityPackage(securityPackage);
            final Sspi.SecBufferDesc continueToken = null;
            do {
                System.out.println("Using current username: " + currentUser);
                clientContext.initialize(clientContext.getHandle(), continueToken, currentUser);
            } while (clientContext.isContinue());
            return clientContext.getToken();
        } catch (Exception e) {
            throw new Exception("Failed to process Kerberos token", e);
        } finally {
            if (clientContext != null)
                clientContext.dispose();
            if (clientCredentials != null)
                clientCredentials.dispose();
        }
    }
}
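A note on the original question, since it asks what the returned token actually is: for the "Kerberos" package, InitializeSecurityContext returns a GSS-API framed initial context token (an AP-REQ wrapped per RFC 2743), not a raw ticket, and as far as I know SSPI will not hand you anything you can turn into a javax.security.auth.kerberos.KerberosTicket. A minimal sketch to sanity-check that framing (this helper and its heuristic offset scan are my own, not part of Waffle):
// A GSS-API initial context token starts with an ASN.1 [APPLICATION 0] tag
// (0x60), a DER length, and then the Kerberos 5 mech OID 1.2.840.113554.1.2.2.
private static final byte[] KRB5_MECH_OID = {
        0x06, 0x09, 0x2a, (byte) 0x86, 0x48, (byte) 0x86, (byte) 0xf7,
        0x12, 0x01, 0x02, 0x02
};

public static boolean looksLikeKerberosGssToken(byte[] token) {
    if (token == null || token.length < 16 || token[0] != 0x60) {
        return false;
    }
    // The OID follows the outer tag and its DER length; scan the first few
    // offsets instead of fully parsing the length encoding.
    outer:
    for (int i = 1; i < 8 && i + KRB5_MECH_OID.length <= token.length; i++) {
        for (int j = 0; j < KRB5_MECH_OID.length; j++) {
            if (token[i + j] != KRB5_MECH_OID[j]) {
                continue outer;
            }
        }
        return true;
    }
    return false;
}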
Related
I'm using AWS Keyspaces (Cassandra 3.11.2) with Apache Flink on AWS EMR. Sometimes the query below throws an exception, and the same code used on AWS Lambda also threw the same NoHost exception. What did I do wrong?
String query = "INSERT INTO TEST (field1, field2) VALUES(?, ?)";
PreparedStatement prepared = CassandraConnector.prepare(query);
int i = 0;
BoundStatement bound = prepared.bind().setString(i++, "Field1").setString(i++, "Field2")
.setConsistencyLevel(ConsistencyLevel.LOCAL_QUORUM);
ResultSet rs = CassandraConnector.execute(bound);
Stack trace:
at com.datastax.oss.driver.api.core.NoNodeAvailableException.copy(NoNodeAvailableException.java:40)
at com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures.getUninterruptibly(CompletableFutures.java:149)
at com.datastax.oss.driver.internal.core.cql.CqlRequestSyncProcessor.process(CqlRequestSyncProcessor.java:53)
at com.datastax.oss.driver.internal.core.cql.CqlRequestSyncProcessor.process(CqlRequestSyncProcessor.java:30)
at com.datastax.oss.driver.internal.core.session.DefaultSession.execute(DefaultSession.java:230)
at com.datastax.oss.driver.api.core.cql.SyncCqlSession.execute(SyncCqlSession.java:53)
at com.test.manager.connectors.CassandraConnector.execute(CassandraConnector.java:16)
at com.test.repository.impl.BackupRepositoryImpl.insert(BackupRepositoryImpl.java:36)
at com.test.service.impl.BackupServiceImpl.insert(BackupServiceImpl.java:18)
at com.test.flink.function.AsyncBackupFunction.processMessage(AsyncBackupFunction.java:78)
at com.test.flink.function.AsyncBackupFunction.lambda$asyncInvoke$0(AsyncBackupFunction.java:35)
at java.util.concurrent.CompletableFuture$AsyncSupply.run(CompletableFuture.java:1604)
at java.util.concurrent.CompletableFuture$AsyncSupply.exec(CompletableFuture.java:1596)
at java.util.concurrent.ForkJoinTask.doExec(ForkJoinTask.java:289)
at java.util.concurrent.ForkJoinPool$WorkQueue.runTask(ForkJoinPool.java:1056)
at java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1692)
at java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:157)
This is my code:
CassandraConnector.java:
Because the cost of initializing a PreparedStatement is high, I cache them.
public class CassandraConnector {
    private static final ConcurrentHashMap<String, PreparedStatement> preparedStatementCache = new ConcurrentHashMap<String, PreparedStatement>();

    public static ResultSet execute(BoundStatement bound) {
        CqlSession session = CassandraManager.getSessionInstance();
        return session.execute(bound);
    }

    public static ResultSet execute(String query) {
        CqlSession session = CassandraManager.getSessionInstance();
        return session.execute(query);
    }

    public static PreparedStatement prepare(String query) {
        PreparedStatement result = preparedStatementCache.get(query);
        if (result == null) {
            CqlSession session = CassandraManager.getSessionInstance();
            result = session.prepare(query);
            preparedStatementCache.putIfAbsent(query, result);
        }
        return result;
    }
}
CassandraManager.java:
I'm using double-checked locking to create the singleton session object.
public class CassandraManager {
    private static final Logger logger = LoggerFactory.getLogger(CassandraManager.class);
    private static final String SSL_CASSANDRA_PASSWORD = "password";
    private static volatile CqlSession session;

    static {
        try {
            initSession();
        } catch (Exception e) {
            logger.error("Error CassandraManager getSessionInstance", e);
        }
    }

    private static void initSession() {
        List<InetSocketAddress> contactPoints = Collections.singletonList(InetSocketAddress.createUnresolved(
                "cassandra.ap-southeast-1.amazonaws.com", 9142));
        DriverConfigLoader loader = DriverConfigLoader.fromClasspath("application.conf");
        Long start = BaseHelper.getTime();
        session = CqlSession.builder().addContactPoints(contactPoints).withConfigLoader(loader)
                .withAuthCredentials(AppUtil.getProperty("cassandra.username"),
                        AppUtil.getProperty("cassandra.password"))
                .withSslContext(getSSLContext()).withLocalDatacenter("ap-southeast-1")
                .withKeyspace(AppUtil.getProperty("cassandra.keyspace")).build();
        logger.info("End connect: " + (new Date().getTime() - start));
    }

    public static CqlSession getSessionInstance() {
        if (session == null || session.isClosed()) {
            synchronized (CassandraManager.class) {
                if (session == null || session.isClosed()) {
                    initSession();
                }
            }
        }
        return session;
    }

    public static SSLContext getSSLContext() {
        InputStream in = null;
        try {
            KeyStore ks = KeyStore.getInstance("JKS");
            in = CassandraManager.class.getClassLoader().getResourceAsStream("cassandra_truststore.jks");
            ks.load(in, SSL_CASSANDRA_PASSWORD.toCharArray());
            TrustManagerFactory tmf = TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm());
            tmf.init(ks);
            SSLContext ctx = SSLContext.getInstance("TLS");
            ctx.init(null, tmf.getTrustManagers(), null);
            return ctx;
        } catch (Exception e) {
            logger.error("Error CassandraConnector getSSLContext", e);
        } finally {
            if (in != null) {
                try {
                    in.close();
                } catch (IOException e) {
                    logger.error("", e);
                }
            }
        }
        return null;
    }
}
application.conf
datastax-java-driver {
    basic.request {
        timeout = 5 seconds
        consistency = LOCAL_ONE
    }
    advanced.connection {
        max-requests-per-connection = 1024
        pool {
            local.size = 1
            remote.size = 1
        }
    }
    advanced.reconnect-on-init = true
    advanced.reconnection-policy {
        class = ExponentialReconnectionPolicy
        base-delay = 1 second
        max-delay = 60 seconds
    }
    advanced.retry-policy {
        class = DefaultRetryPolicy
    }
    advanced.protocol {
        version = V4
    }
    advanced.heartbeat {
        interval = 30 seconds
        timeout = 1 second
    }
    advanced.session-leak.threshold = 8
    advanced.metadata.token-map.enabled = false
}
There are two scenarios where the driver would report NoNodeAvailableException:
Nodes are unresponsive/unavailable and the driver has marked all of them as down.
All the contact points provided are invalid.
If some inserts work but you eventually run into NoNodeAvailableException, that indicates to me that the nodes are getting overloaded and eventually become unresponsive, so the driver no longer picks a coordinator since they're all marked as "down".
If none of the requests work at all, it means that the contact points are unreachable or unresolvable, so the driver can't connect to the cluster. Cheers!
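To help tell the two scenarios apart, here is a small diagnostic sketch (Java driver 4.x; the class and method names are my own) that dumps the driver's current view of each node:
import com.datastax.oss.driver.api.core.CqlSession;
import com.datastax.oss.driver.api.core.metadata.Node;

public final class NodeStateDump {
    // Print each node the driver knows about and whether it considers it up or down.
    public static void logNodeStates(CqlSession session) {
        for (Node node : session.getMetadata().getNodes().values()) {
            System.out.println(node.getEndPoint() + " -> " + node.getState());
        }
    }
}
If every node prints DOWN you are in the first scenario; if the node map is empty or never populates, the contact points are the problem.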
The NoHostAvailableException is a client-side exception thrown by the open-source driver after it has retried the available hosts. The open-source driver encapsulates the root cause of the retries, which can be confusing.
I suggest first improving your observability by setting up these CloudWatch metrics. You can follow this prebuilt CloudFormation template to get started; it only takes a few seconds.
Here is a set up for Keyspace & Table Metrics for Amazon Keyspaces using Cloud Watch:
https://github.com/aws-samples/amazon-keyspaces-cloudwatch-cloudformation-templates
You can also replace the retry policy with the examples found in this helper project. The retry policy in that project will either retry or throw the original exception, which removes the occurrences of NoHostAvailableException and gives you better transparency into your application. Here's the link to the GitHub repo: https://github.com/aws-samples/amazon-keyspaces-java-driver-helpers
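For example, switching application.conf over to that policy might look like this; the class name and the max-attempts option are what the helper project used at the time of writing, so double-check them against the version you pull in:
advanced.retry-policy {
    # AmazonKeyspacesRetryPolicy retries or rethrows the original cause
    # instead of surfacing a generic NoHostAvailableException
    class = com.aws.ssa.keyspaces.retry.AmazonKeyspacesRetryPolicy
    max-attempts = 3
}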
If you're using the private VPC endpoint, you will want to add the following permissions to enable more entries in the system.peers table.
Amazon Keyspaces recently announced new functionality that provides more connection points when establishing a session over private VPC endpoints.
Here is a link about how Keyspaces now automatically optimizes client connections made through AWS PrivateLink to improve availability and read/write throughput: https://aws.amazon.com/about-aws/whats-new/2021/07/amazon-keyspaces-for-apache-cassandra-now-automatically-optimi/
This link talks about using Amazon Keyspaces with interface VPC endpoints: https://docs.aws.amazon.com/keyspaces/latest/devguide/vpc-endpoints.html. To enable this new functionality you will need to grant the additional permissions DescribeNetworkInterfaces and DescribeVpcEndpoints.
{
    "Version": "2012-10-17",
    "Statement": [
        {
            "Sid": "ListVPCEndpoints",
            "Effect": "Allow",
            "Action": [
                "ec2:DescribeNetworkInterfaces",
                "ec2:DescribeVpcEndpoints"
            ],
            "Resource": "*"
        }
    ]
}
I suspect that this:
.withLocalDatacenter(AppUtil.getProperty("cassandra.localdatacenter"))
Pulls back a data center name which either does not match the keyspace replication definition or the configured data center name:
nodetool status | grep Datacenter
Basically, if your connection is defined with a local data center which does not exist, it will still try to read/write with replicas in that data center. This will fail, because it obviously cannot find nodes in a non-existent data center.
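Two quick checks in cqlsh (standard system tables on Cassandra 3.x; replace your_keyspace with your own):
-- the data center name this node reports:
SELECT data_center FROM system.local;
-- the data centers the keyspace actually replicates to:
SELECT replication FROM system_schema.keyspaces WHERE keyspace_name = 'your_keyspace';
If the name passed to withLocalDatacenter does not appear in both, that is your mismatch.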
Similar question here: NoHostAvailable error in cqlsh console
I have written a RESTful API using Apache Jersey. I am using MongoDB as my backend and used Morphia (v1.3.4) to map and persist POJOs to the database. I tried to follow the "1 application, 1 connection" rule in my API, as recommended everywhere, but I am not sure I succeeded. I run my API in Tomcat 8 and ran mongostat to watch the connections. At start, mongostat showed 1 connection to the MongoDB server. I tested my API using Postman and it worked fine. I then created a load test in SoapUI where I simulated 100 users per second, watched the update in mongostat, and saw there were 103 connections. Here is the gif which shows this behaviour.
I am not sure why there are so many connections. The interesting fact is that the number of Mongo connections is directly proportional to the number of users I create in SoapUI. Why is that? I found other similar questions, but I think I have already implemented their suggestions:
Mongo connection leak with morphia
Spring data mongodb not closing mongodb connections
My code looks like this.
DatabaseConnection.java
// Some imports
public class DatabaseConnection {
    private static volatile MongoClient instance;
    private static String cloudhost = "localhost";

    private DatabaseConnection() { }

    // Double-checked locking; the method itself must not be synchronized,
    // otherwise the fast path would still take the lock on every call.
    public static MongoClient getMongoClient() {
        if (instance == null) {
            synchronized (DatabaseConnection.class) {
                if (instance == null) {
                    ServerAddress addr = new ServerAddress(cloudhost, 27017);
                    List<MongoCredential> credentialsList = new ArrayList<MongoCredential>();
                    MongoCredential credential = MongoCredential.createCredential(
                            "test", "test", "test".toCharArray());
                    credentialsList.add(credential);
                    instance = new MongoClient(addr, credentialsList);
                }
            }
        }
        return instance;
    }
}
PourService.java
@Secured
@Path("pours")
public class PourService {
    final static Logger logger = Logger.getLogger(Pour.class);
    private static final int POUR_SIZE = 30;

    @POST
    @Consumes(MediaType.APPLICATION_JSON)
    @Produces(MediaType.APPLICATION_JSON)
    public Response createPour(String request) {
        WebApiResponse response = new WebApiResponse();
        Gson gson = new GsonBuilder().setDateFormat("dd/MM/yyyy HH:mm:ss").create();
        String message = "Pour was not created.";
        HashMap<String, Object> data = null;
        try {
            Pour pour = gson.fromJson(request, Pour.class);
            // Storing the pour
            PourRepository pourRepository = new PourRepository();
            String id = pourRepository.createPour(pour);
            data = new HashMap<String, Object>();
            if (id != null && !id.isEmpty()) {
                data.put("id", id);
                message = "Pour was created successfully.";
                logger.debug(message);
                return response.build(true, message, data, 200);
            }
            logger.debug(message);
            return response.build(false, message, data, 500);
        } catch (Exception e) {
            message = "Error while creating Pour.";
            logger.error(message, e);
            return response.build(false, message, new Object(), 500);
        }
    }
}
PourDao.java
public class PourDao extends BasicDAO<Pour, String> {
    public PourDao(Class<Pour> entityClass, Datastore ds) {
        super(entityClass, ds);
    }
}
PourRepository.java
public class PourRepository {
    private PourDao pourDao;
    final static Logger logger = Logger.getLogger(PourRepository.class);

    public PourRepository() {
        try {
            MongoClient mongoClient = DatabaseConnection.getMongoClient();
            Datastore ds = new Morphia().map(Pour.class)
                    .createDatastore(mongoClient, "tilt45");
            pourDao = new PourDao(Pour.class, ds);
        } catch (Exception e) {
            logger.error("Error while creating PourDao", e);
        }
    }

    public String createPour(Pour pour) {
        try {
            return pourDao.save(pour).getId().toString();
        } catch (Exception e) {
            logger.error("Error while creating Pour.", e);
            return null;
        }
    }
}
When I work with Mongo and Morphia I get better results using a factory pattern for the Datastore rather than for the MongoClient. For instance, check the following class:
public class DatastoreFactory {
    private final Datastore datastore;
    public DatastoreFactory(String dbHost, int dbPort, String dbName) {
        final Morphia morphia = new Morphia();
        MongoClientOptions.Builder options = MongoClientOptions.builder().socketKeepAlive(true);
        morphia.getMapper().getOptions().setStoreEmpties(true);
        final Datastore store = morphia.createDatastore(
                new MongoClient(new ServerAddress(dbHost, dbPort), options.build()), dbName);
        store.ensureIndexes();
        this.datastore = store;
    }
    public Datastore getDatastore() {
        return datastore;
    }
}
With that approach, every time you need a Datastore you can use the one provided by the factory. Of course, this can be implemented better if you use a framework/library that supports the factory pattern (e.g. HK2 with org.glassfish.hk2.api.Factory) together with singleton binding; see the sketch below.
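A minimal sketch of that HK2 approach, reusing the Pour/tilt45 names from the question; the factory class name, host, and port are placeholders:
import org.glassfish.hk2.api.Factory;
import org.mongodb.morphia.Datastore;
import org.mongodb.morphia.Morphia;
import com.mongodb.MongoClient;
import com.mongodb.ServerAddress;

public class DatastoreHk2Factory implements Factory<Datastore> {
    private final Datastore datastore;

    public DatastoreHk2Factory() {
        Morphia morphia = new Morphia().map(Pour.class);
        MongoClient client = new MongoClient(new ServerAddress("localhost", 27017));
        this.datastore = morphia.createDatastore(client, "tilt45");
    }

    @Override
    public Datastore provide() {
        return datastore; // same instance on every injection when bound as a singleton
    }

    @Override
    public void dispose(Datastore instance) {
        // nothing to release here; close the MongoClient on application shutdown instead
    }
}
You would then bind it in your Jersey/HK2 configuration with something like bindFactory(DatastoreHk2Factory.class).to(Datastore.class).in(Singleton.class) inside an AbstractBinder.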
Besides, you can check the documentation of MongoClientOptions's builder method; perhaps you can find better connection control there.
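For instance, the builder lets you cap the pool explicitly; the default is 100 connections per host, which lines up with the ~100 connections you observed under load. The values below are illustrative:
import com.mongodb.MongoClient;
import com.mongodb.MongoClientOptions;
import com.mongodb.ServerAddress;

// Cap the driver's connection pool instead of accepting the default of 100 per host.
MongoClientOptions options = MongoClientOptions.builder()
        .connectionsPerHost(20)          // maximum pooled connections per host
        .minConnectionsPerHost(5)        // keep a small warm pool
        .maxConnectionIdleTime(60_000)   // ms before an idle connection is closed
        .build();
MongoClient client = new MongoClient(new ServerAddress("localhost", 27017), options);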
I'm trying to implement this example:
https://google-developers.appspot.com/drive/auth/web-server
However, the following classes are not found: Oauth2, Userinfo.
static User getUserInfo(Credential credentials)
        throws NoUserIdException {
    Oauth2 userInfoService =
            new Oauth2.Builder(new NetHttpTransport(), new JacksonFactory(), credentials).build();
    Userinfo userInfo = null;
    try {
        userInfo = userInfoService.userinfo().get().execute();
    } catch (IOException e) {
        System.err.println("An error occurred: " + e);
    }
    if (userInfo != null && userInfo.getId() != null) {
        return userInfo;
    } else {
        throw new NoUserIdException();
    }
}
All other classes were found. I have the latest Drive libraries in my build path (Eclipse).
This example is all kinds of messed up. I've implemented OAuth before and this is just way too verbose and over-complicated. It's not even obvious where the user is sent to Google to authenticate.
Had to add
https://developers.google.com/api-client-library/java/apis/oauth2/v2
The Drive SDK has com.google.api.client.auth.oauth2, but not com.google.api.services.oauth2! Go figure!
And it STILL didn't work:
Type mismatch: cannot convert from Userinfo to User
So I had to change the return type to Userinfo.
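For reference, the corrected method is the same as the snippet above with only the return type changed:
static Userinfo getUserInfo(Credential credentials) throws NoUserIdException {
    Oauth2 userInfoService =
            new Oauth2.Builder(new NetHttpTransport(), new JacksonFactory(), credentials).build();
    Userinfo userInfo = null;
    try {
        userInfo = userInfoService.userinfo().get().execute();
    } catch (IOException e) {
        System.err.println("An error occurred: " + e);
    }
    if (userInfo != null && userInfo.getId() != null) {
        return userInfo; // now matches the declared return type
    }
    throw new NoUserIdException();
}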
I am a bit new to WebSphere Portal and have a requirement wherein, in a specific scenario, we want to log the user out and redirect them to the default portal login page.
I have tried invalidating the session, clearing the cookies, and doing a redirect with response.sendRedirect("/wps/myportal/"), but in vain.
Please guide.
You can configure the logout page using wp_configservice:
redirect.logout=true
redirect.logout.ssl=false
redirect.logout.url=protocol://host_name/logout_page
After configuring it, clicking on standard logout will redirect you to the specified page.
If you're using JSF portlets, you can do something along the lines of the following - this should actually log the user session out as well as return them to the default login page.
In reality you'll probably want to cache the initial context lookup by moving the InitialContext and PortletServiceHome lookup out of generateLogoutURL.
public void doLogOut() {
    String logoutUrl = generateLogoutURL(getPortletRequest(), getPortletResponse());
    redirectToUrl(logoutUrl);
}

public String generateLogoutURL(PortletRequest request, PortletResponse response) throws RpmPortalException {
    String logoutURL = "";
    PortletStateManager stateMgr = null;
    LogoutActionAccessorController logoutCtrl = null;
    try {
        Context ctx = new InitialContext();
        PortletServiceHome stateMgrServiceHome = (PortletServiceHome) ctx
                .lookup("portletservice/com.ibm.portal.state.service.PortletStateManagerService");
        PortletStateManagerService stateMgrService = (PortletStateManagerService) stateMgrServiceHome
                .getPortletService(PortletStateManagerService.class);
        stateMgr = stateMgrService.getPortletStateManager(request, response);
        final URLFactory urlFactory = stateMgr.getURLFactory();
        EngineURL url = urlFactory.newURL(null);
        LogoutActionAccessorFactory logoutFct = (LogoutActionAccessorFactory) stateMgr
                .getAccessorFactory(LogoutActionAccessorFactory.class);
        logoutCtrl = logoutFct.newLogoutActionController(url.getState());
        logoutURL = url.writeDispose(new StringWriter()).toString();
    } catch (NamingException e) {
        // do whatever you want
    } catch (StateException e) {
        // do whatever you want
    } catch (IOException e) {
        // do whatever you want
    } finally {
        if (stateMgr != null) {
            stateMgr.dispose();
        }
        if (logoutCtrl != null) {
            logoutCtrl.dispose();
        }
    }
    return logoutURL;
}
public void redirectToUrl(String url) {
    try {
        FacesContext context = getFacesContext();
        if (context != null) {
            context.getExternalContext().redirect(url);
            context.responseComplete();
        }
    } catch (IOException e) {
        // Do whatever you want
    }
}
In the standard configuration of Portal, directing a user to any page with /wps/portal... as the root instead of /wps/myportal/... will force the user to log out and the session to end. So you could just create a friendly URL for your login page and redirect the user to /wps/portal/friendlyLoginUrl
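In code that redirect is a one-liner; a minimal sketch, assuming you have the HttpServletResponse and that friendlyLoginUrl is the friendly URL you created:
// Any /wps/portal/... URL is anonymous in a default setup, so redirecting
// there ends the authenticated session.
response.sendRedirect("/wps/portal/friendlyLoginUrl");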
Just an addition to what zargarf said: by default, the following JavaScript is executed when you click on logout:
javascript:if(stproxy && stproxy.isLoggedIn){stproxy.login.logout();}
I have another JCo-related question and hope to find help.
With JCo you can easily build up a connection as explained in the example sheets that come with the JCo library. Unfortunately, the only way of building a connection shown there relies on a property file created on disk. That wouldn't be so bad if there weren't sensitive data in it, but at the very least the password for the SAP user stands in that file in plain text, so this way of connection handling is a security risk. The JCo manual says so, too:
"For this example the destination configuration is stored in a file that is called by the program. In practice you should avoid this for security reasons."
but I couldn't find a working solution after all. There are a handful of threads about this topic, like this one:
http://forums.sdn.sap.com/thread.jspa?messageID=7303957
but none of them are helpful. I really can't figure out a solution and can't find one either. For now I have worked around the security problem by deleting the file after building the connection, but this is not a satisfying solution. There has to be a better way of getting the connection parameters, especially since the manual itself warns about the file, but I have no clue how.
Has anybody already worked with JCo 3.0 and knows this problem?
Yes, that's possible. You have to create your own implementation of DestinationDataProvider and register it using Environment.registerDestinationDataProvider(). How your DDP obtains the connection data and credentials is up to you. Take a look at net.sf.rcer.conn.connections.ConnectionManager; there's a working example in there.
You need to
copy the private class starting on line 66 and adapt it to your own needs (that is, fetch the connection data from wherever you want to)
perform the registration (line 204) somewhere during the startup of your application
get the connection using some string identifier that will be passed to your DestinationDataProvider (see the sketch after this list).
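Putting those three steps together, the startup code could look roughly like this; MySecureDestinationDataProvider and "MY_SAP_SYSTEM" are placeholders for your own implementation and destination name:
import com.sap.conn.jco.JCoDestination;
import com.sap.conn.jco.JCoDestinationManager;
import com.sap.conn.jco.ext.Environment;

// Steps 1 and 2: the adapted provider, registered once at application startup.
// It fetches the connection data from wherever you keep it (database, vault, ...).
Environment.registerDestinationDataProvider(new MySecureDestinationDataProvider());

// Step 3: the identifier is handed to your provider's getDestinationProperties(String).
JCoDestination destination = JCoDestinationManager.getDestination("MY_SAP_SYSTEM");
destination.ping(); // throws JCoException if the connection data is wrong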
It's a bit confusing; it was difficult for me to figure this out too.
All you need is an object of type java.util.Properties with the desired fields filled in, but it's up to you how to fill this object.
I did it through a value object; I can fill this VO from a file, a database, a web form...
JCOProvider jcoProvider = null;
SAPVO sap = new SAPVO(); // value object
Properties properties = new Properties();

if (jcoProvider == null) {
    // Get SAP config from DB
    try {
        sap = SAPDAO.getSAPConfig(); // DAO object that gets conn data from DB
    } catch (Exception ex) {
        throw new ConexionSAPException(ex.getMessage());
    }
    // Create new conn
    jcoProvider = new JCOProvider();
}

properties.setProperty(DestinationDataProvider.JCO_ASHOST, sap.getJCO_ASHOST());
properties.setProperty(DestinationDataProvider.JCO_SYSNR, sap.getJCO_SYSNR());
properties.setProperty(DestinationDataProvider.JCO_CLIENT, sap.getJCO_CLIENT());
properties.setProperty(DestinationDataProvider.JCO_USER, sap.getJCO_USER());
properties.setProperty(DestinationDataProvider.JCO_PASSWD, sap.getJCO_PASSWD());
properties.setProperty(DestinationDataProvider.JCO_LANG, sap.getJCO_LANG());
// properties.setProperty(DestinationDataProvider.JCO_TRACE, "10");

try {
    jcoProvider.changePropertiesForABAP_AS(properties);
} catch (Exception e) {
    throw new ConexionSAPException(e.getMessage());
}
The JCOProvider class:
import com.sap.conn.jco.ext.DestinationDataEventListener;
import com.sap.conn.jco.ext.DestinationDataProvider;
import com.sap.conn.jco.ext.Environment;
import es.grupotec.ejb.util.ConexionSAPException;
import java.util.Properties;
public class JCOProvider implements DestinationDataProvider {
    private static final String SAP_SERVER = "SAPSERVER";
    private DestinationDataEventListener eventListener;
    private Properties ABAP_AS_properties;

    public JCOProvider() {
    }

    @Override
    public Properties getDestinationProperties(String name) {
        if (name.equals(SAP_SERVER) && ABAP_AS_properties != null) {
            return ABAP_AS_properties;
        } else {
            return null;
        }
        // if (ABAP_AS_properties != null) return ABAP_AS_properties;
        // else throw new RuntimeException("Destination " + name + " is not available");
    }

    @Override
    public boolean supportsEvents() {
        return true;
    }

    @Override
    public void setDestinationDataEventListener(DestinationDataEventListener eventListener) {
        this.eventListener = eventListener;
    }

    public void changePropertiesForABAP_AS(Properties properties) throws ConexionSAPException {
        try {
            if (!Environment.isDestinationDataProviderRegistered()) {
                if (ABAP_AS_properties == null) {
                    ABAP_AS_properties = properties;
                }
                Environment.registerDestinationDataProvider(this);
            }
            if (properties == null) {
                if (eventListener != null) {
                    eventListener.deleted(SAP_SERVER);
                }
                ABAP_AS_properties = null;
            } else {
                ABAP_AS_properties = properties;
                if (eventListener != null) {
                    eventListener.updated(SAP_SERVER);
                }
            }
        } catch (Exception ex) {
            throw new ConexionSAPException(ex.getMessage());
        }
    }
}
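Once the provider is registered and the properties are set, obtaining and testing the destination looks like this; STFC_CONNECTION is SAP's standard RFC echo test module:
// "SAPSERVER" is the name JCOProvider answers to in getDestinationProperties().
JCoDestination dest = JCoDestinationManager.getDestination("SAPSERVER");
dest.ping(); // verify the connection data
JCoFunction fn = dest.getRepository().getFunction("STFC_CONNECTION");
fn.getImportParameterList().setValue("REQUTEXT", "hello");
fn.execute(dest);
System.out.println(fn.getExportParameterList().getString("ECHOTEXT"));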
Regards