Reading XML messages from IBM MQ - Java

I am trying to retrieve some XML messages as readable strings straight from IBM MQ and render them on a UI.
With the following code I get this error: GET Exception: com.ibm.mq.MQException: MQJE001: Completion Code '1', Reason '2110'.
When the messages have the format MQSTR they are rendered on the UI fine. What changes could I make so the XML messages also come back as strings?
MQQueueManager _queueManager = null;
int port = inputPort;
String hostname = host;
String channel = chanel;
String qManager = queuemanager;
String inputQName = queuename;

MQEnvironment.hostname = hostname;
MQEnvironment.channel = channel;
MQEnvironment.port = port;
_queueManager = new MQQueueManager(qManager);

int openOptions = MQC.MQOO_INQUIRE + MQC.MQOO_FAIL_IF_QUIESCING + MQC.MQOO_BROWSE;
MQQueue queue = _queueManager.accessQueue(inputQName,
        openOptions,
        null,  // default queue manager
        null,  // no dynamic queue name
        null); // no alternate user id
System.out.println("MQRead is now connected.\n");

int depth = queue.getCurrentDepth();
System.out.println("Current depth: " + depth + "\n");
if (depth == 0) {
    System.out.println("Depth is zero");
}

MQGetMessageOptions getOptions = new MQGetMessageOptions();
getOptions.options = MQC.MQGMO_NO_WAIT + MQC.MQGMO_FAIL_IF_QUIESCING
        + MQC.MQGMO_CONVERT + MQC.MQGMO_BROWSE_NEXT;

ArrayList<MessageDTO> myMessages = new ArrayList<>();
while (true) {
    MQMessage message = new MQMessage();
    try {
        queue.get(message, getOptions);
        byte[] b = new byte[message.getMessageLength()];
        message.readFully(b);
        System.out.println(new MQHeaderList(message, false));
        String newMessage = new String(b);
        MessageDTO newMsg = new MessageDTO();
        newMsg.setMessage(newMessage);
        Random rand = new Random();
        newMsg.setMessageNumber(rand.nextInt());
        myMessages.add(newMsg);
        model.addAttribute("myMessages", myMessages);
        System.out.println(myMessages);
        message.clearMessage();
    } catch (IOException e) {
        System.out.println("IOException during GET: " + e.getMessage());
        break;
    } catch (MQException e) {
        if (e.completionCode == 2 && e.reasonCode == MQException.MQRC_NO_MSG_AVAILABLE) {
            if (depth > 0) {
                System.out.println("All messages read.");
            }
        } else {
            System.out.println("GET Exception: " + e);
        }
        break;
    } catch (MQDataException e) {
        e.printStackTrace();
    }
}
queue.close();
_queueManager.disconnect();

I hope this helps someone one day, but the issue was caused by a format error (reason 2110 is MQRC_FORMAT_ERROR): MQGMO_CONVERT only works when the MQMD Format field is set. I ended up using RFHUtil to change the message format to MQSTR, and I got my messages back as desired.
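If you cannot change the sender, an alternative (a minimal sketch, assuming the com.ibm.mq.headers.CCSID helper is on the classpath; not tested against this setup) is to drop MQGMO_CONVERT and decode the bytes yourself using the CCSID carried in the message descriptor:

    // Inside the browse loop above, in place of the MQGMO_CONVERT variant.
    // Extra imports: com.ibm.mq.headers.CCSID, java.io.UnsupportedEncodingException,
    // java.nio.charset.StandardCharsets
    MQGetMessageOptions getOptions = new MQGetMessageOptions();
    getOptions.options = MQC.MQGMO_NO_WAIT + MQC.MQGMO_FAIL_IF_QUIESCING + MQC.MQGMO_BROWSE_NEXT;

    MQMessage message = new MQMessage();
    queue.get(message, getOptions);
    byte[] b = new byte[message.getMessageLength()];
    message.readFully(b);

    String newMessage;
    try {
        // CCSID.getCodepage maps an MQ CCSID (e.g. 1208) to a Java charset name
        newMessage = new String(b, CCSID.getCodepage(message.characterSet));
    } catch (UnsupportedEncodingException e) {
        newMessage = new String(b, StandardCharsets.UTF_8); // assumption: payload is UTF-8 text
    }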


I'm receiving SNMP traps from network devices (switches), but how can I identify which device (switch) a trap is coming from?

I'm receiving SNMP traps from network devices (switches), but how can I identify which device (switch) each trap is coming from?
The trap contains only the system uptime, the notification type, and the actual data (link up or down):
Received PDU...
Trap Type = -89
Variable Bindings = [1.3.6.1.2.1.1.3.0 = 0:01:35.00, 1.3.6.1.6.3.1.1.4.1.0 = 1.3.6.1.6.3.1.1.5.4, 1.3.6.1.2.1.2.2.1.1 = 1001, 1.3.6.1.2.1.2.2.1.7 = 1, 1.3.6.1.2.1.2.2.1.8 = 1]
Received PDU...
Trap Type = -89
Variable Bindings = [1.3.6.1.2.1.1.3.0 = 0:01:39.00, 1.3.6.1.6.3.1.1.4.1.0 = 1.3.6.1.6.3.1.1.5.4, 1.3.6.1.2.1.2.2.1.1 = 1009, 1.3.6.1.2.1.2.2.1.7 = 1, 1.3.6.1.2.1.2.2.1.8 = 1]
Received PDU...
Trap Type = -89
Variable Bindings = [1.3.6.1.2.1.1.3.0 = 0:01:35.01, 1.3.6.1.6.3.1.1.4.1.0 = 1.3.6.1.6.3.1.1.5.4, 1.3.6.1.2.1.2.2.1.1 = 1007, 1.3.6.1.2.1.2.2.1.7 = 1, 1.3.6.1.2.1.2.2.1.8 = 1]
public static void main(String[] args)
{
TrapReceiver snmp4jTrapReceiver = new TrapReceiver();
try
{
snmp4jTrapReceiver.listen(new UdpAddress("192.168.29.111/162"));
}
catch (IOException e)
{
System.err.println("Error in Listening for Trap");
System.err.println("Exception Message = " + e.getMessage());
}
}
public synchronized void listen(TransportIpAddress address) throws IOException
{
AbstractTransportMapping transport;
if (address instanceof TcpAddress)
{
transport = new DefaultTcpTransportMapping((TcpAddress) address);
}
else
{
transport = new DefaultUdpTransportMapping((UdpAddress) address);
}
ThreadPool threadPool = ThreadPool.create("DispatcherPool", 10);
MessageDispatcher mtDispatcher = new MultiThreadedMessageDispatcher(threadPool, new MessageDispatcherImpl());
// add message processing models
mtDispatcher.addMessageProcessingModel(new MPv2c());
mtDispatcher.addMessageProcessingModel(new MPv1());
SecurityProtocols.getInstance().addDefaultProtocols();
SecurityProtocols.getInstance().addPrivacyProtocol(new Priv3DES());
//Create Target
CommunityTarget target = new CommunityTarget();
target.setCommunity( new OctetString("snmpcom"));
Snmp snmp = new Snmp(mtDispatcher, transport);
snmp.addCommandResponder(this);
transport.listen();
System.out.println("Listening on " + address);
try
{
this.wait();
}
catch (InterruptedException ex)
{
Thread.currentThread().interrupt();
}
}
public synchronized void processPdu(CommandResponderEvent cmdRespEvent)
{
System.out.println("Received PDU...");
PDU pdu = cmdRespEvent.getPDU();
if (pdu != null)
{
System.out.println("Trap Type = " + pdu.getType());
System.out.println("Variable Bindings = " + pdu.getVariableBindings());
int pduType = pdu.getType();
if ((pduType != PDU.TRAP) && (pduType != PDU.V1TRAP) && (pduType != PDU.REPORT)
&& (pduType != PDU.RESPONSE))
{
pdu.setErrorIndex(0);
pdu.setErrorStatus(0);
pdu.setType(PDU.RESPONSE);
StatusInformation statusInformation = new StatusInformation();
StateReference ref = cmdRespEvent.getStateReference();
try
{
System.out.println(cmdRespEvent.getPDU());
cmdRespEvent.getMessageDispatcher().returnResponsePdu(cmdRespEvent.getMessageProcessingModel(),
cmdRespEvent.getSecurityModel(), cmdRespEvent.getSecurityName(), cmdRespEvent.getSecurityLevel(),
pdu, cmdRespEvent.getMaxSizeResponsePDU(), ref, statusInformation);
}
catch (MessageException ex)
{
System.err.println("Error while sending response: " + ex.getMessage());
LogFactory.getLogger(SnmpRequest.class).error(ex);
}
}
}
}
I'm not familiar with snmp4j, but after taking a look at the documentation it looks like you can get the switch IP address from the CommandResponderEvent, whose getPeerAddress() method returns the sender's transport address.
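A minimal sketch of that, inside your processPdu:

    // The transport-level source of the trap, e.g. "192.168.29.50/51234"
    Address peer = cmdRespEvent.getPeerAddress();
    System.out.println("Trap received from: " + peer);
    if (peer instanceof IpAddress) {
        // IpAddress covers both UDP and TCP transport addresses
        java.net.InetAddress switchIp = ((IpAddress) peer).getInetAddress();
        System.out.println("Switch IP: " + switchIp.getHostAddress());
    }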
Now, as per the shared output, what you're seeing are all linkUp traps:
1.3.6.1.6.3.1.1.4.1.0 = 1.3.6.1.6.3.1.1.5.4
for three different ports / ifIndex values: 1001, 1009 and 1007:
1.3.6.1.2.1.2.2.1.1 = 1009
The correspondence between these numbers and the actual switch ports could be defined in device documentation.
Also, in all cases both ifOperStatus and ifAdminStatus are set to 1 (Up).
1.3.6.1.2.1.2.2.1.7 = 1 , 1.3.6.1.2.1.2.2.1.8 = 1
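To map those ifIndex values to port names without digging through the documentation, you could also GET ifName (1.3.6.1.2.1.31.1.1.1.1.<ifIndex>) from the switch; a sketch, assuming the switch exposes IF-MIB, with placeholder IP and community:

    // One-off GET of ifName for ifIndex 1001 (throws IOException)
    Snmp snmp = new Snmp(new DefaultUdpTransportMapping());
    snmp.listen();
    CommunityTarget target = new CommunityTarget();
    target.setCommunity(new OctetString("snmpcom"));
    target.setAddress(new UdpAddress("192.168.29.50/161"));
    target.setVersion(SnmpConstants.version2c);
    PDU pdu = new PDU();
    pdu.setType(PDU.GET);
    pdu.add(new VariableBinding(new OID("1.3.6.1.2.1.31.1.1.1.1.1001")));
    ResponseEvent event = snmp.send(pdu, target);
    if (event != null && event.getResponse() != null) {
        System.out.println("ifName = " + event.getResponse().get(0).getVariable());
    }
    snmp.close();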
I hope this gives you an idea of what to look for.
BR!

What is in the Kafka DefaultRecord value field?

Recently I was reviewing and testing the Kafka code, and I found a strange case:
I print the ByteBuffer at the entry of SocketServer.processCompletedReceives, and I also print the value at the point where the Log stores it, as follows.
The entry of SocketServer:
private def processCompletedReceives() {
selector.completedReceives.asScala.foreach { receive =>
try {
openOrClosingChannel(receive.source) match {
case Some(channel) =>
val header = RequestHeader.parse(receive.payload)
val connectionId = receive.source
val context = new RequestContext(header, connectionId, channel.socketAddress,
channel.principal, listenerName, securityProtocol)
val req = new RequestChannel.Request(processor = id, context = context,
startTimeNanos = time.nanoseconds, memoryPool, receive.payload, requestChannel.metrics)
if(header.apiKey() == ApiKeys.PRODUCE){
LogHelper.log("produce request: %v" + java.util.Arrays.toString(receive.payload.array()))
}
...
The point of the Log store:
validRecords.records().asScala.foreach { record =>
LogHelper.log("buffer info: value " + java.util.Arrays.toString(record.value().array()))
}
But the two prints are different, and record.value() is not the value I passed in from the client, which looks like this:
public void run() {
int messageNo = 1;
while (true) {
String messageStr = "Message_" + messageNo;
long startTime = System.currentTimeMillis();
if (isAsync) { // Send asynchronously
producer.send(new ProducerRecord<>(topic,
messageNo,
messageStr), new DemoCallBack(startTime, messageNo, messageStr));
} else { // Send synchronously
try {
producer.send(new ProducerRecord<>(topic,
messageNo,
messageStr)).get();
System.out.println("Sent message: (" + messageNo + ", " + messageStr + ")");
} catch (InterruptedException | ExecutionException e) {
e.printStackTrace();
}
}
++messageNo;
}
}
The printed result is not the String messageStr = "Message_" + messageNo;
so what happened in this case?
Solved. I wrote the code as follows:
public class KVExtractor {
    private static final Logger logger = LoggerFactory.getLogger(KVExtractor.class);

    public static Map.Entry<byte[], byte[]> extract(Record record) {
        if (record.hasKey() && record.hasValue()) {
            byte[] key = new byte[record.key().limit()];
            record.key().get(key);
            byte[] value = new byte[record.value().limit()];
            record.value().get(value);
            System.out.println("key : " + new String(key) + " value: " + new String(value));
            return new AbstractMap.SimpleEntry<byte[], byte[]>(key, value);
        } else if (record.hasValue()) {
            // illegal impl
            byte[] data = new byte[record.value().limit()];
            record.value().get(data);
            System.out.println("no key but with value : " + new String(data));
        }
        return null;
    }
}
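As for why the two prints differ: my reading of the snippets (an assumption, not verified against your build) is that record.value() is a ByteBuffer slice into the decoded batch, and ByteBuffer.array() returns the entire backing array, ignoring the slice's position and limit; likewise receive.payload.array() at the SocketServer holds the whole produce request, not a bare value. Copying with get(), as in the code above, yields only the value bytes. A self-contained demonstration of the pitfall:

    import java.nio.ByteBuffer;

    public class SliceDemo {
        public static void main(String[] args) {
            // Pretend "header" is batch framing and the value starts at offset 6
            ByteBuffer batch = ByteBuffer.wrap("headerMessage_1".getBytes());
            batch.position(6);
            ByteBuffer value = batch.slice(); // view over "Message_1"

            // array() exposes the whole backing array -> "headerMessage_1" (wrong)
            System.out.println(new String(value.array()));

            // get() copies only the slice's bytes -> "Message_1" (right)
            byte[] copy = new byte[value.remaining()];
            value.get(copy);
            System.out.println(new String(copy));
        }
    }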

IN / OR multiple operators in SAP Java (JCo)

I am creating a query using JCo (the SAP Java Connector); for example:
public static void TEST() throws JCoException {
JCoDestination destination;
JCoRepository sapRepository;
destination = JCoDestinationManager.getDestination(ABAP_AS);
JCoDestinationManager.getDestination(ABAP_AS);
System.out.println("Attributes:");
System.out.println(destination.getAttributes());
System.out.println();
try {
JCoContext.begin(destination);
sapRepository = destination.getRepository();
if (sapRepository == null) {
System.out.println("Couldn't get repository!");
System.exit(0);
}
JCoFunctionTemplate functionTemplate = sapRepository.getFunctionTemplate("EM_GET_NUMBER_OF_ENTRIES");
JCoFunction function = functionTemplate.getFunction();
JCoTable itTable = function.getTableParameterList().getTable("IT_TABLES");
itTable.appendRow();
itTable.setValue("TABNAME", "USR02");
// JCoTable returnOptions_ = function.getTableParameterList().getTable("OPTIONS");
// returnOptions_.appendRow();
//// //returnOptions.setValue("TEXT", "MODDA GE '20140908' AND MODTI GT '000000'");
// returnOptions_.setValue("TEXT", "BNAME EQ 'USER'");
function.execute(destination);
System.out.println( function.getTableParameterList().getTable("IT_TABLES").getInt("TABROWS"));
JCoFunctionTemplate template2 = sapRepository.getFunctionTemplate("RFC_READ_TABLE");
System.out.println("Getting template");
JCoFunction function2 = template2.getFunction();
function2.getImportParameterList().setValue("QUERY_TABLE", "USR02");
function2.getImportParameterList().setValue("DELIMITER", ",");
function2.getImportParameterList().setValue( "ROWCOUNT",5);
function2.getImportParameterList().setValue( "ROWSKIPS",5);
System.out.println("Setting OPTIONS");
// Date date = new Date(1410152400000L);
SimpleDateFormat formatter = new SimpleDateFormat("yyyyMMddHHmmss");
// String dateString = formatter.format(date);
// String dt = dateString.substring(0, 8);
// String tm = dateString.substring(8);
// System.out.println("dt > " + dt + ", tm > " + tm);
JCoTable returnOptions = function2.getTableParameterList().getTable("OPTIONS");
returnOptions.appendRow();
//returnOptions.setValue("TEXT", "MODDA GE '20140908' AND MODTI GT '000000'");
returnOptions.setValue("TEXT", "BNAME LIKE 'S%'");
// returnOptions.appendRow();
// returnOptions.setValue("TEXT", "AND TYPE = 'DN'");
System.out.println("Setting FIELDS");
JCoTable returnFields = function2.getTableParameterList().getTable("FIELDS");
returnFields.appendRow();
returnFields.setValue("FIELDNAME", "BNAME");
returnFields.appendRow();
returnFields.setValue("FIELDNAME", "GLTGB");
returnFields.appendRow();
returnFields.setValue("FIELDNAME", "CLASS");
// returnFields.appendRow();
function2.execute(destination);
// JCoTable jcoTablef = function2.getTableParameterList().getTable("FIELDS");
JCoTable jcoTabled = function2.getTableParameterList().getTable("DATA");
int icodeOffSet = 0;
int icodeLength = 0;
int numRows = jcoTabled.getNumRows();
System.out.println("numRows > " + numRows);
for(int i=0; i<numRows; i++) {
jcoTabled.setRow(i);
System.out.println(jcoTabled.getRow());
String BNAME = "BNAME:" + jcoTabled.getString(0);
// String GLTGB = "GLTGB:" + jcoTabled.getString(2);
// String cls = "GLTGB:" + jcoTabled.getString(3);
System.out.println(BNAME + "..." );
}
} catch (Exception e) {
e.printStackTrace();
System.out.println("ERROR: " + e.getMessage());
} finally {
JCoContext.end(destination);
}
}
static void createDestinationDataFile(String destinationName, Properties connectProperties)
{
File destCfg = new File(destinationName+".jcoDestination");
try
{
FileOutputStream fos = new FileOutputStream(destCfg, false);
connectProperties.store(fos, "for tests only !");
fos.close();
}
catch (Exception e)
{
throw new RuntimeException("Unable to create the destination files", e);
}
}
The previous code worked well when I used the EQ operator.
However, when I used the IN operator:
BNAME IN ('USER1','USER','USER3')
or
BNAME EQ 'USER1' OR BNAME EQ 'USER' OR BNAME EQ 'USER3'
It throws an exception: Unexpected dynamic condition
Are there any limitations on the condition size? I have 22 values in the IN condition, and each value is 10 characters long.
You need to specify a valid OpenSQL condition, observe the rules for dynamic conditions, and ensure that the condition is properly split into lines of at most 72 characters (the OPTIONS table's TEXT field is 72 characters wide). My guess would be that the last point is the issue if you're specifying 22 values...
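A minimal sketch of that last point (the helper is hypothetical; it assumes the standard RFC_READ_TABLE OPTIONS table): split the condition at spaces so no row exceeds 72 characters and no token or quoted literal is cut in half.

    // Hypothetical helper: spread a long OpenSQL condition over OPTIONS rows.
    // Assumes no single token is longer than 72 characters.
    static void setCondition(JCoTable options, String condition) {
        StringBuilder line = new StringBuilder();
        for (String token : condition.split(" ")) {
            if (line.length() + token.length() + 1 > 72) {
                options.appendRow();
                options.setValue("TEXT", line.toString().trim());
                line.setLength(0);
            }
            line.append(token).append(' ');
        }
        if (line.length() > 0) {
            options.appendRow();
            options.setValue("TEXT", line.toString().trim());
        }
    }

    // Usage with the code above:
    // setCondition(function2.getTableParameterList().getTable("OPTIONS"),
    //              "BNAME IN ('USER1','USER2','USER3')");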

Java Mail API: Convert Message to String?

I am using the following code to successfully retrieve messages from my Gmail account.
// Import Statements
public class ConfirmEmail {
WebDriver driver;
Folder inbox;
String gmailID = "xxxxxxxxxxx@gmail.com";
String gmailPass = "xxxxxxxx";
String storeMessage;
public ConfirmEmail()
{
}
public void MailReader() {
System.out.println("Inside MailReader()...");
final String SSL_FACTORY = "javax.net.ssl.SSLSocketFactory";
/* Set the mail properties */
Properties props = System.getProperties();
// Set manual Properties
props.setProperty("mail.pop3.socketFactory.class", SSL_FACTORY);
props.setProperty("mail.pop3.socketFactory.fallback", "false");
props.setProperty("mail.pop3.port", "995");
props.setProperty("mail.pop3.socketFactory.port", "995");
props.put("mail.pop3.host", "pop.gmail.com");
try
{
/* Create the session and get the store for read the mail. */
Session session = Session.getDefaultInstance(
System.getProperties(), null);
Store store = session.getStore("pop3");
store.connect("pop.gmail.com", 995, gmailID,
gmailPass);
/* Mention the folder name which you want to read. */
// inbox = store.getDefaultFolder();
// inbox = inbox.getFolder("INBOX");
inbox = store.getFolder("INBOX");
/* Open the inbox using store. */
inbox.open(Folder.READ_ONLY);
/* Get the messages which are unread in the Inbox */
Message messages[] = inbox.search(new FlagTerm(new Flags(
Flags.Flag.SEEN), false));
System.out.println("No. of Unread Messages : " + messages.length);
/* Use a suitable FetchProfile */
FetchProfile fp = new FetchProfile();
fp.add(FetchProfile.Item.ENVELOPE);
fp.add(FetchProfile.Item.CONTENT_INFO);
inbox.fetch(messages, fp);
try
{
printAllMessages(messages);
inbox.close(true);
store.close();
}
catch (Exception ex)
{
System.out.println("Exception arise at the time of read mail");
ex.printStackTrace();
}
}
catch (MessagingException e)
{
System.out.println("Exception while connecting to server: "
+ e.getLocalizedMessage());
e.printStackTrace();
System.exit(2);
}
}
public void printAllMessages(Message[] msgs) throws Exception
{
for (int i = 0; i < msgs.length; i++)
{
System.out.println("MESSAGE #" + (i + 1) + ":");
printEnvelope(msgs[i]);
}
}
public void printEnvelope(Message message) throws Exception
{
Address[] a;
// FROM
if ((a = message.getFrom()) != null) {
for (int j = 0; j < a.length; j++) {
System.out.println("FROM: " + a[j].toString());
}
}
// TO
if ((a = message.getRecipients(Message.RecipientType.TO)) != null) {
for (int j = 0; j < a.length; j++) {
System.out.println("TO: " + a[j].toString());
}
}
String subject = message.getSubject();
Date receivedDate = message.getReceivedDate();
Date sentDate = message.getSentDate(); // receivedDate is returning
// null. So used getSentDate()
String content = message.getContent().toString();
System.out.println("Subject : " + subject);
if (receivedDate != null) {
System.out.println("Received Date : " + receivedDate.toString());
}
System.out.println("Sent Date : " + sentDate.toString());
System.out.println("Content : " + content);
getContent(message);
}
public void getContent(Message msg)
{
try {
String contentType = msg.getContentType();
System.out.println("Content Type : " + contentType);
Multipart mp = (Multipart) msg.getContent();
int count = mp.getCount();
for (int i = 0; i < count; i++) {
dumpPart(mp.getBodyPart(i));
}
} catch (Exception ex) {
System.out.println("Exception arise at get Content");
ex.printStackTrace();
}
}
public void dumpPart(Part p) throws Exception {
// Dump input stream ..
InputStream is = p.getInputStream();
// If "is" is not already buffered, wrap a BufferedInputStream
// around it.
if (!(is instanceof BufferedInputStream)) {
is = new BufferedInputStream(is);
}
int c;
System.out.println("Message : ");
while ((c = is.read()) != -1) {
System.out.write(c);
}
}
}
With this code I am able to print messages to the console successfully; it works flawlessly 100% of the time.
However, I need to store the bodyPart (i.e., the actual message body) in a String so I can search it with a regex; I need to extract links beginning with http.
How can I convert the message to a String?
Thanks
I'm not quite sure what you are asking (you said you already print out your Messages ... so when you print them, why can't you store them in a String?).
If you really just want the bodyPart stored in a String variable:
Multipart mp = (Multipart) msg.getContent();
BodyPart bp = mp.getBodyPart(0);
String content = bp.getContent().toString();
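If getContent().toString() doesn't give you usable text (e.g. for stream-backed parts), a sketch along the lines of your dumpPart that collects the body into a String and then pulls out the http links (the helper names are mine):

    // Extra imports: java.io.BufferedReader, java.io.InputStreamReader,
    // java.util.regex.Matcher, java.util.regex.Pattern, javax.mail.Part
    public static String partToString(Part p) throws Exception {
        StringBuilder sb = new StringBuilder();
        try (BufferedReader reader =
                new BufferedReader(new InputStreamReader(p.getInputStream()))) {
            String line;
            while ((line = reader.readLine()) != null) {
                sb.append(line).append('\n');
            }
        }
        return sb.toString();
    }

    public static void printLinks(String body) {
        Matcher m = Pattern.compile("https?://\\S+").matcher(body);
        while (m.find()) {
            System.out.println("Link: " + m.group());
        }
    }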

Kafka Java consumer works only for localhost and fails for remote server

I've been working with Kafka for two months, and I used this code to consume messages locally. I recently decided to distribute ZooKeeper and Kafka, and everything seemed to work just fine. My issue started when I tried to use the consumer code from a remote IP; once I change seeds.add("127.0.0.1"); to seeds.add("104.131.40.xxx"); I get this error message:
run:
Error communicating with Broker [104.131.40.xxx] to find Leader for [temperature, 0] Reason:
java.net.ConnectException: Connection refused Can't find metadata for Topic and Partition. Exiting
BUILD SUCCESSFUL (total time: 21 seconds)
This is the code that I currently use:
/*
Kafka API consumer reads 10 readings from the "temperature" topic
*/
package simpleexample;
import kafka.api.FetchRequest;
import kafka.api.FetchRequestBuilder;
import kafka.api.PartitionOffsetRequestInfo;
import kafka.common.ErrorMapping;
import kafka.common.TopicAndPartition;
import kafka.javaapi.*;
import kafka.javaapi.consumer.SimpleConsumer;
import kafka.message.MessageAndOffset;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
public class SimpleExample {
public static void main(String args[]) {
SimpleExample example = new SimpleExample();
//long maxReads = Long.parseLong(args[0]);
long maxReads = 10;
//String topic = args[1];
String topic = "temperature";
//int partition = Integer.parseInt(args[2]);
int partition =0;
List<String> seeds = new ArrayList<String>();
//seeds.add(args[3]);
seeds.add("104.131.40.xxx");
//int port = Integer.parseInt(args[4]);
int port =9092;
try {
example.run(maxReads, topic, partition, seeds, port);
} catch (Exception e) {
System.out.println("Oops:" + e);
e.printStackTrace();
}
}
private List<String> m_replicaBrokers = new ArrayList<String>();
public SimpleExample() {
m_replicaBrokers = new ArrayList<String>();
}
public void run(long a_maxReads, String a_topic, int a_partition, List<String> a_seedBrokers, int a_port) throws Exception {
// find the meta data about the topic and partition we are interested in
//
PartitionMetadata metadata = findLeader(a_seedBrokers, a_port, a_topic, a_partition);
if (metadata == null) {
System.out.println("Can't find metadata for Topic and Partition. Exiting");
return;
}
if (metadata.leader() == null) {
System.out.println("Can't find Leader for Topic and Partition. Exiting");
return;
}
String leadBroker = metadata.leader().host();
String clientName = "Client_" + a_topic + "_" + a_partition;
SimpleConsumer consumer = new SimpleConsumer(leadBroker, a_port, 100000, 64 * 1024, clientName);
long readOffset = getLastOffset(consumer,a_topic, a_partition, kafka.api.OffsetRequest.EarliestTime(), clientName);
int numErrors = 0;
while (a_maxReads > 0) {
if (consumer == null) {
consumer = new SimpleConsumer(leadBroker, a_port, 100000, 64 * 1024, clientName);
}
FetchRequest req = new FetchRequestBuilder()
.clientId(clientName)
.addFetch(a_topic, a_partition, readOffset, 100000) // Note: this fetchSize of 100000 might need to be increased if large batches are written to Kafka
.build();
FetchResponse fetchResponse = consumer.fetch(req);
if (fetchResponse.hasError()) {
numErrors++;
// Something went wrong!
short code = fetchResponse.errorCode(a_topic, a_partition);
System.out.println("Error fetching data from the Broker:" + leadBroker + " Reason: " + code);
if (numErrors > 5) break;
if (code == ErrorMapping.OffsetOutOfRangeCode()) {
// We asked for an invalid offset. For simple case ask for the last element to reset
readOffset = getLastOffset(consumer,a_topic, a_partition, kafka.api.OffsetRequest.LatestTime(), clientName);
continue;
}
consumer.close();
consumer = null;
leadBroker = findNewLeader(leadBroker, a_topic, a_partition, a_port);
continue;
}
numErrors = 0;
long numRead = 0;
for (MessageAndOffset messageAndOffset : fetchResponse.messageSet(a_topic, a_partition)) {
long currentOffset = messageAndOffset.offset();
if (currentOffset < readOffset) {
System.out.println("Found an old offset: " + currentOffset + " Expecting: " + readOffset);
continue;
}
readOffset = messageAndOffset.nextOffset();
ByteBuffer payload = messageAndOffset.message().payload();
byte[] bytes = new byte[payload.limit()];
payload.get(bytes);
System.out.println(String.valueOf(messageAndOffset.offset()) + ": " + new String(bytes, "UTF-8"));
numRead++;
a_maxReads--;
}
if (numRead == 0) {
try {
Thread.sleep(1000);
} catch (InterruptedException ie) {
}
}
}
if (consumer != null) consumer.close();
}
public static long getLastOffset(SimpleConsumer consumer, String topic, int partition,
long whichTime, String clientName) {
TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partition);
Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo = new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
requestInfo.put(topicAndPartition, new PartitionOffsetRequestInfo(whichTime, 1));
kafka.javaapi.OffsetRequest request = new kafka.javaapi.OffsetRequest(
requestInfo, kafka.api.OffsetRequest.CurrentVersion(), clientName);
OffsetResponse response = consumer.getOffsetsBefore(request);
if (response.hasError()) {
System.out.println("Error fetching data Offset Data the Broker. Reason: " + response.errorCode(topic, partition) );
return 0;
}
long[] offsets = response.offsets(topic, partition);
return offsets[0];
}
private String findNewLeader(String a_oldLeader, String a_topic, int a_partition, int a_port) throws Exception {
for (int i = 0; i < 3; i++) {
boolean goToSleep = false;
PartitionMetadata metadata = findLeader(m_replicaBrokers, a_port, a_topic, a_partition);
if (metadata == null) {
goToSleep = true;
} else if (metadata.leader() == null) {
goToSleep = true;
} else if (a_oldLeader.equalsIgnoreCase(metadata.leader().host()) && i == 0) {
// first time through if the leader hasn't changed give ZooKeeper a second to recover
// second time, assume the broker did recover before failover, or it was a non-Broker issue
//
goToSleep = true;
} else {
return metadata.leader().host();
}
if (goToSleep) {
try {
Thread.sleep(1000);
} catch (InterruptedException ie) {
}
}
}
System.out.println("Unable to find new leader after Broker failure. Exiting");
throw new Exception("Unable to find new leader after Broker failure. Exiting");
}
private PartitionMetadata findLeader(List<String> a_seedBrokers, int a_port, String a_topic, int a_partition) {
PartitionMetadata returnMetaData = null;
loop:
for (String seed : a_seedBrokers) {
SimpleConsumer consumer = null;
try {
consumer = new SimpleConsumer(seed, a_port, 100000, 64 * 1024, "leaderLookup");
List<String> topics = Collections.singletonList(a_topic);
TopicMetadataRequest req = new TopicMetadataRequest(topics);
kafka.javaapi.TopicMetadataResponse resp = consumer.send(req);
List<TopicMetadata> metaData = resp.topicsMetadata();
for (TopicMetadata item : metaData) {
for (PartitionMetadata part : item.partitionsMetadata()) {
if (part.partitionId() == a_partition) {
returnMetaData = part;
break loop;
}
}
}
} catch (Exception e) {
System.out.println("Error communicating with Broker [" + seed + "] to find Leader for [" + a_topic
+ ", " + a_partition + "] Reason: " + e);
} finally {
if (consumer != null) consumer.close();
}
}
if (returnMetaData != null) {
m_replicaBrokers.clear();
for (kafka.cluster.Broker replica : returnMetaData.replicas()) {
m_replicaBrokers.add(replica.host());
}
}
return returnMetaData;
}
}
You need to set advertised.host.name instead of host.name in the Kafka server.properties configuration file.
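For example (a sketch matching the 0.8.x-era SimpleConsumer API used above; the IP stays the placeholder from the question), in config/server.properties on the broker:

    advertised.host.name=104.131.40.xxx
    advertised.port=9092

The broker registers this address in ZooKeeper, and it is what findLeader() gets back as metadata.leader().host(), so it must be reachable from the consumer's machine; on newer brokers the equivalent setting is advertised.listeners=PLAINTEXT://104.131.40.xxx:9092.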
