I am working on a Kafka custom partitioner class. I am trying to push the data into separate partitions.
My Kafka producer class:
import java.util.Date;
import java.util.Properties;
import java.util.Random;
import kafka.javaapi.producer.Producer;
import kafka.producer.KeyedMessage;
import kafka.producer.ProducerConfig;
public class KafkaCustomPartitioner {
public static void main(String[] args) {
long events = Long.parseLong(args[0]);
int blocks = Integer.parseInt(args[1]);
Random rnd = new Random();
Properties props = new Properties();
props.put("metadata.broker.list", "localhost:9092");
props.put("serializer.class","kafka.serializer.StringEncoder");
props.put("key.serializer.class", "kafka.serializer.StringEncoder");
props.put("partitioner.class","com.kafka.partdecider.CustomPartitioner");
props.put("producer.type", "sync");
props.put("request.required.acks","1");
ProducerConfig config = new ProducerConfig(props);
Producer producer = new Producer(config);
for(int nBlocks=0; nBlocks<blocks; nBlocks++) {
for(long nEvents=0; nEvents<events; nEvents++) {
long runTime = new Date().getTime();
String msg = runTime + ": " + (50+nBlocks) + ": " + nEvents + ": " + rnd;
KeyedMessage<String, String> data = new KeyedMessage<String, String>("CustPartTopic",String.valueOf(nBlocks),msg);
producer.send(data);
}
}
producer.close();
}
}
Custom Partitioner Class:
import kafka.producer.Partitioner;
public class CustomPartitioner implements Partitioner {
public int partition(Object key, int arg1) {
String receivingkey = (String) key;
long id = Long.parseLong(receivingkey);
return (int) (id%arg1);
}
}
The project's arguments section has the values: 3 2
I am getting an "ArrayIndexOutOfBoundsException" at this line when I run the class:
Exception in thread "main" java.lang.ArrayIndexOutOfBoundsException: 0
at com.kafka.custompartitioner.KafkaCustomPartitioner.main(KafkaCustomPartitioner.java:13)
The error points to the line: long events = Long.parseLong(args[0]);
But I don't understand why that line is giving the error.
Could anyone let me know how I can fix this?
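For what it's worth, an ArrayIndexOutOfBoundsException: 0 on args[0] means the args array is empty, i.e. the "3 2" program arguments never reached the JVM (typically the run configuration being launched is not the one that holds them). A small guard, sketched here and not part of the original code, makes that failure explicit:
if (args.length < 2) {
    System.err.println("Usage: KafkaCustomPartitioner <events> <blocks>");
    System.exit(1);
}
long events = Long.parseLong(args[0]);
int blocks = Integer.parseInt(args[1]);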
This works for me; the APIs are quite different:
package mypackage.io;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import java.util.Date;
import java.util.Properties;
import java.util.Random;
import java.util.concurrent.ExecutionException;
public class KafkaCustomPartitioner {
public static void main(String[] args) throws InterruptedException, ExecutionException {
long events = Long.parseLong(args[0]);
int blocks = Integer.parseInt(args[1]);
Random rnd = new Random();
Properties props = new Properties();
props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
props.put(ProducerConfig.PARTITIONER_CLASS_CONFIG, "mypackage.io.CustomPartitioner");
props.put(ProducerConfig.ACKS_CONFIG, "1");
KafkaProducer<String, String> producer = new KafkaProducer<String, String>(props);
for(int nBlocks=0; nBlocks<blocks; nBlocks++) {
for(long nEvents=0; nEvents<events; nEvents++) {
long runTime = new Date().getTime();
String msg = runTime + ": " + (50+nBlocks) + ": " + nEvents + ": " + rnd;
producer.send(new ProducerRecord<String, String>("CustPartTopic", String.valueOf(nBlocks), msg)).get();
}
}
producer.close();
}
}
Then the custom partitioner:
package mypackage.io;
import org.apache.kafka.clients.producer.Partitioner;
import org.apache.kafka.common.Cluster;
import java.util.Map;
public class CustomPartitioner implements Partitioner {
public int partition(String topic, Object key, byte[] keyBytes, Object value, byte[] valueBytes, Cluster cluster) {
String receivingkey = (String) key;
long id = Long.parseLong(receivingkey);
int numPartitions = cluster.availablePartitionsForTopic(topic).size();
return (int) (id % numPartitions);
}
public void close() {
}
public void configure(Map<String, ?> map) {
}
}
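One assumption behind the partitioner: CustPartTopic needs more than one partition for id % numPartitions to actually spread records; with the arguments 3 2 the keys are "0" and "1", so two partitions are enough. If the topic does not exist yet, it could be created programmatically, for example with the admin client (a sketch, not part of the original answer):
import java.util.Collections;
import java.util.Properties;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.NewTopic;

Properties adminProps = new Properties();
adminProps.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
try (AdminClient admin = AdminClient.create(adminProps)) {
    // 2 partitions, replication factor 1 -- enough for the keys "0" and "1"
    admin.createTopics(Collections.singletonList(new NewTopic("CustPartTopic", 2, (short) 1)))
         .all().get(); // waits for creation; the enclosing method must declare throws Exception
}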
Related
I am using the streaming table example explained here: https://udemy.com/course/kafka-streams-real-time-stream-processing-master-class/learn/lecture/14244016#questions. When I upgrade the kafka dependency from 2.x to 3.3.2, the method below fails.
error:
store(org.apache.kafka.streams.StoreQueryParameters<T>)' in 'org.apache.kafka.streams.KafkaStreams' cannot be applied to '(java.lang.String, org.apache.kafka.streams.state.QueryableStoreType<org.apache.kafka.streams.state.ReadOnlyKeyValueStore<java.lang.Object,java.lang.Object>>)'
QueryServer.java
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.KeyValue;
import org.apache.kafka.streams.state.HostInfo;
import org.apache.kafka.streams.state.QueryableStoreTypes;
import org.apache.kafka.streams.state.ReadOnlyKeyValueStore;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import spark.Spark;
import javax.ws.rs.client.Client;
import javax.ws.rs.client.ClientBuilder;
import java.util.ArrayList;
import java.util.List;
class QueryServer {
private static final Logger logger = LogManager.getLogger();
private final String NO_RESULTS = "No Results Found";
private final String APPLICATION_NOT_ACTIVE = "Application is not active. Try later.";
private final KafkaStreams streams;
private Boolean isActive = false;
private final HostInfo hostInfo;
private Client client;
QueryServer(KafkaStreams streams, String hostname, int port) {
this.streams = streams;
this.hostInfo = new HostInfo(hostname, port);
client = ClientBuilder.newClient();
}
void setActive(Boolean state) {
isActive = state;
}
private List<KeyValue<String, String>> readAllFromLocal() {
List<KeyValue<String, String>> localResults = new ArrayList<>();
ReadOnlyKeyValueStore<String, String> stateStore =
streams.store(AppConfigs.stateStoreName, QueryableStoreTypes.keyValueStore());
stateStore.all().forEachRemaining(localResults::add);
return localResults;
}
void start() {
logger.info("Starting Query Server at http://" + hostInfo.host() + ":" + hostInfo.port()
+ "/" + AppConfigs.stateStoreName + "/all");
Spark.port(hostInfo.port());
Spark.get("/" + AppConfigs.stateStoreName + "/all", (req, res) -> {
List<KeyValue<String, String>> allResults;
String results;
if (!isActive) {
results = APPLICATION_NOT_ACTIVE;
} else {
allResults = readAllFromLocal();
results = (allResults.size() == 0) ? NO_RESULTS
: allResults.toString();
}
return results;
});
}
void stop() {
client.close();
Spark.stop();
}
}
MainApp.java
public class StreamingTableApp {
private static final Logger logger = LogManager.getLogger();
public static void main(final String[] args) {
final Properties props = new Properties();
props.put(StreamsConfig.APPLICATION_ID_CONFIG, AppConfigs.applicationID);
props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, AppConfigs.bootstrapServers);
props.put(StreamsConfig.STATE_DIR_CONFIG, AppConfigs.stateStoreLocation);
props.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass());
props.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass());
StreamsBuilder streamsBuilder = new StreamsBuilder();
KTable<String, String> KT0 = streamsBuilder.table(AppConfigs.topicName);
KT0.toStream().print(Printed.<String, String>toSysOut().withLabel("KT0"));
KTable<String, String> KT1 = KT0.filter((k, v) -> k.matches(AppConfigs.regExSymbol) && !v.isEmpty(),
Materialized.as(AppConfigs.stateStoreName));
KT1.toStream().print(Printed.<String, String>toSysOut().withLabel("KT1"));
KafkaStreams streams = new KafkaStreams(streamsBuilder.build(), props);
//Query Server
QueryServer queryServer = new QueryServer(streams, AppConfigs.queryServerHost, AppConfigs.queryServerPort);
streams.setStateListener((newState, oldState) -> {
logger.info("State Changing to " + newState + " from " + oldState);
queryServer.setActive(newState == KafkaStreams.State.RUNNING && oldState == KafkaStreams.State.REBALANCING);
});
streams.start();
queryServer.start();
Runtime.getRuntime().addShutdownHook(new Thread(() -> {
logger.info("Shutting down servers");
queryServer.stop();
streams.close();
}));
}
}
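The error message itself points at the change: since Kafka Streams 2.5, KafkaStreams.store() takes a single StoreQueryParameters argument instead of a store name plus QueryableStoreType, so under 3.3.2 the lookup in readAllFromLocal() needs the newer form. A minimal sketch, assuming the same store name and String serdes:
import org.apache.kafka.streams.StoreQueryParameters;
import org.apache.kafka.streams.state.QueryableStoreTypes;
import org.apache.kafka.streams.state.ReadOnlyKeyValueStore;

// Kafka Streams 3.x: wrap the store name and store type in StoreQueryParameters
ReadOnlyKeyValueStore<String, String> stateStore = streams.store(
        StoreQueryParameters.fromNameAndType(
                AppConfigs.stateStoreName,
                QueryableStoreTypes.<String, String>keyValueStore()));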
How do I set a timeout before the response in Kafka properties in Java? I need to mock webhook API message pushing with a timeout of 60 seconds before the response.
import java.util.Arrays;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;
public class AsyncConsumer {
public static void main(String[] args) {
// Properties to connect Kafka
Properties prop = new Properties();
prop.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
prop.setProperty(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
prop.setProperty(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
prop.setProperty(ConsumerConfig.GROUP_ID_CONFIG, "1");
//prop.setProperty(ConsumerConfig.de, "60000");
prop.setProperty(ConsumerConfig.HEARTBEAT_INTERVAL_MS_DOC, "60000");
prop.setProperty(ConsumerConfig.DEFAULT_API_TIMEOUT_MS_DOC, "60000");
// prop.setProperty(ConsumerConfig., "60000");
// prop.setProperty(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "60000");
// prop.setProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "60000");
// prop.setProperty(ConsumerConfig.AUTO_OFFSET_RESET_DOC, "60000");
// prop.setProperty(ConsumerConfig.DEFAULT_API_TIMEOUT_MS_CONFIG, "60000");
// prop.setProperty(ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG, "60000");
prop.setProperty(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, "70000");
// kafka Consumer
KafkaConsumer<String, Object> consumer = new KafkaConsumer<String, Object>(prop);
// Subscription of kafka
consumer.subscribe(Arrays.asList("TestTopic"));
// read data from kafka
while (true) {
ConsumerRecords<String, Object> record = consumer.poll(60000);
for (ConsumerRecord<String, Object> records : record) {
System.out.println("Key :" + records.key() + "\n" + "Value :" + records.value() + "\n" + "partition :"
+ records.partition() + "\n" + "offset :" + records.offset() + "\n" + "topic :"
+ records.topic() + "\n" + "timestamp :" + records.timestamp() + "\n");
}
}
}
}
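Regarding the timeout settings in AsyncConsumer: the *_DOC fields on ConsumerConfig are documentation strings, not configuration keys, so they are not what setProperty() expects; the *_CONFIG constants are. A sketch of plausible 60-second settings (which knob is right depends on what "timeout before response" should mean for the webhook mock):
// keys below are *_CONFIG constants from org.apache.kafka.clients.consumer.ConsumerConfig
prop.setProperty(ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG, "60000");      // per-request timeout to the broker
prop.setProperty(ConsumerConfig.DEFAULT_API_TIMEOUT_MS_CONFIG, "60000");  // default timeout for consumer API calls
prop.setProperty(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, "70000");      // group session timeout
prop.setProperty(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, "20000");   // keep well below the session timeout
// and the poll itself can block for up to 60 seconds waiting for records (kafka-clients 2.0+)
ConsumerRecords<String, Object> records = consumer.poll(java.time.Duration.ofSeconds(60));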
package com.matchmove.Performance.kafka.tests;
import java.util.Date;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringSerializer;
import com.jcraft.jsch.Logger;
import com.matchmove.tests.ASYNCpostWebhookTest;
import org.apache.kafka.common.serialization.ByteArraySerializer;
public class AsyncProducer {
public static void main(String[] args) {
// Properties to connect Kafka
Properties prop = new Properties();
prop.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG,"localhost:9092");
prop.setProperty(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG,StringSerializer.class.getName());
prop.setProperty(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG,StringSerializer.class.getName());
//prop.setProperty(ProducerConfig.TRANSACTION_TIMEOUT_DOC,"60000");
//prop.setProperty(ProducerConfig.TRANSACTION_TIMEOUT_CONFIG,"60000");
//prop.setProperty(ProducerConfig.REQUEST_TIMEOUT_MS_CONFIG,"60000");
//prop.setProperty(ProducerConfig.REQUEST_TIMEOUT_MS_CONFIG,"60000");
prop.setProperty(ProducerConfig.DELIVERY_TIMEOUT_MS_CONFIG, "60000");
//creating producer
KafkaProducer<String,Object> producer=new KafkaProducer<String,Object>(prop);
for(int i=0; i<=100; i++) {
ProducerRecord<String,Object> record=new ProducerRecord<String, Object>("TestTopic",ASYNCpostWebhookTest.PostAsynPerformance());
producer.send(record);
try {
Thread.sleep(1000);
}catch(InterruptedException e) {
e.printStackTrace();
}
System.out.println(record);
}
producer.close();
}
}
package com.matchmove.tests;
import org.slf4j.LoggerFactory;
import com.matchmove.utilities.RandomStringGeneration;
import io.restassured.RestAssured;
import io.restassured.response.Response;
import io.restassured.specification.RequestSpecification;
import static io.restassured.RestAssured.given;
import java.util.HashMap;
import com.github.fge.jsonschema.main.cli.Main;
import com.github.javafaker.Faker;
import com.matchmove.helpingfiles.*;
import org.slf4j.Logger;
public class ASYNCpostWebhookTest {
protected static Logger logger = LoggerFactory.getLogger(ASYNCpostWebhookTest.class.getName());
Faker fake = new Faker();
static HashMap<String, Object> AsyncBody = new HashMap<String, Object>();
static HashMap<String, Object> message = new HashMap<String, Object>();
public static RequestSpecification httpRequest;
public static Response response;
public static String generatedString = RandomStringGeneration.randomstring();
public static String generatedNumber = RandomStringGeneration.number();
public static String PostAsynPerformance() {
RestAssured.baseURI = "https://platform-svc-webhook.kops.matchmove-beta.com";
httpRequest = RestAssured.given();
AsyncBody.put("trace_id", "Amit Bhalla data " + generatedNumber);
AsyncBody.put("service_name", "fast");
AsyncBody.put("event_name", "OPENLOOP_DECLINED_PROGRAM_POS_LIMITS_BREACHED");
AsyncBody.put("push_message_to_queue_count", 10);
message.put("message", "Testing for Async by Amit Bhalla " + generatedNumber);
AsyncBody.put("event_data", message);
response = given().header("Authorization",
"Basic ZW5YZTVFMnhSU3Z6RnFCWUZvOEF2ejV2UnJsd2hEeDM6ZnNPcHp1RXE0WGFUbm0wM013YmpSS2hSYzk3TktQR2FzbjRtc1hieTB"
+ "zQ21GYUlpZ3BZTUF3UGVUNDZrUEE3SA==")
.body(AsyncBody).when().post(resources.PostAsyncWebhook);
// String responsebody = response.getBody().asString();
String body = AsyncBody.toString();
return body;
}
/*
* public static void main(String [] args) {
*
* ASYNCpostWebhookTest data=new ASYNCpostWebhookTest();
* ASYNCpostWebhookTest.PostAsynPerformance(); }
*/
}
I want to execute a Kafka producer using multiple threads. Below is the code that I have tried. I am not sure how to implement threads in a Kafka producer, since I am not well versed in thread programming.
Below is the code for my producer.
import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.common.Metric;
import org.apache.kafka.common.MetricName;
import org.apache.kafka.common.serialization.StringSerializer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.BufferedReader;
import java.io.FileNotFoundException;
import java.io.FileReader;
import java.io.IOException;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
public class KafkaProducerWithThread {
//init params
final String bootstrapServer = "127.0.0.1:9092";
final String topicName = "spark-data-topic";
final String csvFileName = "unique_products.csv";
final static int MAX_THREAD = 2; //created number of threads
//Logger
final Logger logger = LoggerFactory.getLogger(KafkaProducerWithThread.class);
public KafkaProducerWithThread() throws FileNotFoundException {
}
public static void main(String[] args) throws IOException {
new KafkaProducerWithThread().runProducer();
}
public void runProducer() throws IOException {
//Read the CSV file from Resources folder as BufferedReader
ClassLoader classLoader = new KafkaProducerWithThread().getClass().getClassLoader();
BufferedReader reader = new BufferedReader(new FileReader(classLoader.getResource(csvFileName).getFile()));
//Create a Kafka Producer
org.apache.kafka.clients.producer.KafkaProducer<String, String> producer = createKafkaProducer();
//Kafka Producer Metrics
Metric requestTotalMetric = null;
for (Map.Entry<MetricName, ? extends Metric> entry : producer.metrics().entrySet()) {
if ("request-total".equals(entry.getKey().name())) {
requestTotalMetric = entry.getValue();
}
}
//Thread
ExecutorService executorService = Executors.newFixedThreadPool(MAX_THREAD);
//Read the CSV file line by line
String line = "";
int i = 0;
while ((line = reader.readLine()) != null) {
i++;
String key = "products_" + i;
//Create a ProducerRecord
ProducerRecord<String, String> csvProducerRecord = new ProducerRecord<>(topicName, key, line.trim());
//Send the data - Asynchronously
producer.send(csvProducerRecord, new Callback() {
@Override
public void onCompletion(RecordMetadata recordMetadata, Exception e) {
//executes every time a record is sent successfully or an exception is thrown
if (e == null) {
//the record was sent successfully
// logger.info("Received new metadata. \n" +
// "Topic: " + recordMetadata.topic() + "\n" +
// "Partition: " + recordMetadata.partition() + "\n" +
// "Offset: " + recordMetadata.offset() + "\n" +
// "Timestamp: " + recordMetadata.timestamp());
} else {
logger.error("Error while producing", e);
}
}
});
if (i % 1000 == 0){
logger.info("Record #: " + i + " Request rate: " + requestTotalMetric.metricValue());
}
}
//Adding a shutdown hook
Runtime.getRuntime().addShutdownHook(new Thread(() -> {
logger.info("Stopping the Producer!");
producer.flush();
producer.close();
logger.info("Stopped the Producer!");
}));
}
public org.apache.kafka.clients.producer.KafkaProducer<String, String> createKafkaProducer() {
//Create Producer Properties
Properties properties = new Properties();
properties.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServer);
properties.setProperty(ProducerConfig.ACKS_CONFIG, "all");
properties.setProperty(ProducerConfig.RETRIES_CONFIG, "5");
properties.setProperty(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
properties.setProperty(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
properties.setProperty(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, "true"); // For an idempotent producer
//kafka can detect whether it's a duplicate data based on the producer request id.
//Create high throughput Producer at the expense of latency & CPU
properties.setProperty(ProducerConfig.COMPRESSION_TYPE_CONFIG, "snappy");
properties.setProperty(ProducerConfig.LINGER_MS_CONFIG, "60");
properties.setProperty(ProducerConfig.BATCH_SIZE_CONFIG, Integer.toString(32 * 1024)); //32KB batch size
//Create Kafka Producer
org.apache.kafka.clients.producer.KafkaProducer<String, String> csvProducer = new org.apache.kafka.clients.producer.KafkaProducer<String, String>(properties);
return csvProducer;
}
}
Can anyone help me implement threads in my Kafka producer program?
My producer will be producing over a million records, so I want to use threads for it. I am aware that ExecutorService is used for thread programming, but I am not sure how to apply it in this case.
Thanks.
Create a MessageSender class as given below.
After creating the producer, create a new MessageSender object taking the producer record and the producer as constructor args.
Then invoke executorService.submit() to perform the task.
// Inside runProducer(): hand each record to the thread pool instead of calling producer.send() inline
ExecutorService executorService = Executors.newFixedThreadPool(MAX_THREAD);
// Read the CSV file line by line
String line;
int i = 0;
while ((line = reader.readLine()) != null) {
    i++;
    String key = "products_" + i;
    // create producer record
    ProducerRecord<String, String> csvProducerRecord = new ProducerRecord<>(topicName, key, line.trim());
    MessageSender messageSender = new MessageSender(csvProducerRecord, producer);
    executorService.submit(messageSender);
}
executorService.shutdown();
// Thread class: sends one record using the shared producer
class MessageSender implements Runnable {
    private final ProducerRecord<String, String> record;
    private final KafkaProducer<String, String> producer;
    MessageSender(ProducerRecord<String, String> record, KafkaProducer<String, String> producer) {
        // store in class-level variables for use in run()
        this.record = record;
        this.producer = producer;
    }
    @Override
    public void run() {
        producer.send(record);
    }
}
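A note on the design: KafkaProducer is thread-safe, so the single instance returned by createKafkaProducer() can be shared by all MessageSender tasks. Since send() is already asynchronous, throughput usually comes more from the batching settings (linger.ms and batch.size, which the question already sets) than from the number of threads. Before producer.close(), call executorService.shutdown() and awaitTermination(), then producer.flush(), so queued records are not dropped.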
I am working with a Get object retrieved from a table in HBase. I want to dynamically retrieve all column values related to that Get, since I don't know the exact names of the column families.
val result1 = hTable.get(g)
if (!result1.isEmpty) {
//binaryEpisodes = result1.getValue(Bytes.toBytes("episodes"),Bytes.toBytes("episodes"))
//instead of above retrieve all values dynamically
}
Simple way: get the raw cells from the Result and read the column family, qualifier and value from each cell.
You have to do something like the example below:
public static void printResult(Result result, Logger logger) {
logger.info("Row: ");
for (Cell cell : result.rawCells()) {
byte[] family = CellUtil.cloneFamily(cell);
byte[] column = CellUtil.cloneQualifier(cell);
byte[] value = CellUtil.cloneValue(cell);
logger.info("\t" + Bytes.toString(family) + ":" + Bytes.toString(column) + " = " + Bytes.toString(value));
}
}
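Applied to the Result from the question, the usage would be roughly as follows (sketched in Java to match the helper above; hTable is assumed to be an org.apache.hadoop.hbase.client.Table and g the Get):
Result result1 = hTable.get(g);
if (!result1.isEmpty()) {
    // prints every family:qualifier = value pair, whatever the column families are called
    printResult(result1, logger);
}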
HBase Admin way: the HBase client API exposes table and column metadata through the HBaseAdmin class, as shown below.
The client would look like this:
package mytest;
import com.usertest.*;
import java.io.IOException;
import java.util.Date;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
public class ListHbaseTablesAndColumns {
public static void main(String[] args) {
try {
HbaseMetaData hbaseMetaData =new HbaseMetaData();
for(String hbaseTable:hbaseMetaData .getTableNames(".*yourtables.*")){
for (String column : hbaseMetaData .getColumns(hbaseTable, 10000)) {
System.out.println(hbaseTable + "," + column);
}
}
} catch (IOException e) {
e.printStackTrace();
}
}
}
Use the class below to get the HBase metadata:
package com.usertest;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.*;
import org.apache.hadoop.hbase.client.*;
import org.apache.hadoop.hbase.filter.PageFilter;
import java.io.IOException;
import java.util.*;
import java.util.regex.Pattern;
public class HbaseMetaData {
private HBaseAdmin hBaseAdmin;
private Configuration hBaseConfiguration;
public HbaseMetaData () throws IOException {
this.hBaseConfiguration = HBaseConfiguration.create();
this.hBaseAdmin = new HBaseAdmin(hBaseConfiguration);
}
/** get all Table names **/
public List<String> getTableNames(String regex) throws IOException {
Pattern pattern=Pattern.compile(regex);
List<String> tableList = new ArrayList<String>();
TableName[] tableNames=hBaseAdmin.listTableNames();
for (TableName tableName:tableNames){
if(pattern.matcher(tableName.toString()).find()){
tableList.add(tableName.toString());
}
}
return tableList;
}
/** Get all columns **/
public Set<String> getColumns(String hbaseTable) throws IOException {
return getColumns(hbaseTable, 10000);
}
/** get all columns from the table **/
public Set<String> getColumns(String hbaseTable, int limitScan) throws IOException {
Set<String> columnList = new TreeSet<String>();
HTable hTable=new HTable(hBaseConfiguration, hbaseTable);
Scan scan=new Scan();
scan.setFilter(new PageFilter(limitScan));
ResultScanner results = hTable.getScanner(scan);
for(Result result:results){
for(KeyValue keyValue:result.list()){
columnList.add(
new String(keyValue.getFamily()) + ":" +
new String(keyValue.getQualifier())
);
}
}
return columnList;
}
}
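One caveat on the class above: HBaseAdmin, HTable and Result.list()/KeyValue belong to the pre-1.0 client API and are deprecated or removed in newer HBase releases. The same wiring can be done through a Connection, roughly like this (a sketch, not tested against a specific HBase version):
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Table;

// modern replacements for the HBaseAdmin / HTable constructors
Connection connection = ConnectionFactory.createConnection(hBaseConfiguration);
Admin admin = connection.getAdmin();                                  // instead of new HBaseAdmin(conf)
Table table = connection.getTable(TableName.valueOf(hbaseTable));     // instead of new HTable(conf, hbaseTable)
// and result.rawCells() with CellUtil (as in the first example) replaces result.list()/KeyValue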
I have tried to run this code, but it doesn't work because producer.send() doesn't accept the KeyedMessage type.
I tried to import kafka.javaapi.producer.Producer instead of kafka.producer.Producer, but it still doesn't work.
The code is:
package sources;
import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.util.Properties;
//import kafka.javaapi.producer.Producer;
import kafka.producer.KeyedMessage;
import kafka.producer.ProducerConfig;
import kafka.javaapi.producer.Producer;
//import kafka.producer.Producer;
public class ProducerCode {
private static Producer<Integer, String> producer;
private static final String topic= "mytopic";
public void initialize() {
Properties producerProps = new Properties();
producerProps.put("metadata.broker.list", "localhost:9092");
producerProps.put("serializer.class", "kafka.serializer.StringEncoder");
producerProps.put("request.required.acks", "1");
// ProducerConfig producerConfig = new ProducerConfig(producerProps);
// have a change here **
producer = new Producer<Integer, String>(new ProducerConfig(producerProps));
}
public void publishMesssage() throws Exception{
BufferedReader reader = new BufferedReader(new InputStreamReader(System.in));
while (true){
System.out.print("Enter message to send to kafka broker (Press 'Y' to close producer): ");
String msg = null;
msg = reader.readLine(); // Read message from console
//Define topic name and message
KeyedMessage<Integer, String> keyedMsg = new KeyedMessage<Integer, String>(topic, msg);
producer.send(keyedMsg);
// producer.send(keyedMsg); // This publishes message on given topic
if("Y".equals(msg)){ break; }
System.out.println("--> Message [" + msg + "] sent.Check message on Consumer's program console");
}
return;
}
public static void main(String[] args) throws Exception {
ProducerCode producerCode = new ProducerCode();
// Initialize producer
producerCode.initialize();
// Publish message
producerCode.publishMesssage();
//Close the producer
producer.close();
}
}
You have to use ProducerRecord (instead of KeyedMessage), with the constructor ProducerRecord(String topic, K key, V value):
Producer<String, String> producer = new KafkaProducer<>(props);
producer.send(new ProducerRecord<String, String>("my-topic", "key", "value"));
See https://kafka.apache.org/0100/javadoc/index.html?org/apache/kafka/clients/producer/KafkaProducer.html