package com.cisdi.dsp.modules.metaAnalysis.rest.kafka2023;
import org.apache.kafka.clients.consumer.*;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.errors.ProducerFencedException;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.kafka.common.serialization.StringSerializer;
import java.time.Duration;
import java.util.*;
public class KafkaTest24 {

    public static final String brokerList = "k8s-master:9092";

    public static Properties getConsumerProperties() {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, brokerList);
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        // Auto-commit must be disabled: offsets are committed through the
        // producer transaction via sendOffsetsToTransaction() instead.
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "groupId");
        return props;
    }
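
    // Note: if source-topic is itself written by a transactional producer,
    // this consumer should arguably also set
    //     props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed");
    // so that records from aborted upstream transactions are never re-copied.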
    public static Properties getProducerProperties() {
        Properties props = new Properties();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, brokerList);
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        props.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "transactionalId");
        return props;
    }
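
    // Setting transactional.id implicitly enables idempotence
    // (enable.idempotence=true) and requires acks=all, so producer retries
    // cannot introduce duplicates within a partition.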
    // Consume from source-topic, then produce to sink-topic, committing the
    // produced records and the consumed offsets in a single transaction.
    public static void main(String[] args) {
        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(getConsumerProperties());
        consumer.subscribe(Collections.singletonList("source-topic"));
        KafkaProducer<String, String> producer = new KafkaProducer<>(getProducerProperties());
        // Register the transactional.id with the coordinator and fence any
        // earlier producer instance that used the same id.
        producer.initTransactions();
        while (true) {
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000));
            if (records.isEmpty()) {
                continue;
            }
            Map<TopicPartition, OffsetAndMetadata> offsets = new HashMap<>();
            // Begin the transaction that covers both the produced records
            // and the consumed offsets.
            producer.beginTransaction();
            try {
                for (TopicPartition partition : records.partitions()) {
                    List<ConsumerRecord<String, String>> partitionRecords = records.records(partition);
                    for (ConsumerRecord<String, String> record : partitionRecords) {
                        ProducerRecord<String, String> producerRecord =
                                new ProducerRecord<>("sink-topic", record.key(), record.value());
                        producer.send(producerRecord);
                        System.out.println("sent: " + record.value());
                    }
                    // Commit the position of the next record to read,
                    // i.e. the last consumed offset + 1.
                    long lastConsumedOffset = partitionRecords.get(partitionRecords.size() - 1).offset();
                    offsets.put(partition, new OffsetAndMetadata(lastConsumedOffset + 1));
                }
                // Commit the consumed offsets inside the transaction. In the
                // consume-transform-produce pattern, the group id passed here
                // must match the group.id configured on the consumer.
                producer.sendOffsetsToTransaction(offsets, "groupId");
                producer.commitTransaction();
            } catch (ProducerFencedException e) {
                // Another producer with the same transactional.id has started;
                // this instance is fenced and cannot even abort. Close and exit.
                producer.close();
                break;
            } catch (KafkaException e) {
                // Any other error: abort so the batch can be re-consumed and
                // re-produced in a fresh transaction.
                producer.abortTransaction();
            }
        }
        consumer.close();
    }
}
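
To check that downstream readers only ever see the committed copy of each record, a consumer on sink-topic should set isolation.level=read_committed; with the default read_uncommitted it would also return records from aborted transactions. Below is a minimal verification consumer as an illustrative sketch, not part of the original example: the class name SinkTopicVerifier and the group id "verify-group" are arbitrary choices.

import org.apache.kafka.clients.consumer.*;
import org.apache.kafka.common.serialization.StringDeserializer;
import java.time.Duration;
import java.util.Collections;
import java.util.Properties;

public class SinkTopicVerifier {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "k8s-master:9092");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "verify-group");
        // Only return records from committed transactions.
        props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed");
        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(Collections.singletonList("sink-topic"));
            // Runs until interrupted.
            while (true) {
                ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000));
                records.forEach(r -> System.out.println("committed: " + r.value()));
            }
        }
    }
}

Note also that on Kafka clients 2.5 and later, the overload sendOffsetsToTransaction(offsets, consumer.groupMetadata()) is generally preferred over passing the group id as a plain string, since it enables the stronger consumer-group fencing introduced by KIP-447.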