diff --git a/casic-iot-web/src/main/resources/config/application.yml b/casic-iot-web/src/main/resources/config/application.yml
index 6e085f6..8bd96af 100644
--- a/casic-iot-web/src/main/resources/config/application.yml
+++ b/casic-iot-web/src/main/resources/config/application.yml
@@ -8,35 +8,35 @@
   multipart:
     max-file-size: 50MB
     max-request-size: 80MB
-  kafka:
-    # Kafka configuration
-    producer:
-      # Kafka servers
-      bootstrap-servers: '111.198.10.15:12502'
-      transaction-id-prefix: kafkaTx-
-      retries: 3
-      acks: all
-      batch-size: 16384
-      buffer-memory: 1024000
-    consumer:
-      # Kafka servers
-      bootstrap-servers: '111.198.10.15:12502'
-      group-id: 1
-      auto-offset-reset: latest
-      enable-auto-commit: false
-      max-poll-records: 3
-    properties:
-      max:
-        poll:
-          interval:
-            ms: 600000
-      session:
-        timeout:
-          ms: 10000
-    listener:
-      concurrency: 4
-      ack-mode: manual_immediate
-      missing-topics-fatal: false
+#  kafka:
+#    # Kafka configuration
+#    producer:
+#      # Kafka servers
+#      bootstrap-servers: '111.198.10.15:12502'
+#      transaction-id-prefix: kafkaTx-
+#      retries: 3
+#      acks: all
+#      batch-size: 16384
+#      buffer-memory: 1024000
+#    consumer:
+#      # Kafka servers
+#      bootstrap-servers: '111.198.10.15:12502'
+#      group-id: 1
+#      auto-offset-reset: latest
+#      enable-auto-commit: false
+#      max-poll-records: 3
+#    properties:
+#      max:
+#        poll:
+#          interval:
+#            ms: 600000
+#      session:
+#        timeout:
+#          ms: 10000
+#    listener:
+#      concurrency: 4
+#      ack-mode: manual_immediate
+#      missing-topics-fatal: false
 mybatis-plus:
   global-config:
     # Field strategy 0: insert/update all fields; 1: insert/update only non-NULL values; 2: insert/update only non-NULL, non-empty-string values
diff --git a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/DirectMemoryReporter.java b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/DirectMemoryReporter.java
new file mode 100644
index 0000000..d54b7a9
--- /dev/null
+++ b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/DirectMemoryReporter.java
@@ -0,0 +1,34 @@
+package com.casic.missiles.autoconfig;
+
+import io.netty.util.concurrent.GlobalEventExecutor;
+import io.netty.util.internal.PlatformDependent;
+import lombok.extern.slf4j.Slf4j;
+import org.springframework.util.ReflectionUtils;
+
+import java.lang.reflect.Field;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicLong;
+
+@Slf4j
+public class DirectMemoryReporter {
+
+    private static final int ONE_KB = 1024;
+
+    private final AtomicLong directMemoryCounter;
+
+    public DirectMemoryReporter() {
+        // Netty tracks reserved direct memory in a private static AtomicLong; look it up once.
+        Field field = ReflectionUtils.findField(PlatformDependent.class, "DIRECT_MEMORY_COUNTER");
+        AtomicLong counter = null;
+        if (field != null) {
+            field.setAccessible(true);
+            counter = (AtomicLong) ReflectionUtils.getField(field, null);
+        }
+        this.directMemoryCounter = counter;
+
+        // Report once per second on Netty's global event executor.
+        GlobalEventExecutor.INSTANCE.scheduleAtFixedRate(this::doReport, 0, 1, TimeUnit.SECONDS);
+    }
+
+    public void doReport() {
+        // Read the current value on every tick (a constructor-time snapshot would never change);
+        // fall back to usedDirectMemory(), which returns -1 when tracking is unavailable.
+        long used = directMemoryCounter != null ? directMemoryCounter.get()
+                : PlatformDependent.usedDirectMemory();
+        log.debug("netty_direct_memory_log, {}k", used / ONE_KB);
+    }
+}
\ No newline at end of file
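
Nothing in this commit instantiates DirectMemoryReporter, so its once-per-second schedule never starts on its own. One way to activate it, assuming Spring is meant to own its lifecycle, is a small configuration class (a hypothetical sketch, not part of the commit):

    package com.casic.missiles.autoconfig;

    import org.springframework.context.annotation.Bean;
    import org.springframework.context.annotation.Configuration;

    // Hypothetical wiring, not part of this commit: constructing the bean is enough,
    // because the reporter schedules its periodic task in its constructor.
    @Configuration
    public class DirectMemoryReporterConfig {

        @Bean
        public DirectMemoryReporter directMemoryReporter() {
            return new DirectMemoryReporter();
        }
    }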
log.debug("netty_directMomery_log ,{}k" , directMomery / _1k); + } +} \ No newline at end of file diff --git a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerConfig.java b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerConfig.java index 24e4884..e494726 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerConfig.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerConfig.java @@ -1,100 +1,100 @@ -package com.casic.missiles.autoconfig; - -import org.apache.kafka.clients.CommonClientConfigs; -import org.apache.kafka.clients.consumer.ConsumerConfig; -import org.apache.kafka.common.config.SaslConfigs; -import org.apache.kafka.common.security.auth.SecurityProtocol; -import org.apache.kafka.common.serialization.StringDeserializer; -import org.springframework.beans.factory.annotation.Value; -import org.springframework.boot.SpringBootConfiguration; -import org.springframework.context.annotation.Bean; -import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory; -import org.springframework.kafka.config.KafkaListenerContainerFactory; -import org.springframework.kafka.core.DefaultKafkaConsumerFactory; -import org.springframework.kafka.listener.ConcurrentMessageListenerContainer; -import org.springframework.kafka.listener.ContainerProperties; - -import java.util.HashMap; -import java.util.Map; - - -/** - * @author cz - */ -@SpringBootConfiguration -public class KafkaConsumerConfig { - - @Value("${spring.kafka.consumer.bootstrap-servers}") - private String bootstrapServers; - @Value("${spring.kafka.consumer.group-id}") - private String groupId; - @Value("${spring.kafka.consumer.enable-auto-commit}") - private boolean enableAutoCommit; - @Value("${spring.kafka.properties.session.timeout.ms}") - private String sessionTimeout; - @Value("${spring.kafka.properties.max.poll.interval.ms}") - private String maxPollIntervalTime; - @Value("${spring.kafka.consumer.max-poll-records}") - private String maxPollRecords; - @Value("${spring.kafka.consumer.auto-offset-reset}") - private String autoOffsetReset; - @Value("${spring.kafka.listener.concurrency}") - private Integer concurrency; - @Value("${spring.kafka.listener.missing-topics-fatal}") - private boolean missingTopicsFatal; - - private final long pollTimeout = 600000; - - @Bean - public Map consumerConfigs() { - Map propsMap = new HashMap<>(16); - // 服务器地址,不多说配置直接用 - propsMap.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers); - // groupId不多说,直接用 - propsMap.put(ConsumerConfig.GROUP_ID_CONFIG, groupId); - //是否自动提交偏移量,默认值是true,为了避免出现重复数据和数据丢失,可以把它设置为false,然后手动提交偏移量 - propsMap.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, enableAutoCommit); - //自动提交的时间间隔,自动提交开启时生效 - propsMap.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "2000"); - //该属性指定了消费者在读取一个没有偏移量的分区或者偏移量无效的情况下该作何处理: - //我们使用latest:当各分区下有已提交的offset时,从提交的offset开始消费;无提交的offset时,消费新产生的该分区下的数据 - propsMap.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, autoOffsetReset); - //两次poll之间的最大间隔,默认值为5分钟。如果超过这个间隔会触发reBalance - propsMap.put(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, maxPollIntervalTime); - //这个参数定义了poll方法最多可以拉取多少条消息,默认值为500。 - propsMap.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, maxPollRecords); - //当broker多久没有收到consumer的心跳请求后就触发reBalance,默认值是10s - propsMap.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, sessionTimeout); - //序列化(我们这边使用StringDeserializer,与生产者保持一致) - propsMap.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); - 
propsMap.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); - // 下面四个参数是用户名密码的参数,没有用户名密码可以去掉以下配置 -// propsMap.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, SecurityProtocol.SASL_PLAINTEXT.name); -// propsMap.put(SaslConfigs.SASL_MECHANISM, "PLAIN"); -// propsMap.put("java.security.auth.login.config", "10000"); - // 这里username设置用户名, password设置密码我写死到代码里了,可以更改为nacos配置 -// propsMap.put(SaslConfigs.SASL_JAAS_CONFIG, String.format("org.apache.kafka.common.security.plain.PlainLoginModule required username=\"admin\" password=\"admin1234\";")); - return propsMap; - } - - // 消费者工厂,将配置信息加载进去 - @Bean("consumerFactory") - public DefaultKafkaConsumerFactory consumerFactory() { - return new DefaultKafkaConsumerFactory(consumerConfigs()); - } - - @Bean("listenerContainerFactory") - public KafkaListenerContainerFactory> kafkaListenerContainerFactory() { - ConcurrentKafkaListenerContainerFactory factory = new ConcurrentKafkaListenerContainerFactory<>(); - factory.setConsumerFactory(consumerFactory()); - //在侦听器容器中运行的线程数,一般设置为 机器数*分区数 - factory.setConcurrency(concurrency); - //消费监听接口监听的主题不存在时,默认会报错,所以设置为false忽略错误 - factory.getContainerProperties().setMissingTopicsFatal(missingTopicsFatal); - //自动提交关闭,需要设置手动消息确认 - factory.getContainerProperties().setAckMode(ContainerProperties.AckMode.MANUAL_IMMEDIATE); - factory.getContainerProperties().setPollTimeout(pollTimeout); - return factory; - } -} - +//package com.casic.missiles.autoconfig; +// +//import org.apache.kafka.clients.CommonClientConfigs; +//import org.apache.kafka.clients.consumer.ConsumerConfig; +//import org.apache.kafka.common.config.SaslConfigs; +//import org.apache.kafka.common.security.auth.SecurityProtocol; +//import org.apache.kafka.common.serialization.StringDeserializer; +//import org.springframework.beans.factory.annotation.Value; +//import org.springframework.boot.SpringBootConfiguration; +//import org.springframework.context.annotation.Bean; +//import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory; +//import org.springframework.kafka.config.KafkaListenerContainerFactory; +//import org.springframework.kafka.core.DefaultKafkaConsumerFactory; +//import org.springframework.kafka.listener.ConcurrentMessageListenerContainer; +//import org.springframework.kafka.listener.ContainerProperties; +// +//import java.util.HashMap; +//import java.util.Map; +// +// +///** +// * @author cz +// */ +//@SpringBootConfiguration +//public class KafkaConsumerConfig { +// +// @Value("${spring.kafka.consumer.bootstrap-servers}") +// private String bootstrapServers; +// @Value("${spring.kafka.consumer.group-id}") +// private String groupId; +// @Value("${spring.kafka.consumer.enable-auto-commit}") +// private boolean enableAutoCommit; +// @Value("${spring.kafka.properties.session.timeout.ms}") +// private String sessionTimeout; +// @Value("${spring.kafka.properties.max.poll.interval.ms}") +// private String maxPollIntervalTime; +// @Value("${spring.kafka.consumer.max-poll-records}") +// private String maxPollRecords; +// @Value("${spring.kafka.consumer.auto-offset-reset}") +// private String autoOffsetReset; +// @Value("${spring.kafka.listener.concurrency}") +// private Integer concurrency; +// @Value("${spring.kafka.listener.missing-topics-fatal}") +// private boolean missingTopicsFatal; +// +// private final long pollTimeout = 600000; +// +// @Bean +// public Map consumerConfigs() { +// Map propsMap = new HashMap<>(16); +// // 服务器地址,不多说配置直接用 +// propsMap.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, 
bootstrapServers); +// // groupId不多说,直接用 +// propsMap.put(ConsumerConfig.GROUP_ID_CONFIG, groupId); +// //是否自动提交偏移量,默认值是true,为了避免出现重复数据和数据丢失,可以把它设置为false,然后手动提交偏移量 +// propsMap.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, enableAutoCommit); +// //自动提交的时间间隔,自动提交开启时生效 +// propsMap.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "2000"); +// //该属性指定了消费者在读取一个没有偏移量的分区或者偏移量无效的情况下该作何处理: +// //我们使用latest:当各分区下有已提交的offset时,从提交的offset开始消费;无提交的offset时,消费新产生的该分区下的数据 +// propsMap.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, autoOffsetReset); +// //两次poll之间的最大间隔,默认值为5分钟。如果超过这个间隔会触发reBalance +// propsMap.put(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, maxPollIntervalTime); +// //这个参数定义了poll方法最多可以拉取多少条消息,默认值为500。 +// propsMap.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, maxPollRecords); +// //当broker多久没有收到consumer的心跳请求后就触发reBalance,默认值是10s +// propsMap.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, sessionTimeout); +// //序列化(我们这边使用StringDeserializer,与生产者保持一致) +// propsMap.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); +// propsMap.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); +// // 下面四个参数是用户名密码的参数,没有用户名密码可以去掉以下配置 +//// propsMap.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, SecurityProtocol.SASL_PLAINTEXT.name); +//// propsMap.put(SaslConfigs.SASL_MECHANISM, "PLAIN"); +//// propsMap.put("java.security.auth.login.config", "10000"); +// // 这里username设置用户名, password设置密码我写死到代码里了,可以更改为nacos配置 +//// propsMap.put(SaslConfigs.SASL_JAAS_CONFIG, String.format("org.apache.kafka.common.security.plain.PlainLoginModule required username=\"admin\" password=\"admin1234\";")); +// return propsMap; +// } +// +// // 消费者工厂,将配置信息加载进去 +// @Bean("consumerFactory") +// public DefaultKafkaConsumerFactory consumerFactory() { +// return new DefaultKafkaConsumerFactory(consumerConfigs()); +// } +// +// @Bean("listenerContainerFactory") +// public KafkaListenerContainerFactory> kafkaListenerContainerFactory() { +// ConcurrentKafkaListenerContainerFactory factory = new ConcurrentKafkaListenerContainerFactory<>(); +// factory.setConsumerFactory(consumerFactory()); +// //在侦听器容器中运行的线程数,一般设置为 机器数*分区数 +// factory.setConcurrency(concurrency); +// //消费监听接口监听的主题不存在时,默认会报错,所以设置为false忽略错误 +// factory.getContainerProperties().setMissingTopicsFatal(missingTopicsFatal); +// //自动提交关闭,需要设置手动消息确认 +// factory.getContainerProperties().setAckMode(ContainerProperties.AckMode.MANUAL_IMMEDIATE); +// factory.getContainerProperties().setPollTimeout(pollTimeout); +// return factory; +// } +//} +// diff --git a/casic-iot-web/src/main/resources/config/application.yml b/casic-iot-web/src/main/resources/config/application.yml index 6e085f6..8bd96af 100644 --- a/casic-iot-web/src/main/resources/config/application.yml +++ b/casic-iot-web/src/main/resources/config/application.yml @@ -8,35 +8,35 @@ multipart: max-file-size: 50MB max-request-size: 80MB - kafka: - #kafka配置 - producer: - # Kafka服务器 - bootstrap-servers: '111.198.10.15:12502' - transaction-id-prefix: kafkaTx- - retries: 3 - acks: all - batch-size: 16384 - buffer-memory: 1024000 - consumer: - # Kafka服务器 - bootstrap-servers: '111.198.10.15:12502' - group-id: 1 - auto-offset-reset: latest - enable-auto-commit: false - max-poll-records: 3 - properties: - max: - poll: - interval: - ms: 600000 - session: - timeout: - ms: 10000 - listener: - concurrency: 4 - ack-mode: manual_immediate - missing-topics-fatal: false +# kafka: +# #kafka配置 +# producer: +# # Kafka服务器 +# bootstrap-servers: '111.198.10.15:12502' +# transaction-id-prefix: kafkaTx- +# 
retries: 3 +# acks: all +# batch-size: 16384 +# buffer-memory: 1024000 +# consumer: +# # Kafka服务器 +# bootstrap-servers: '111.198.10.15:12502' +# group-id: 1 +# auto-offset-reset: latest +# enable-auto-commit: false +# max-poll-records: 3 +# properties: +# max: +# poll: +# interval: +# ms: 600000 +# session: +# timeout: +# ms: 10000 +# listener: +# concurrency: 4 +# ack-mode: manual_immediate +# missing-topics-fatal: false mybatis-plus: global-config: #字段策略 0:"所有字段都更新和插入" 1:"只更新和插入非NULL值" 2:"只更新和插入非NULL值且非空字符串" diff --git a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/DirectMemoryReporter.java b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/DirectMemoryReporter.java new file mode 100644 index 0000000..d54b7a9 --- /dev/null +++ b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/DirectMemoryReporter.java @@ -0,0 +1,34 @@ +package com.casic.missiles.autoconfig; + +import io.netty.util.concurrent.GlobalEventExecutor; +import io.netty.util.internal.PlatformDependent; +import lombok.extern.slf4j.Slf4j; +import org.springframework.stereotype.Component; +import org.springframework.util.ReflectionUtils; + +import java.lang.reflect.Field; +import java.util.concurrent.TimeUnit; + +@Slf4j +public class DirectMemoryReporter { + + private Integer _1k = 1024; + private Long directMomery; + + public DirectMemoryReporter() { + Field field = ReflectionUtils.findField(PlatformDependent.class,"DIRECT_MEMORY_COUNTER"); + field.setAccessible(true); + try { + directMomery = PlatformDependent.usedDirectMemory(); + } catch (Exception e) { + } + + GlobalEventExecutor.INSTANCE.scheduleAtFixedRate(this::doReport, 0, 1, TimeUnit.SECONDS); + } + + + + public void doReport() { + log.debug("netty_directMomery_log ,{}k" , directMomery / _1k); + } +} \ No newline at end of file diff --git a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerConfig.java b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerConfig.java index 24e4884..e494726 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerConfig.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerConfig.java @@ -1,100 +1,100 @@ -package com.casic.missiles.autoconfig; - -import org.apache.kafka.clients.CommonClientConfigs; -import org.apache.kafka.clients.consumer.ConsumerConfig; -import org.apache.kafka.common.config.SaslConfigs; -import org.apache.kafka.common.security.auth.SecurityProtocol; -import org.apache.kafka.common.serialization.StringDeserializer; -import org.springframework.beans.factory.annotation.Value; -import org.springframework.boot.SpringBootConfiguration; -import org.springframework.context.annotation.Bean; -import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory; -import org.springframework.kafka.config.KafkaListenerContainerFactory; -import org.springframework.kafka.core.DefaultKafkaConsumerFactory; -import org.springframework.kafka.listener.ConcurrentMessageListenerContainer; -import org.springframework.kafka.listener.ContainerProperties; - -import java.util.HashMap; -import java.util.Map; - - -/** - * @author cz - */ -@SpringBootConfiguration -public class KafkaConsumerConfig { - - @Value("${spring.kafka.consumer.bootstrap-servers}") - private String bootstrapServers; - @Value("${spring.kafka.consumer.group-id}") - private String groupId; - @Value("${spring.kafka.consumer.enable-auto-commit}") - private boolean enableAutoCommit; - @Value("${spring.kafka.properties.session.timeout.ms}") - 
private String sessionTimeout; - @Value("${spring.kafka.properties.max.poll.interval.ms}") - private String maxPollIntervalTime; - @Value("${spring.kafka.consumer.max-poll-records}") - private String maxPollRecords; - @Value("${spring.kafka.consumer.auto-offset-reset}") - private String autoOffsetReset; - @Value("${spring.kafka.listener.concurrency}") - private Integer concurrency; - @Value("${spring.kafka.listener.missing-topics-fatal}") - private boolean missingTopicsFatal; - - private final long pollTimeout = 600000; - - @Bean - public Map consumerConfigs() { - Map propsMap = new HashMap<>(16); - // 服务器地址,不多说配置直接用 - propsMap.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers); - // groupId不多说,直接用 - propsMap.put(ConsumerConfig.GROUP_ID_CONFIG, groupId); - //是否自动提交偏移量,默认值是true,为了避免出现重复数据和数据丢失,可以把它设置为false,然后手动提交偏移量 - propsMap.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, enableAutoCommit); - //自动提交的时间间隔,自动提交开启时生效 - propsMap.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "2000"); - //该属性指定了消费者在读取一个没有偏移量的分区或者偏移量无效的情况下该作何处理: - //我们使用latest:当各分区下有已提交的offset时,从提交的offset开始消费;无提交的offset时,消费新产生的该分区下的数据 - propsMap.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, autoOffsetReset); - //两次poll之间的最大间隔,默认值为5分钟。如果超过这个间隔会触发reBalance - propsMap.put(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, maxPollIntervalTime); - //这个参数定义了poll方法最多可以拉取多少条消息,默认值为500。 - propsMap.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, maxPollRecords); - //当broker多久没有收到consumer的心跳请求后就触发reBalance,默认值是10s - propsMap.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, sessionTimeout); - //序列化(我们这边使用StringDeserializer,与生产者保持一致) - propsMap.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); - propsMap.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); - // 下面四个参数是用户名密码的参数,没有用户名密码可以去掉以下配置 -// propsMap.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, SecurityProtocol.SASL_PLAINTEXT.name); -// propsMap.put(SaslConfigs.SASL_MECHANISM, "PLAIN"); -// propsMap.put("java.security.auth.login.config", "10000"); - // 这里username设置用户名, password设置密码我写死到代码里了,可以更改为nacos配置 -// propsMap.put(SaslConfigs.SASL_JAAS_CONFIG, String.format("org.apache.kafka.common.security.plain.PlainLoginModule required username=\"admin\" password=\"admin1234\";")); - return propsMap; - } - - // 消费者工厂,将配置信息加载进去 - @Bean("consumerFactory") - public DefaultKafkaConsumerFactory consumerFactory() { - return new DefaultKafkaConsumerFactory(consumerConfigs()); - } - - @Bean("listenerContainerFactory") - public KafkaListenerContainerFactory> kafkaListenerContainerFactory() { - ConcurrentKafkaListenerContainerFactory factory = new ConcurrentKafkaListenerContainerFactory<>(); - factory.setConsumerFactory(consumerFactory()); - //在侦听器容器中运行的线程数,一般设置为 机器数*分区数 - factory.setConcurrency(concurrency); - //消费监听接口监听的主题不存在时,默认会报错,所以设置为false忽略错误 - factory.getContainerProperties().setMissingTopicsFatal(missingTopicsFatal); - //自动提交关闭,需要设置手动消息确认 - factory.getContainerProperties().setAckMode(ContainerProperties.AckMode.MANUAL_IMMEDIATE); - factory.getContainerProperties().setPollTimeout(pollTimeout); - return factory; - } -} - +//package com.casic.missiles.autoconfig; +// +//import org.apache.kafka.clients.CommonClientConfigs; +//import org.apache.kafka.clients.consumer.ConsumerConfig; +//import org.apache.kafka.common.config.SaslConfigs; +//import org.apache.kafka.common.security.auth.SecurityProtocol; +//import org.apache.kafka.common.serialization.StringDeserializer; +//import org.springframework.beans.factory.annotation.Value; +//import 
org.springframework.boot.SpringBootConfiguration; +//import org.springframework.context.annotation.Bean; +//import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory; +//import org.springframework.kafka.config.KafkaListenerContainerFactory; +//import org.springframework.kafka.core.DefaultKafkaConsumerFactory; +//import org.springframework.kafka.listener.ConcurrentMessageListenerContainer; +//import org.springframework.kafka.listener.ContainerProperties; +// +//import java.util.HashMap; +//import java.util.Map; +// +// +///** +// * @author cz +// */ +//@SpringBootConfiguration +//public class KafkaConsumerConfig { +// +// @Value("${spring.kafka.consumer.bootstrap-servers}") +// private String bootstrapServers; +// @Value("${spring.kafka.consumer.group-id}") +// private String groupId; +// @Value("${spring.kafka.consumer.enable-auto-commit}") +// private boolean enableAutoCommit; +// @Value("${spring.kafka.properties.session.timeout.ms}") +// private String sessionTimeout; +// @Value("${spring.kafka.properties.max.poll.interval.ms}") +// private String maxPollIntervalTime; +// @Value("${spring.kafka.consumer.max-poll-records}") +// private String maxPollRecords; +// @Value("${spring.kafka.consumer.auto-offset-reset}") +// private String autoOffsetReset; +// @Value("${spring.kafka.listener.concurrency}") +// private Integer concurrency; +// @Value("${spring.kafka.listener.missing-topics-fatal}") +// private boolean missingTopicsFatal; +// +// private final long pollTimeout = 600000; +// +// @Bean +// public Map consumerConfigs() { +// Map propsMap = new HashMap<>(16); +// // 服务器地址,不多说配置直接用 +// propsMap.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers); +// // groupId不多说,直接用 +// propsMap.put(ConsumerConfig.GROUP_ID_CONFIG, groupId); +// //是否自动提交偏移量,默认值是true,为了避免出现重复数据和数据丢失,可以把它设置为false,然后手动提交偏移量 +// propsMap.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, enableAutoCommit); +// //自动提交的时间间隔,自动提交开启时生效 +// propsMap.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "2000"); +// //该属性指定了消费者在读取一个没有偏移量的分区或者偏移量无效的情况下该作何处理: +// //我们使用latest:当各分区下有已提交的offset时,从提交的offset开始消费;无提交的offset时,消费新产生的该分区下的数据 +// propsMap.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, autoOffsetReset); +// //两次poll之间的最大间隔,默认值为5分钟。如果超过这个间隔会触发reBalance +// propsMap.put(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, maxPollIntervalTime); +// //这个参数定义了poll方法最多可以拉取多少条消息,默认值为500。 +// propsMap.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, maxPollRecords); +// //当broker多久没有收到consumer的心跳请求后就触发reBalance,默认值是10s +// propsMap.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, sessionTimeout); +// //序列化(我们这边使用StringDeserializer,与生产者保持一致) +// propsMap.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); +// propsMap.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); +// // 下面四个参数是用户名密码的参数,没有用户名密码可以去掉以下配置 +//// propsMap.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, SecurityProtocol.SASL_PLAINTEXT.name); +//// propsMap.put(SaslConfigs.SASL_MECHANISM, "PLAIN"); +//// propsMap.put("java.security.auth.login.config", "10000"); +// // 这里username设置用户名, password设置密码我写死到代码里了,可以更改为nacos配置 +//// propsMap.put(SaslConfigs.SASL_JAAS_CONFIG, String.format("org.apache.kafka.common.security.plain.PlainLoginModule required username=\"admin\" password=\"admin1234\";")); +// return propsMap; +// } +// +// // 消费者工厂,将配置信息加载进去 +// @Bean("consumerFactory") +// public DefaultKafkaConsumerFactory consumerFactory() { +// return new DefaultKafkaConsumerFactory(consumerConfigs()); +// } +// +// 
@Bean("listenerContainerFactory") +// public KafkaListenerContainerFactory> kafkaListenerContainerFactory() { +// ConcurrentKafkaListenerContainerFactory factory = new ConcurrentKafkaListenerContainerFactory<>(); +// factory.setConsumerFactory(consumerFactory()); +// //在侦听器容器中运行的线程数,一般设置为 机器数*分区数 +// factory.setConcurrency(concurrency); +// //消费监听接口监听的主题不存在时,默认会报错,所以设置为false忽略错误 +// factory.getContainerProperties().setMissingTopicsFatal(missingTopicsFatal); +// //自动提交关闭,需要设置手动消息确认 +// factory.getContainerProperties().setAckMode(ContainerProperties.AckMode.MANUAL_IMMEDIATE); +// factory.getContainerProperties().setPollTimeout(pollTimeout); +// return factory; +// } +//} +// diff --git a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerListener.java b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerListener.java index c50a4e3..6600312 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerListener.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerListener.java @@ -1,31 +1,31 @@ -package com.casic.missiles.autoconfig; - -import org.apache.kafka.clients.consumer.ConsumerRecord; -import org.springframework.kafka.annotation.KafkaListener; -import org.springframework.kafka.config.KafkaListenerEndpointRegistry; -import org.springframework.kafka.support.Acknowledgment; -import org.springframework.web.bind.annotation.RestController; - -import javax.annotation.Resource; - -@RestController() -public class KafkaConsumerListener{ - @Resource - private KafkaListenerEndpointRegistry kafkaListenerEndpointRegistry; - /** - * 监听kafka消息 - * - * 使用autoStartup = "false"必须指定id - */ - @KafkaListener(id = "${spring.kafka.consumer.group-id}", topics = {"KAFKA_TEST_TOPICS"}, autoStartup = "true") - public void listenTopics(ConsumerRecord consumerRecord, Acknowledgment ack) { - try { - System.out.println("listenTopics接受消息:" + consumerRecord.value()); - //手动确认 - ack.acknowledge(); - } catch (Exception e) { - System.out.println("消费失败:" + e); - } - } - -} +//package com.casic.missiles.autoconfig; +// +//import org.apache.kafka.clients.consumer.ConsumerRecord; +//import org.springframework.kafka.annotation.KafkaListener; +//import org.springframework.kafka.config.KafkaListenerEndpointRegistry; +//import org.springframework.kafka.support.Acknowledgment; +//import org.springframework.web.bind.annotation.RestController; +// +//import javax.annotation.Resource; +// +//@RestController() +//public class KafkaConsumerListener{ +// @Resource +// private KafkaListenerEndpointRegistry kafkaListenerEndpointRegistry; +// /** +// * 监听kafka消息 +// * +// * 使用autoStartup = "false"必须指定id +// */ +// @KafkaListener(id = "${spring.kafka.consumer.group-id}", topics = {"KAFKA_TEST_TOPICS"}, autoStartup = "true") +// public void listenTopics(ConsumerRecord consumerRecord, Acknowledgment ack) { +// try { +// System.out.println("listenTopics接受消息:" + consumerRecord.value()); +// //手动确认 +// ack.acknowledge(); +// } catch (Exception e) { +// System.out.println("消费失败:" + e); +// } +// } +// +//} diff --git a/casic-iot-web/src/main/resources/config/application.yml b/casic-iot-web/src/main/resources/config/application.yml index 6e085f6..8bd96af 100644 --- a/casic-iot-web/src/main/resources/config/application.yml +++ b/casic-iot-web/src/main/resources/config/application.yml @@ -8,35 +8,35 @@ multipart: max-file-size: 50MB max-request-size: 80MB - kafka: - #kafka配置 - producer: - # Kafka服务器 - bootstrap-servers: '111.198.10.15:12502' - 
diff --git a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerListenerError.java b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerListenerError.java
index d6f771a..dbc561c 100644
--- a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerListenerError.java
+++ b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerListenerError.java
@@ -1,29 +1,29 @@
-package com.casic.missiles.autoconfig;
-
-import edu.umd.cs.findbugs.annotations.NonNull;
-import org.apache.kafka.clients.consumer.Consumer;
-import org.springframework.kafka.listener.KafkaListenerErrorHandler;
-import org.springframework.kafka.listener.ListenerExecutionFailedException;
-import org.springframework.messaging.Message;
-import org.springframework.stereotype.Component;
-
-@Component
-public class KafkaConsumerListenerError implements KafkaListenerErrorHandler {
-
-
-    @Override
-    @NonNull
-    public Object handleError(Message<?> message, ListenerExecutionFailedException e) {
-        return new Object();
-    }
-
-    @Override
-    public Object handleError(Message<?> message, ListenerExecutionFailedException exception, Consumer<?, ?> consumer) {
-        System.out.println("message details: " + message);
-        System.out.println("exception info: " + exception);
-        System.out.println("consumer details: " + consumer.groupMetadata());
-        System.out.println("listened topics: " + consumer.listTopics());
-        return KafkaListenerErrorHandler.super.handleError(message, exception, consumer);
-    }
-
-}
+//package com.casic.missiles.autoconfig;
+//
+//import edu.umd.cs.findbugs.annotations.NonNull;
+//import org.apache.kafka.clients.consumer.Consumer;
+//import org.springframework.kafka.listener.KafkaListenerErrorHandler;
+//import org.springframework.kafka.listener.ListenerExecutionFailedException;
+//import org.springframework.messaging.Message;
+//import org.springframework.stereotype.Component;
+//
+//@Component
+//public class KafkaConsumerListenerError implements KafkaListenerErrorHandler {
+//
+//
+//    @Override
+//    @NonNull
+//    public Object handleError(Message<?> message, ListenerExecutionFailedException e) {
+//        return new Object();
+//    }
+//
+//    @Override
+//    public Object handleError(Message<?> message, ListenerExecutionFailedException exception, Consumer<?, ?> consumer) {
+//        System.out.println("message details: " + message);
+//        System.out.println("exception info: " + exception);
+//        System.out.println("consumer details: " + consumer.groupMetadata());
+//        System.out.println("listened topics: " + consumer.listTopics());
+//        return KafkaListenerErrorHandler.super.handleError(message, exception, consumer);
+//    }
+//
+//}
listener: - concurrency: 4 - ack-mode: manual_immediate - missing-topics-fatal: false +# kafka: +# #kafka配置 +# producer: +# # Kafka服务器 +# bootstrap-servers: '111.198.10.15:12502' +# transaction-id-prefix: kafkaTx- +# retries: 3 +# acks: all +# batch-size: 16384 +# buffer-memory: 1024000 +# consumer: +# # Kafka服务器 +# bootstrap-servers: '111.198.10.15:12502' +# group-id: 1 +# auto-offset-reset: latest +# enable-auto-commit: false +# max-poll-records: 3 +# properties: +# max: +# poll: +# interval: +# ms: 600000 +# session: +# timeout: +# ms: 10000 +# listener: +# concurrency: 4 +# ack-mode: manual_immediate +# missing-topics-fatal: false mybatis-plus: global-config: #字段策略 0:"所有字段都更新和插入" 1:"只更新和插入非NULL值" 2:"只更新和插入非NULL值且非空字符串" diff --git a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/DirectMemoryReporter.java b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/DirectMemoryReporter.java new file mode 100644 index 0000000..d54b7a9 --- /dev/null +++ b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/DirectMemoryReporter.java @@ -0,0 +1,34 @@ +package com.casic.missiles.autoconfig; + +import io.netty.util.concurrent.GlobalEventExecutor; +import io.netty.util.internal.PlatformDependent; +import lombok.extern.slf4j.Slf4j; +import org.springframework.stereotype.Component; +import org.springframework.util.ReflectionUtils; + +import java.lang.reflect.Field; +import java.util.concurrent.TimeUnit; + +@Slf4j +public class DirectMemoryReporter { + + private Integer _1k = 1024; + private Long directMomery; + + public DirectMemoryReporter() { + Field field = ReflectionUtils.findField(PlatformDependent.class,"DIRECT_MEMORY_COUNTER"); + field.setAccessible(true); + try { + directMomery = PlatformDependent.usedDirectMemory(); + } catch (Exception e) { + } + + GlobalEventExecutor.INSTANCE.scheduleAtFixedRate(this::doReport, 0, 1, TimeUnit.SECONDS); + } + + + + public void doReport() { + log.debug("netty_directMomery_log ,{}k" , directMomery / _1k); + } +} \ No newline at end of file diff --git a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerConfig.java b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerConfig.java index 24e4884..e494726 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerConfig.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerConfig.java @@ -1,100 +1,100 @@ -package com.casic.missiles.autoconfig; - -import org.apache.kafka.clients.CommonClientConfigs; -import org.apache.kafka.clients.consumer.ConsumerConfig; -import org.apache.kafka.common.config.SaslConfigs; -import org.apache.kafka.common.security.auth.SecurityProtocol; -import org.apache.kafka.common.serialization.StringDeserializer; -import org.springframework.beans.factory.annotation.Value; -import org.springframework.boot.SpringBootConfiguration; -import org.springframework.context.annotation.Bean; -import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory; -import org.springframework.kafka.config.KafkaListenerContainerFactory; -import org.springframework.kafka.core.DefaultKafkaConsumerFactory; -import org.springframework.kafka.listener.ConcurrentMessageListenerContainer; -import org.springframework.kafka.listener.ContainerProperties; - -import java.util.HashMap; -import java.util.Map; - - -/** - * @author cz - */ -@SpringBootConfiguration -public class KafkaConsumerConfig { - - @Value("${spring.kafka.consumer.bootstrap-servers}") - private String bootstrapServers; - 
@Value("${spring.kafka.consumer.group-id}") - private String groupId; - @Value("${spring.kafka.consumer.enable-auto-commit}") - private boolean enableAutoCommit; - @Value("${spring.kafka.properties.session.timeout.ms}") - private String sessionTimeout; - @Value("${spring.kafka.properties.max.poll.interval.ms}") - private String maxPollIntervalTime; - @Value("${spring.kafka.consumer.max-poll-records}") - private String maxPollRecords; - @Value("${spring.kafka.consumer.auto-offset-reset}") - private String autoOffsetReset; - @Value("${spring.kafka.listener.concurrency}") - private Integer concurrency; - @Value("${spring.kafka.listener.missing-topics-fatal}") - private boolean missingTopicsFatal; - - private final long pollTimeout = 600000; - - @Bean - public Map consumerConfigs() { - Map propsMap = new HashMap<>(16); - // 服务器地址,不多说配置直接用 - propsMap.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers); - // groupId不多说,直接用 - propsMap.put(ConsumerConfig.GROUP_ID_CONFIG, groupId); - //是否自动提交偏移量,默认值是true,为了避免出现重复数据和数据丢失,可以把它设置为false,然后手动提交偏移量 - propsMap.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, enableAutoCommit); - //自动提交的时间间隔,自动提交开启时生效 - propsMap.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "2000"); - //该属性指定了消费者在读取一个没有偏移量的分区或者偏移量无效的情况下该作何处理: - //我们使用latest:当各分区下有已提交的offset时,从提交的offset开始消费;无提交的offset时,消费新产生的该分区下的数据 - propsMap.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, autoOffsetReset); - //两次poll之间的最大间隔,默认值为5分钟。如果超过这个间隔会触发reBalance - propsMap.put(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, maxPollIntervalTime); - //这个参数定义了poll方法最多可以拉取多少条消息,默认值为500。 - propsMap.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, maxPollRecords); - //当broker多久没有收到consumer的心跳请求后就触发reBalance,默认值是10s - propsMap.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, sessionTimeout); - //序列化(我们这边使用StringDeserializer,与生产者保持一致) - propsMap.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); - propsMap.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); - // 下面四个参数是用户名密码的参数,没有用户名密码可以去掉以下配置 -// propsMap.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, SecurityProtocol.SASL_PLAINTEXT.name); -// propsMap.put(SaslConfigs.SASL_MECHANISM, "PLAIN"); -// propsMap.put("java.security.auth.login.config", "10000"); - // 这里username设置用户名, password设置密码我写死到代码里了,可以更改为nacos配置 -// propsMap.put(SaslConfigs.SASL_JAAS_CONFIG, String.format("org.apache.kafka.common.security.plain.PlainLoginModule required username=\"admin\" password=\"admin1234\";")); - return propsMap; - } - - // 消费者工厂,将配置信息加载进去 - @Bean("consumerFactory") - public DefaultKafkaConsumerFactory consumerFactory() { - return new DefaultKafkaConsumerFactory(consumerConfigs()); - } - - @Bean("listenerContainerFactory") - public KafkaListenerContainerFactory> kafkaListenerContainerFactory() { - ConcurrentKafkaListenerContainerFactory factory = new ConcurrentKafkaListenerContainerFactory<>(); - factory.setConsumerFactory(consumerFactory()); - //在侦听器容器中运行的线程数,一般设置为 机器数*分区数 - factory.setConcurrency(concurrency); - //消费监听接口监听的主题不存在时,默认会报错,所以设置为false忽略错误 - factory.getContainerProperties().setMissingTopicsFatal(missingTopicsFatal); - //自动提交关闭,需要设置手动消息确认 - factory.getContainerProperties().setAckMode(ContainerProperties.AckMode.MANUAL_IMMEDIATE); - factory.getContainerProperties().setPollTimeout(pollTimeout); - return factory; - } -} - +//package com.casic.missiles.autoconfig; +// +//import org.apache.kafka.clients.CommonClientConfigs; +//import org.apache.kafka.clients.consumer.ConsumerConfig; +//import 
org.apache.kafka.common.config.SaslConfigs; +//import org.apache.kafka.common.security.auth.SecurityProtocol; +//import org.apache.kafka.common.serialization.StringDeserializer; +//import org.springframework.beans.factory.annotation.Value; +//import org.springframework.boot.SpringBootConfiguration; +//import org.springframework.context.annotation.Bean; +//import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory; +//import org.springframework.kafka.config.KafkaListenerContainerFactory; +//import org.springframework.kafka.core.DefaultKafkaConsumerFactory; +//import org.springframework.kafka.listener.ConcurrentMessageListenerContainer; +//import org.springframework.kafka.listener.ContainerProperties; +// +//import java.util.HashMap; +//import java.util.Map; +// +// +///** +// * @author cz +// */ +//@SpringBootConfiguration +//public class KafkaConsumerConfig { +// +// @Value("${spring.kafka.consumer.bootstrap-servers}") +// private String bootstrapServers; +// @Value("${spring.kafka.consumer.group-id}") +// private String groupId; +// @Value("${spring.kafka.consumer.enable-auto-commit}") +// private boolean enableAutoCommit; +// @Value("${spring.kafka.properties.session.timeout.ms}") +// private String sessionTimeout; +// @Value("${spring.kafka.properties.max.poll.interval.ms}") +// private String maxPollIntervalTime; +// @Value("${spring.kafka.consumer.max-poll-records}") +// private String maxPollRecords; +// @Value("${spring.kafka.consumer.auto-offset-reset}") +// private String autoOffsetReset; +// @Value("${spring.kafka.listener.concurrency}") +// private Integer concurrency; +// @Value("${spring.kafka.listener.missing-topics-fatal}") +// private boolean missingTopicsFatal; +// +// private final long pollTimeout = 600000; +// +// @Bean +// public Map consumerConfigs() { +// Map propsMap = new HashMap<>(16); +// // 服务器地址,不多说配置直接用 +// propsMap.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers); +// // groupId不多说,直接用 +// propsMap.put(ConsumerConfig.GROUP_ID_CONFIG, groupId); +// //是否自动提交偏移量,默认值是true,为了避免出现重复数据和数据丢失,可以把它设置为false,然后手动提交偏移量 +// propsMap.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, enableAutoCommit); +// //自动提交的时间间隔,自动提交开启时生效 +// propsMap.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "2000"); +// //该属性指定了消费者在读取一个没有偏移量的分区或者偏移量无效的情况下该作何处理: +// //我们使用latest:当各分区下有已提交的offset时,从提交的offset开始消费;无提交的offset时,消费新产生的该分区下的数据 +// propsMap.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, autoOffsetReset); +// //两次poll之间的最大间隔,默认值为5分钟。如果超过这个间隔会触发reBalance +// propsMap.put(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, maxPollIntervalTime); +// //这个参数定义了poll方法最多可以拉取多少条消息,默认值为500。 +// propsMap.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, maxPollRecords); +// //当broker多久没有收到consumer的心跳请求后就触发reBalance,默认值是10s +// propsMap.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, sessionTimeout); +// //序列化(我们这边使用StringDeserializer,与生产者保持一致) +// propsMap.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); +// propsMap.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); +// // 下面四个参数是用户名密码的参数,没有用户名密码可以去掉以下配置 +//// propsMap.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, SecurityProtocol.SASL_PLAINTEXT.name); +//// propsMap.put(SaslConfigs.SASL_MECHANISM, "PLAIN"); +//// propsMap.put("java.security.auth.login.config", "10000"); +// // 这里username设置用户名, password设置密码我写死到代码里了,可以更改为nacos配置 +//// propsMap.put(SaslConfigs.SASL_JAAS_CONFIG, String.format("org.apache.kafka.common.security.plain.PlainLoginModule required 
username=\"admin\" password=\"admin1234\";")); +// return propsMap; +// } +// +// // 消费者工厂,将配置信息加载进去 +// @Bean("consumerFactory") +// public DefaultKafkaConsumerFactory consumerFactory() { +// return new DefaultKafkaConsumerFactory(consumerConfigs()); +// } +// +// @Bean("listenerContainerFactory") +// public KafkaListenerContainerFactory> kafkaListenerContainerFactory() { +// ConcurrentKafkaListenerContainerFactory factory = new ConcurrentKafkaListenerContainerFactory<>(); +// factory.setConsumerFactory(consumerFactory()); +// //在侦听器容器中运行的线程数,一般设置为 机器数*分区数 +// factory.setConcurrency(concurrency); +// //消费监听接口监听的主题不存在时,默认会报错,所以设置为false忽略错误 +// factory.getContainerProperties().setMissingTopicsFatal(missingTopicsFatal); +// //自动提交关闭,需要设置手动消息确认 +// factory.getContainerProperties().setAckMode(ContainerProperties.AckMode.MANUAL_IMMEDIATE); +// factory.getContainerProperties().setPollTimeout(pollTimeout); +// return factory; +// } +//} +// diff --git a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerListener.java b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerListener.java index c50a4e3..6600312 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerListener.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerListener.java @@ -1,31 +1,31 @@ -package com.casic.missiles.autoconfig; - -import org.apache.kafka.clients.consumer.ConsumerRecord; -import org.springframework.kafka.annotation.KafkaListener; -import org.springframework.kafka.config.KafkaListenerEndpointRegistry; -import org.springframework.kafka.support.Acknowledgment; -import org.springframework.web.bind.annotation.RestController; - -import javax.annotation.Resource; - -@RestController() -public class KafkaConsumerListener{ - @Resource - private KafkaListenerEndpointRegistry kafkaListenerEndpointRegistry; - /** - * 监听kafka消息 - * - * 使用autoStartup = "false"必须指定id - */ - @KafkaListener(id = "${spring.kafka.consumer.group-id}", topics = {"KAFKA_TEST_TOPICS"}, autoStartup = "true") - public void listenTopics(ConsumerRecord consumerRecord, Acknowledgment ack) { - try { - System.out.println("listenTopics接受消息:" + consumerRecord.value()); - //手动确认 - ack.acknowledge(); - } catch (Exception e) { - System.out.println("消费失败:" + e); - } - } - -} +//package com.casic.missiles.autoconfig; +// +//import org.apache.kafka.clients.consumer.ConsumerRecord; +//import org.springframework.kafka.annotation.KafkaListener; +//import org.springframework.kafka.config.KafkaListenerEndpointRegistry; +//import org.springframework.kafka.support.Acknowledgment; +//import org.springframework.web.bind.annotation.RestController; +// +//import javax.annotation.Resource; +// +//@RestController() +//public class KafkaConsumerListener{ +// @Resource +// private KafkaListenerEndpointRegistry kafkaListenerEndpointRegistry; +// /** +// * 监听kafka消息 +// * +// * 使用autoStartup = "false"必须指定id +// */ +// @KafkaListener(id = "${spring.kafka.consumer.group-id}", topics = {"KAFKA_TEST_TOPICS"}, autoStartup = "true") +// public void listenTopics(ConsumerRecord consumerRecord, Acknowledgment ack) { +// try { +// System.out.println("listenTopics接受消息:" + consumerRecord.value()); +// //手动确认 +// ack.acknowledge(); +// } catch (Exception e) { +// System.out.println("消费失败:" + e); +// } +// } +// +//} diff --git a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerListenerError.java 
diff --git a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerListenerError.java b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerListenerError.java
index d6f771a..dbc561c 100644
--- a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerListenerError.java
+++ b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerListenerError.java
@@ -1,29 +1,29 @@
-package com.casic.missiles.autoconfig;
-
-import edu.umd.cs.findbugs.annotations.NonNull;
-import org.apache.kafka.clients.consumer.Consumer;
-import org.springframework.kafka.listener.KafkaListenerErrorHandler;
-import org.springframework.kafka.listener.ListenerExecutionFailedException;
-import org.springframework.messaging.Message;
-import org.springframework.stereotype.Component;
-
-@Component
-public class KafkaConsumerListenerError implements KafkaListenerErrorHandler {
-
-
-    @Override
-    @NonNull
-    public Object handleError(Message<?> message, ListenerExecutionFailedException e) {
-        return new Object();
-    }
-
-    @Override
-    public Object handleError(Message<?> message, ListenerExecutionFailedException exception, Consumer<?, ?> consumer) {
-        System.out.println("Message details: " + message);
-        System.out.println("Exception info: " + exception);
-        System.out.println("Consumer details: " + consumer.groupMetadata());
-        System.out.println("Listened topics: " + consumer.listTopics());
-        return KafkaListenerErrorHandler.super.handleError(message, exception, consumer);
-    }
-
-}
+//package com.casic.missiles.autoconfig;
+//
+//import edu.umd.cs.findbugs.annotations.NonNull;
+//import org.apache.kafka.clients.consumer.Consumer;
+//import org.springframework.kafka.listener.KafkaListenerErrorHandler;
+//import org.springframework.kafka.listener.ListenerExecutionFailedException;
+//import org.springframework.messaging.Message;
+//import org.springframework.stereotype.Component;
+//
+//@Component
+//public class KafkaConsumerListenerError implements KafkaListenerErrorHandler {
+//
+//
+//    @Override
+//    @NonNull
+//    public Object handleError(Message<?> message, ListenerExecutionFailedException e) {
+//        return new Object();
+//    }
+//
+//    @Override
+//    public Object handleError(Message<?> message, ListenerExecutionFailedException exception, Consumer<?, ?> consumer) {
+//        System.out.println("Message details: " + message);
+//        System.out.println("Exception info: " + exception);
+//        System.out.println("Consumer details: " + consumer.groupMetadata());
+//        System.out.println("Listened topics: " + consumer.listTopics());
+//        return KafkaListenerErrorHandler.super.handleError(message, exception, consumer);
+//    }
+//
+//}
diff --git a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaProviderConfig.java b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaProviderConfig.java
index e8e36ec..1d605ed 100644
--- a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaProviderConfig.java
+++ b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaProviderConfig.java
@@ -1,85 +1,85 @@
-package com.casic.missiles.autoconfig;
-
-import org.apache.kafka.clients.CommonClientConfigs;
-import org.apache.kafka.clients.producer.ProducerConfig;
-import org.apache.kafka.common.config.SaslConfigs;
-import org.apache.kafka.common.security.auth.SecurityProtocol;
-import org.apache.kafka.common.serialization.StringSerializer;
-import org.springframework.beans.factory.annotation.Value;
-import org.springframework.boot.SpringBootConfiguration;
-import org.springframework.context.annotation.Bean;
-import org.springframework.context.annotation.Primary;
-import org.springframework.kafka.core.DefaultKafkaProducerFactory;
-import org.springframework.kafka.core.KafkaTemplate;
-import org.springframework.kafka.core.ProducerFactory;
-import org.springframework.kafka.transaction.KafkaTransactionManager;
-
-import java.util.HashMap;
-import java.util.Map;
-
-/**
- * @author cz
- */
-@SpringBootConfiguration
-public class KafkaProviderConfig {
-
-    @Value("${spring.kafka.producer.bootstrap-servers}")
-    private String bootstrapServers;
-    @Value("${spring.kafka.producer.acks}")
-    private String acks;
-    @Value("${spring.kafka.producer.retries}")
-    private String retries;
-    @Value("${spring.kafka.producer.batch-size}")
-    private String batchSize;
-    @Value("${spring.kafka.producer.buffer-memory}")
-    private String bufferMemory;
-
-    @Bean
-    public Map<String, Object> producerConfigs() {
-        Map<String, Object> props = new HashMap<>(16);
-        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
-        // Acknowledgment mode. With acks=all the producer only receives a success response once every replica has received the message
-        props.put(ProducerConfig.ACKS_CONFIG, acks);
-        // Number of retries after a send error; must be greater than 0 when transactions are enabled
-        props.put(ProducerConfig.RETRIES_CONFIG, retries);
-        // Messages bound for the same partition are batched together instead of being sent one by one, reducing round trips
-        props.put(ProducerConfig.BATCH_SIZE_CONFIG, batchSize);
-        // When traffic is low a batch may not fill 16KB for a long time (e.g. 5 minutes), adding latency, so an upper bound on the wait time is set as well
-        props.put(ProducerConfig.LINGER_MS_CONFIG, "5000");
-        // Size of the producer's memory buffer
-        props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, bufferMemory);
-        // Serializers, matching the consumer's deserializers
-        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
-        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
-
-        // SASL username/password settings; remove them if the cluster has no authentication
-//        props.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, SecurityProtocol.SASL_PLAINTEXT.name);
-//        props.put(SaslConfigs.SASL_MECHANISM, "PLAIN");
-//        props.put("java.security.auth.login.config", "10000");
-        // Could be configured in the nacos configuration file instead
-//        props.put(SaslConfigs.SASL_JAAS_CONFIG, String.format("org.apache.kafka.common.security.plain.PlainLoginModule required username=\"admin\" password=\"admin1234\";"));
-        return props;
-    }
-
-    // Producer factory
-    @Bean("kafkaProduceFactory")
-    public ProducerFactory<Object, Object> producerFactory() {
-        DefaultKafkaProducerFactory<Object, Object> factory = new DefaultKafkaProducerFactory<>(producerConfigs());
-        factory.setTransactionIdPrefix("kafkaTx-");
-        return factory;
-    }
-
-    // Transaction support
-    // This transaction manager can clash with the project's other transaction managers, so the @Bean is usually removed to keep it out of Spring's proxying
-    @Bean("kafkaTransactionManager")
-    @Primary
-    public KafkaTransactionManager<Object, Object> kafkaTransactionManager(ProducerFactory<Object, Object> producerFactory) {
-        return new KafkaTransactionManager<>(producerFactory);
-    }
-
-    @Bean
-    public KafkaTemplate<Object, Object> kafkaTemplate() {
-        return new KafkaTemplate<>(producerFactory());
-    }
-
-}
+//package com.casic.missiles.autoconfig;
+//
+//import org.apache.kafka.clients.CommonClientConfigs;
+//import org.apache.kafka.clients.producer.ProducerConfig;
+//import org.apache.kafka.common.config.SaslConfigs;
+//import org.apache.kafka.common.security.auth.SecurityProtocol;
+//import org.apache.kafka.common.serialization.StringSerializer;
+//import org.springframework.beans.factory.annotation.Value;
+//import org.springframework.boot.SpringBootConfiguration;
+//import org.springframework.context.annotation.Bean;
+//import org.springframework.context.annotation.Primary;
+//import org.springframework.kafka.core.DefaultKafkaProducerFactory;
+//import org.springframework.kafka.core.KafkaTemplate;
+//import org.springframework.kafka.core.ProducerFactory;
+//import org.springframework.kafka.transaction.KafkaTransactionManager;
+//
+//import java.util.HashMap;
+//import java.util.Map;
+//
+///**
+// * @author cz
+// */
+//@SpringBootConfiguration
+//public class KafkaProviderConfig {
+//
+//    @Value("${spring.kafka.producer.bootstrap-servers}")
+//    private String bootstrapServers;
+//    @Value("${spring.kafka.producer.acks}")
+//    private String acks;
+//    @Value("${spring.kafka.producer.retries}")
+//    private String retries;
+//    @Value("${spring.kafka.producer.batch-size}")
+//    private String batchSize;
+//    @Value("${spring.kafka.producer.buffer-memory}")
+//    private String bufferMemory;
+//
+//    @Bean
+//    public Map<String, Object> producerConfigs() {
+//        Map<String, Object> props = new HashMap<>(16);
+//        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
+//        // Acknowledgment mode. With acks=all the producer only receives a success response once every replica has received the message
+//        props.put(ProducerConfig.ACKS_CONFIG, acks);
+//        // Number of retries after a send error; must be greater than 0 when transactions are enabled
+//        props.put(ProducerConfig.RETRIES_CONFIG, retries);
+//        // Messages bound for the same partition are batched together instead of being sent one by one, reducing round trips
+//        props.put(ProducerConfig.BATCH_SIZE_CONFIG, batchSize);
+//        // When traffic is low a batch may not fill 16KB for a long time (e.g. 5 minutes), adding latency, so an upper bound on the wait time is set as well
+//        props.put(ProducerConfig.LINGER_MS_CONFIG, "5000");
+//        // Size of the producer's memory buffer
+//        props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, bufferMemory);
+//        // Serializers, matching the consumer's deserializers
+//        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
+//        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
+//
+//        // SASL username/password settings; remove them if the cluster has no authentication
+////        props.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, SecurityProtocol.SASL_PLAINTEXT.name);
+////        props.put(SaslConfigs.SASL_MECHANISM, "PLAIN");
+////        props.put("java.security.auth.login.config", "10000");
+//        // Could be configured in the nacos configuration file instead
+////        props.put(SaslConfigs.SASL_JAAS_CONFIG, String.format("org.apache.kafka.common.security.plain.PlainLoginModule required username=\"admin\" password=\"admin1234\";"));
+//        return props;
+//    }
+//
+//    // Producer factory
+//    @Bean("kafkaProduceFactory")
+//    public ProducerFactory<Object, Object> producerFactory() {
+//        DefaultKafkaProducerFactory<Object, Object> factory = new DefaultKafkaProducerFactory<>(producerConfigs());
+//        factory.setTransactionIdPrefix("kafkaTx-");
+//        return factory;
+//    }
+//
+//    // Transaction support
+//    // This transaction manager can clash with the project's other transaction managers, so the @Bean is usually removed to keep it out of Spring's proxying
+//    @Bean("kafkaTransactionManager")
+//    @Primary
+//    public KafkaTransactionManager<Object, Object> kafkaTransactionManager(ProducerFactory<Object, Object> producerFactory) {
+//        return new KafkaTransactionManager<>(producerFactory);
+//    }
+//
+//    @Bean
+//    public KafkaTemplate<Object, Object> kafkaTemplate() {
+//        return new KafkaTemplate<>(producerFactory());
+//    }
+//
+//}
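Editor's note (not part of the diff): the producer factory above sets the transaction id prefix "kafkaTx-", which makes every send require an active Kafka transaction, and the inline comment warns that the @Primary KafkaTransactionManager clashes with the project's other transaction managers. A minimal sketch of one way around that, using KafkaTemplate#executeInTransaction so the transaction stays local to the send instead of going through Spring's @Transactional proxying; the class, topic, and payload names here are assumptions.

    package com.casic.missiles.autoconfig;

    import org.springframework.kafka.core.KafkaTemplate;
    import org.springframework.stereotype.Component;

    @Component
    public class SensorDataPublisher {

        private final KafkaTemplate<Object, Object> kafkaTemplate;

        public SensorDataPublisher(KafkaTemplate<Object, Object> kafkaTemplate) {
            this.kafkaTemplate = kafkaTemplate;
        }

        public void publish(String key, String payload) {
            // Opens and commits a Kafka-local transaction around this send, so the
            // @Primary KafkaTransactionManager never has to join a Spring transaction.
            kafkaTemplate.executeInTransaction(operations -> {
                operations.send("KAFKA_TEST_TOPICS", key, payload);
                return true;
            });
        }
    }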
diff --git a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaSendResultHandler.java b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaSendResultHandler.java
index 1fdac64..1598ee0 100644
--- a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaSendResultHandler.java
+++ b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaSendResultHandler.java
@@ -1,22 +1,22 @@
-package com.casic.missiles.autoconfig;
-
-import org.apache.kafka.clients.producer.ProducerRecord;
-import org.apache.kafka.clients.producer.RecordMetadata;
-import org.springframework.kafka.support.ProducerListener;
-import org.springframework.stereotype.Component;
-
-import javax.annotation.Nullable;
-
-@Component
-public class KafkaSendResultHandler implements ProducerListener<Object, Object> {
-
-    @Override
-    public void onSuccess(ProducerRecord<Object, Object> producerRecord, RecordMetadata recordMetadata) {
-        System.out.println("Message sent successfully: " + producerRecord.toString());
-    }
-
-    @Override
-    public void onError(ProducerRecord<Object, Object> producerRecord, @Nullable RecordMetadata recordMetadata, Exception exception) {
-        System.out.println("Message send failed: " + producerRecord.toString() + exception.getMessage());
-    }
-}
+//package com.casic.missiles.autoconfig;
+//
+//import org.apache.kafka.clients.producer.ProducerRecord;
+//import org.apache.kafka.clients.producer.RecordMetadata;
+//import org.springframework.kafka.support.ProducerListener;
+//import org.springframework.stereotype.Component;
+//
+//import javax.annotation.Nullable;
+//
+//@Component
+//public class KafkaSendResultHandler implements ProducerListener<Object, Object> {
+//
+//    @Override
+//    public void onSuccess(ProducerRecord<Object, Object> producerRecord, RecordMetadata recordMetadata) {
+//        System.out.println("Message sent successfully: " + producerRecord.toString());
+//    }
+//
+//    @Override
+//    public void onError(ProducerRecord<Object, Object> producerRecord, @Nullable RecordMetadata recordMetadata, Exception exception) {
+//        System.out.println("Message send failed: " + producerRecord.toString() + exception.getMessage());
+//    }
+//}
org.springframework.stereotype.Component; +// +//import javax.annotation.Nullable; +// +//@Component +//public class KafkaSendResultHandler implements ProducerListener { +// +// @Override +// public void onSuccess(ProducerRecord producerRecord, RecordMetadata recordMetadata) { +// System.out.println("消息发送成功:" + producerRecord.toString()); +// } +// +// @Override +// public void onError(ProducerRecord producerRecord, @Nullable RecordMetadata recordMetadata, Exception exception) { +// System.out.println("消息发送失败:" + producerRecord.toString() + exception.getMessage()); +// } +//} diff --git a/casic-iot-web/src/main/resources/config/application.yml b/casic-iot-web/src/main/resources/config/application.yml index 6e085f6..8bd96af 100644 --- a/casic-iot-web/src/main/resources/config/application.yml +++ b/casic-iot-web/src/main/resources/config/application.yml @@ -8,35 +8,35 @@ multipart: max-file-size: 50MB max-request-size: 80MB - kafka: - #kafka配置 - producer: - # Kafka服务器 - bootstrap-servers: '111.198.10.15:12502' - transaction-id-prefix: kafkaTx- - retries: 3 - acks: all - batch-size: 16384 - buffer-memory: 1024000 - consumer: - # Kafka服务器 - bootstrap-servers: '111.198.10.15:12502' - group-id: 1 - auto-offset-reset: latest - enable-auto-commit: false - max-poll-records: 3 - properties: - max: - poll: - interval: - ms: 600000 - session: - timeout: - ms: 10000 - listener: - concurrency: 4 - ack-mode: manual_immediate - missing-topics-fatal: false +# kafka: +# #kafka配置 +# producer: +# # Kafka服务器 +# bootstrap-servers: '111.198.10.15:12502' +# transaction-id-prefix: kafkaTx- +# retries: 3 +# acks: all +# batch-size: 16384 +# buffer-memory: 1024000 +# consumer: +# # Kafka服务器 +# bootstrap-servers: '111.198.10.15:12502' +# group-id: 1 +# auto-offset-reset: latest +# enable-auto-commit: false +# max-poll-records: 3 +# properties: +# max: +# poll: +# interval: +# ms: 600000 +# session: +# timeout: +# ms: 10000 +# listener: +# concurrency: 4 +# ack-mode: manual_immediate +# missing-topics-fatal: false mybatis-plus: global-config: #字段策略 0:"所有字段都更新和插入" 1:"只更新和插入非NULL值" 2:"只更新和插入非NULL值且非空字符串" diff --git a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/DirectMemoryReporter.java b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/DirectMemoryReporter.java new file mode 100644 index 0000000..d54b7a9 --- /dev/null +++ b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/DirectMemoryReporter.java @@ -0,0 +1,34 @@ +package com.casic.missiles.autoconfig; + +import io.netty.util.concurrent.GlobalEventExecutor; +import io.netty.util.internal.PlatformDependent; +import lombok.extern.slf4j.Slf4j; +import org.springframework.stereotype.Component; +import org.springframework.util.ReflectionUtils; + +import java.lang.reflect.Field; +import java.util.concurrent.TimeUnit; + +@Slf4j +public class DirectMemoryReporter { + + private Integer _1k = 1024; + private Long directMomery; + + public DirectMemoryReporter() { + Field field = ReflectionUtils.findField(PlatformDependent.class,"DIRECT_MEMORY_COUNTER"); + field.setAccessible(true); + try { + directMomery = PlatformDependent.usedDirectMemory(); + } catch (Exception e) { + } + + GlobalEventExecutor.INSTANCE.scheduleAtFixedRate(this::doReport, 0, 1, TimeUnit.SECONDS); + } + + + + public void doReport() { + log.debug("netty_directMomery_log ,{}k" , directMomery / _1k); + } +} \ No newline at end of file diff --git a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerConfig.java 
b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerConfig.java index 24e4884..e494726 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerConfig.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerConfig.java @@ -1,100 +1,100 @@ -package com.casic.missiles.autoconfig; - -import org.apache.kafka.clients.CommonClientConfigs; -import org.apache.kafka.clients.consumer.ConsumerConfig; -import org.apache.kafka.common.config.SaslConfigs; -import org.apache.kafka.common.security.auth.SecurityProtocol; -import org.apache.kafka.common.serialization.StringDeserializer; -import org.springframework.beans.factory.annotation.Value; -import org.springframework.boot.SpringBootConfiguration; -import org.springframework.context.annotation.Bean; -import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory; -import org.springframework.kafka.config.KafkaListenerContainerFactory; -import org.springframework.kafka.core.DefaultKafkaConsumerFactory; -import org.springframework.kafka.listener.ConcurrentMessageListenerContainer; -import org.springframework.kafka.listener.ContainerProperties; - -import java.util.HashMap; -import java.util.Map; - - -/** - * @author cz - */ -@SpringBootConfiguration -public class KafkaConsumerConfig { - - @Value("${spring.kafka.consumer.bootstrap-servers}") - private String bootstrapServers; - @Value("${spring.kafka.consumer.group-id}") - private String groupId; - @Value("${spring.kafka.consumer.enable-auto-commit}") - private boolean enableAutoCommit; - @Value("${spring.kafka.properties.session.timeout.ms}") - private String sessionTimeout; - @Value("${spring.kafka.properties.max.poll.interval.ms}") - private String maxPollIntervalTime; - @Value("${spring.kafka.consumer.max-poll-records}") - private String maxPollRecords; - @Value("${spring.kafka.consumer.auto-offset-reset}") - private String autoOffsetReset; - @Value("${spring.kafka.listener.concurrency}") - private Integer concurrency; - @Value("${spring.kafka.listener.missing-topics-fatal}") - private boolean missingTopicsFatal; - - private final long pollTimeout = 600000; - - @Bean - public Map consumerConfigs() { - Map propsMap = new HashMap<>(16); - // 服务器地址,不多说配置直接用 - propsMap.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers); - // groupId不多说,直接用 - propsMap.put(ConsumerConfig.GROUP_ID_CONFIG, groupId); - //是否自动提交偏移量,默认值是true,为了避免出现重复数据和数据丢失,可以把它设置为false,然后手动提交偏移量 - propsMap.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, enableAutoCommit); - //自动提交的时间间隔,自动提交开启时生效 - propsMap.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "2000"); - //该属性指定了消费者在读取一个没有偏移量的分区或者偏移量无效的情况下该作何处理: - //我们使用latest:当各分区下有已提交的offset时,从提交的offset开始消费;无提交的offset时,消费新产生的该分区下的数据 - propsMap.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, autoOffsetReset); - //两次poll之间的最大间隔,默认值为5分钟。如果超过这个间隔会触发reBalance - propsMap.put(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, maxPollIntervalTime); - //这个参数定义了poll方法最多可以拉取多少条消息,默认值为500。 - propsMap.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, maxPollRecords); - //当broker多久没有收到consumer的心跳请求后就触发reBalance,默认值是10s - propsMap.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, sessionTimeout); - //序列化(我们这边使用StringDeserializer,与生产者保持一致) - propsMap.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); - propsMap.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); - // 下面四个参数是用户名密码的参数,没有用户名密码可以去掉以下配置 -// propsMap.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, 
SecurityProtocol.SASL_PLAINTEXT.name); -// propsMap.put(SaslConfigs.SASL_MECHANISM, "PLAIN"); -// propsMap.put("java.security.auth.login.config", "10000"); - // 这里username设置用户名, password设置密码我写死到代码里了,可以更改为nacos配置 -// propsMap.put(SaslConfigs.SASL_JAAS_CONFIG, String.format("org.apache.kafka.common.security.plain.PlainLoginModule required username=\"admin\" password=\"admin1234\";")); - return propsMap; - } - - // 消费者工厂,将配置信息加载进去 - @Bean("consumerFactory") - public DefaultKafkaConsumerFactory consumerFactory() { - return new DefaultKafkaConsumerFactory(consumerConfigs()); - } - - @Bean("listenerContainerFactory") - public KafkaListenerContainerFactory> kafkaListenerContainerFactory() { - ConcurrentKafkaListenerContainerFactory factory = new ConcurrentKafkaListenerContainerFactory<>(); - factory.setConsumerFactory(consumerFactory()); - //在侦听器容器中运行的线程数,一般设置为 机器数*分区数 - factory.setConcurrency(concurrency); - //消费监听接口监听的主题不存在时,默认会报错,所以设置为false忽略错误 - factory.getContainerProperties().setMissingTopicsFatal(missingTopicsFatal); - //自动提交关闭,需要设置手动消息确认 - factory.getContainerProperties().setAckMode(ContainerProperties.AckMode.MANUAL_IMMEDIATE); - factory.getContainerProperties().setPollTimeout(pollTimeout); - return factory; - } -} - +//package com.casic.missiles.autoconfig; +// +//import org.apache.kafka.clients.CommonClientConfigs; +//import org.apache.kafka.clients.consumer.ConsumerConfig; +//import org.apache.kafka.common.config.SaslConfigs; +//import org.apache.kafka.common.security.auth.SecurityProtocol; +//import org.apache.kafka.common.serialization.StringDeserializer; +//import org.springframework.beans.factory.annotation.Value; +//import org.springframework.boot.SpringBootConfiguration; +//import org.springframework.context.annotation.Bean; +//import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory; +//import org.springframework.kafka.config.KafkaListenerContainerFactory; +//import org.springframework.kafka.core.DefaultKafkaConsumerFactory; +//import org.springframework.kafka.listener.ConcurrentMessageListenerContainer; +//import org.springframework.kafka.listener.ContainerProperties; +// +//import java.util.HashMap; +//import java.util.Map; +// +// +///** +// * @author cz +// */ +//@SpringBootConfiguration +//public class KafkaConsumerConfig { +// +// @Value("${spring.kafka.consumer.bootstrap-servers}") +// private String bootstrapServers; +// @Value("${spring.kafka.consumer.group-id}") +// private String groupId; +// @Value("${spring.kafka.consumer.enable-auto-commit}") +// private boolean enableAutoCommit; +// @Value("${spring.kafka.properties.session.timeout.ms}") +// private String sessionTimeout; +// @Value("${spring.kafka.properties.max.poll.interval.ms}") +// private String maxPollIntervalTime; +// @Value("${spring.kafka.consumer.max-poll-records}") +// private String maxPollRecords; +// @Value("${spring.kafka.consumer.auto-offset-reset}") +// private String autoOffsetReset; +// @Value("${spring.kafka.listener.concurrency}") +// private Integer concurrency; +// @Value("${spring.kafka.listener.missing-topics-fatal}") +// private boolean missingTopicsFatal; +// +// private final long pollTimeout = 600000; +// +// @Bean +// public Map consumerConfigs() { +// Map propsMap = new HashMap<>(16); +// // 服务器地址,不多说配置直接用 +// propsMap.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers); +// // groupId不多说,直接用 +// propsMap.put(ConsumerConfig.GROUP_ID_CONFIG, groupId); +// //是否自动提交偏移量,默认值是true,为了避免出现重复数据和数据丢失,可以把它设置为false,然后手动提交偏移量 +// 
propsMap.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, enableAutoCommit); +// //自动提交的时间间隔,自动提交开启时生效 +// propsMap.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "2000"); +// //该属性指定了消费者在读取一个没有偏移量的分区或者偏移量无效的情况下该作何处理: +// //我们使用latest:当各分区下有已提交的offset时,从提交的offset开始消费;无提交的offset时,消费新产生的该分区下的数据 +// propsMap.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, autoOffsetReset); +// //两次poll之间的最大间隔,默认值为5分钟。如果超过这个间隔会触发reBalance +// propsMap.put(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, maxPollIntervalTime); +// //这个参数定义了poll方法最多可以拉取多少条消息,默认值为500。 +// propsMap.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, maxPollRecords); +// //当broker多久没有收到consumer的心跳请求后就触发reBalance,默认值是10s +// propsMap.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, sessionTimeout); +// //序列化(我们这边使用StringDeserializer,与生产者保持一致) +// propsMap.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); +// propsMap.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); +// // 下面四个参数是用户名密码的参数,没有用户名密码可以去掉以下配置 +//// propsMap.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, SecurityProtocol.SASL_PLAINTEXT.name); +//// propsMap.put(SaslConfigs.SASL_MECHANISM, "PLAIN"); +//// propsMap.put("java.security.auth.login.config", "10000"); +// // 这里username设置用户名, password设置密码我写死到代码里了,可以更改为nacos配置 +//// propsMap.put(SaslConfigs.SASL_JAAS_CONFIG, String.format("org.apache.kafka.common.security.plain.PlainLoginModule required username=\"admin\" password=\"admin1234\";")); +// return propsMap; +// } +// +// // 消费者工厂,将配置信息加载进去 +// @Bean("consumerFactory") +// public DefaultKafkaConsumerFactory consumerFactory() { +// return new DefaultKafkaConsumerFactory(consumerConfigs()); +// } +// +// @Bean("listenerContainerFactory") +// public KafkaListenerContainerFactory> kafkaListenerContainerFactory() { +// ConcurrentKafkaListenerContainerFactory factory = new ConcurrentKafkaListenerContainerFactory<>(); +// factory.setConsumerFactory(consumerFactory()); +// //在侦听器容器中运行的线程数,一般设置为 机器数*分区数 +// factory.setConcurrency(concurrency); +// //消费监听接口监听的主题不存在时,默认会报错,所以设置为false忽略错误 +// factory.getContainerProperties().setMissingTopicsFatal(missingTopicsFatal); +// //自动提交关闭,需要设置手动消息确认 +// factory.getContainerProperties().setAckMode(ContainerProperties.AckMode.MANUAL_IMMEDIATE); +// factory.getContainerProperties().setPollTimeout(pollTimeout); +// return factory; +// } +//} +// diff --git a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerListener.java b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerListener.java index c50a4e3..6600312 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerListener.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerListener.java @@ -1,31 +1,31 @@ -package com.casic.missiles.autoconfig; - -import org.apache.kafka.clients.consumer.ConsumerRecord; -import org.springframework.kafka.annotation.KafkaListener; -import org.springframework.kafka.config.KafkaListenerEndpointRegistry; -import org.springframework.kafka.support.Acknowledgment; -import org.springframework.web.bind.annotation.RestController; - -import javax.annotation.Resource; - -@RestController() -public class KafkaConsumerListener{ - @Resource - private KafkaListenerEndpointRegistry kafkaListenerEndpointRegistry; - /** - * 监听kafka消息 - * - * 使用autoStartup = "false"必须指定id - */ - @KafkaListener(id = "${spring.kafka.consumer.group-id}", topics = {"KAFKA_TEST_TOPICS"}, autoStartup = "true") - public void listenTopics(ConsumerRecord 
consumerRecord, Acknowledgment ack) { - try { - System.out.println("listenTopics接受消息:" + consumerRecord.value()); - //手动确认 - ack.acknowledge(); - } catch (Exception e) { - System.out.println("消费失败:" + e); - } - } - -} +//package com.casic.missiles.autoconfig; +// +//import org.apache.kafka.clients.consumer.ConsumerRecord; +//import org.springframework.kafka.annotation.KafkaListener; +//import org.springframework.kafka.config.KafkaListenerEndpointRegistry; +//import org.springframework.kafka.support.Acknowledgment; +//import org.springframework.web.bind.annotation.RestController; +// +//import javax.annotation.Resource; +// +//@RestController() +//public class KafkaConsumerListener{ +// @Resource +// private KafkaListenerEndpointRegistry kafkaListenerEndpointRegistry; +// /** +// * 监听kafka消息 +// * +// * 使用autoStartup = "false"必须指定id +// */ +// @KafkaListener(id = "${spring.kafka.consumer.group-id}", topics = {"KAFKA_TEST_TOPICS"}, autoStartup = "true") +// public void listenTopics(ConsumerRecord consumerRecord, Acknowledgment ack) { +// try { +// System.out.println("listenTopics接受消息:" + consumerRecord.value()); +// //手动确认 +// ack.acknowledge(); +// } catch (Exception e) { +// System.out.println("消费失败:" + e); +// } +// } +// +//} diff --git a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerListenerError.java b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerListenerError.java index d6f771a..dbc561c 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerListenerError.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerListenerError.java @@ -1,29 +1,29 @@ -package com.casic.missiles.autoconfig; - -import edu.umd.cs.findbugs.annotations.NonNull; -import org.apache.kafka.clients.consumer.Consumer; -import org.springframework.kafka.listener.KafkaListenerErrorHandler; -import org.springframework.kafka.listener.ListenerExecutionFailedException; -import org.springframework.messaging.Message; -import org.springframework.stereotype.Component; - -@Component -public class KafkaConsumerListenerError implements KafkaListenerErrorHandler { - - - @Override - @NonNull - public Object handleError(Message message, ListenerExecutionFailedException e) { - return new Object(); - } - - @Override - public Object handleError(Message message, ListenerExecutionFailedException exception, Consumer consumer) { - System.out.println("消息详情:" + message); - System.out.println("异常信息::" + exception); - System.out.println("消费者详情::" + consumer.groupMetadata()); - System.out.println("监听主题::" + consumer.listTopics()); - return KafkaListenerErrorHandler.super.handleError(message, exception, consumer); - } - -} +//package com.casic.missiles.autoconfig; +// +//import edu.umd.cs.findbugs.annotations.NonNull; +//import org.apache.kafka.clients.consumer.Consumer; +//import org.springframework.kafka.listener.KafkaListenerErrorHandler; +//import org.springframework.kafka.listener.ListenerExecutionFailedException; +//import org.springframework.messaging.Message; +//import org.springframework.stereotype.Component; +// +//@Component +//public class KafkaConsumerListenerError implements KafkaListenerErrorHandler { +// +// +// @Override +// @NonNull +// public Object handleError(Message message, ListenerExecutionFailedException e) { +// return new Object(); +// } +// +// @Override +// public Object handleError(Message message, ListenerExecutionFailedException exception, Consumer consumer) { +// System.out.println("消息详情:" + message); +// 
System.out.println("异常信息::" + exception); +// System.out.println("消费者详情::" + consumer.groupMetadata()); +// System.out.println("监听主题::" + consumer.listTopics()); +// return KafkaListenerErrorHandler.super.handleError(message, exception, consumer); +// } +// +//} diff --git a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaProviderConfig.java b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaProviderConfig.java index e8e36ec..1d605ed 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaProviderConfig.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaProviderConfig.java @@ -1,85 +1,85 @@ -package com.casic.missiles.autoconfig; - -import org.apache.kafka.clients.CommonClientConfigs; -import org.apache.kafka.clients.producer.ProducerConfig; -import org.apache.kafka.common.config.SaslConfigs; -import org.apache.kafka.common.security.auth.SecurityProtocol; -import org.apache.kafka.common.serialization.StringSerializer; -import org.springframework.beans.factory.annotation.Value; -import org.springframework.boot.SpringBootConfiguration; -import org.springframework.context.annotation.Bean; -import org.springframework.context.annotation.Primary; -import org.springframework.kafka.core.DefaultKafkaProducerFactory; -import org.springframework.kafka.core.KafkaTemplate; -import org.springframework.kafka.core.ProducerFactory; -import org.springframework.kafka.transaction.KafkaTransactionManager; - -import java.util.HashMap; -import java.util.Map; - -/** - * @author cz - */ -@SpringBootConfiguration -public class KafkaProviderConfig { - - @Value("${spring.kafka.producer.bootstrap-servers}") - private String bootstrapServers; - @Value("${spring.kafka.producer.acks}") - private String acks; - @Value("${spring.kafka.producer.retries}") - private String retries; - @Value("${spring.kafka.producer.batch-size}") - private String batchSize; - @Value("${spring.kafka.producer.buffer-memory}") - private String bufferMemory; - - @Bean - public Map producerConfigs() { - Map props = new HashMap<>(16); - props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers); - //响应模式,我们使用acks=all :只有当所有参与复制的节点全部收到消息时,生产者才会收到一个来自服务器的成功响应。 - props.put(ProducerConfig.ACKS_CONFIG, acks); - //发生错误后,消息重发的次数,开启事务必须大于0 - props.put(ProducerConfig.RETRIES_CONFIG, retries); - //当多个消息发送到相同分区时,生产者会将消息打包到一起,以减少请求交互. 而不是一条条发送 - props.put(ProducerConfig.BATCH_SIZE_CONFIG, batchSize); - //有的时刻消息比较少,过了很久,比如5min也没有凑够16KB,这样延时就很大,所以需要一个参数. 
再设置一个时间,到了这个时间, - props.put(ProducerConfig.LINGER_MS_CONFIG, "5000"); - //生产者内存缓冲区的大小 - props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, bufferMemory); - //序列和消费者对应 - props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class); - props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class); - - //用户名密码配置,没有用户名密码可以去掉以下配置 -// props.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, SecurityProtocol.SASL_PLAINTEXT.name); -// props.put(SaslConfigs.SASL_MECHANISM, "PLAIN"); -// props.put("java.security.auth.login.config", "10000"); - // 可以在nacos配置文件中配置 -// props.put(SaslConfigs.SASL_JAAS_CONFIG, String.format("org.apache.kafka.common.security.plain.PlainLoginModule required username=\"admin\" password=\"admin1234\";")); - return props; - } - - // 生产者工厂 - @Bean("kafkaProduceFactory") - public ProducerFactory producerFactory() { - DefaultKafkaProducerFactory factory = new DefaultKafkaProducerFactory<>(producerConfigs()); - factory.setTransactionIdPrefix("kafkaTx-"); - return factory; - } - - // 事务处理 - // 这里的事务处理会和项目中的其他事务起冲突,所以我一般会把@Bean去掉,不用spring代理 - @Bean("kafkaTransactionManager") - @Primary - public KafkaTransactionManager kafkaTransactionManager(ProducerFactory producerFactory) { - return new KafkaTransactionManager(producerFactory); - } - - @Bean - public KafkaTemplate kafkaTemplate() { - return new KafkaTemplate<>(producerFactory()); - } - -} +//package com.casic.missiles.autoconfig; +// +//import org.apache.kafka.clients.CommonClientConfigs; +//import org.apache.kafka.clients.producer.ProducerConfig; +//import org.apache.kafka.common.config.SaslConfigs; +//import org.apache.kafka.common.security.auth.SecurityProtocol; +//import org.apache.kafka.common.serialization.StringSerializer; +//import org.springframework.beans.factory.annotation.Value; +//import org.springframework.boot.SpringBootConfiguration; +//import org.springframework.context.annotation.Bean; +//import org.springframework.context.annotation.Primary; +//import org.springframework.kafka.core.DefaultKafkaProducerFactory; +//import org.springframework.kafka.core.KafkaTemplate; +//import org.springframework.kafka.core.ProducerFactory; +//import org.springframework.kafka.transaction.KafkaTransactionManager; +// +//import java.util.HashMap; +//import java.util.Map; +// +///** +// * @author cz +// */ +//@SpringBootConfiguration +//public class KafkaProviderConfig { +// +// @Value("${spring.kafka.producer.bootstrap-servers}") +// private String bootstrapServers; +// @Value("${spring.kafka.producer.acks}") +// private String acks; +// @Value("${spring.kafka.producer.retries}") +// private String retries; +// @Value("${spring.kafka.producer.batch-size}") +// private String batchSize; +// @Value("${spring.kafka.producer.buffer-memory}") +// private String bufferMemory; +// +// @Bean +// public Map producerConfigs() { +// Map props = new HashMap<>(16); +// props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers); +// //响应模式,我们使用acks=all :只有当所有参与复制的节点全部收到消息时,生产者才会收到一个来自服务器的成功响应。 +// props.put(ProducerConfig.ACKS_CONFIG, acks); +// //发生错误后,消息重发的次数,开启事务必须大于0 +// props.put(ProducerConfig.RETRIES_CONFIG, retries); +// //当多个消息发送到相同分区时,生产者会将消息打包到一起,以减少请求交互. 而不是一条条发送 +// props.put(ProducerConfig.BATCH_SIZE_CONFIG, batchSize); +// //有的时刻消息比较少,过了很久,比如5min也没有凑够16KB,这样延时就很大,所以需要一个参数. 
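Worth noting while this config is commented out: because the factory sets transactionIdPrefix, the resulting KafkaTemplate rejects plain sends outside a transaction. A minimal sketch of the two call styles that would work (topic and payload are placeholders, kafkaTemplate is assumed injected):

// programmatic transaction:
kafkaTemplate.executeInTransaction(tpl -> tpl.send("KAFKA_TEST_TOPICS", "payload"));
// or declarative, relying on the KafkaTransactionManager bean above:
// @Transactional
// public void publish() { kafkaTemplate.send("KAFKA_TEST_TOPICS", "payload"); }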
diff --git a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaSendResultHandler.java b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaSendResultHandler.java
index 1fdac64..1598ee0 100644
--- a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaSendResultHandler.java
+++ b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaSendResultHandler.java
@@ -1,22 +1,22 @@
-package com.casic.missiles.autoconfig;
-
-import org.apache.kafka.clients.producer.ProducerRecord;
-import org.apache.kafka.clients.producer.RecordMetadata;
-import org.springframework.kafka.support.ProducerListener;
-import org.springframework.stereotype.Component;
-
-import javax.annotation.Nullable;
-
-@Component
-public class KafkaSendResultHandler implements ProducerListener<Object, Object> {
-
-    @Override
-    public void onSuccess(ProducerRecord<Object, Object> producerRecord, RecordMetadata recordMetadata) {
-        System.out.println("message sent successfully: " + producerRecord.toString());
-    }
-
-    @Override
-    public void onError(ProducerRecord<Object, Object> producerRecord, @Nullable RecordMetadata recordMetadata, Exception exception) {
-        System.out.println("message send failed: " + producerRecord.toString() + exception.getMessage());
-    }
-}
+//package com.casic.missiles.autoconfig;
+//
+//import org.apache.kafka.clients.producer.ProducerRecord;
+//import org.apache.kafka.clients.producer.RecordMetadata;
+//import org.springframework.kafka.support.ProducerListener;
+//import org.springframework.stereotype.Component;
+//
+//import javax.annotation.Nullable;
+//
+//@Component
+//public class KafkaSendResultHandler implements ProducerListener<Object, Object> {
+//
+//    @Override
+//    public void onSuccess(ProducerRecord<Object, Object> producerRecord, RecordMetadata recordMetadata) {
+//        System.out.println("message sent successfully: " + producerRecord.toString());
+//    }
+//
+//    @Override
+//    public void onError(ProducerRecord<Object, Object> producerRecord, @Nullable RecordMetadata recordMetadata, Exception exception) {
+//        System.out.println("message send failed: " + producerRecord.toString() + exception.getMessage());
+//    }
+//}
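The ProducerListener above takes effect only once it is registered on a template; the kafkaTemplate() bean in KafkaProviderConfig never did that explicitly. A sketch of the wiring that would have attached it (assuming the handler is injected where the template is built):

@Bean
public KafkaTemplate<Object, Object> kafkaTemplate(ProducerFactory<Object, Object> producerFactory,
                                                   KafkaSendResultHandler handler) {
    KafkaTemplate<Object, Object> template = new KafkaTemplate<>(producerFactory);
    template.setProducerListener(handler); // success/failure callback for every send
    return template;
}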
diff --git a/sensorhub-core/src/main/java/com/casic/missiles/cache/ProtocolProcessEventListener.java b/sensorhub-core/src/main/java/com/casic/missiles/cache/ProtocolProcessEventListener.java
index b0bf96d..5144189 100644
--- a/sensorhub-core/src/main/java/com/casic/missiles/cache/ProtocolProcessEventListener.java
+++ b/sensorhub-core/src/main/java/com/casic/missiles/cache/ProtocolProcessEventListener.java
@@ -157,7 +157,7 @@
                 deviceDataList.add(deviceData);
             }
             //batch save
-            deviceDataService.saveBatch(deviceDataList);
+//            deviceDataService.saveBatch(deviceDataList);
         }
     }
     //remove cache
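Commenting out saveBatch silently drops the parsed device data instead of persisting it. A guarded alternative (a sketch only; the property name sensorhub.persist-device-data is hypothetical) would keep the write path switchable without touching code:

@Value("${sensorhub.persist-device-data:false}") // hypothetical toggle, off by default
private boolean persistDeviceData;

...
if (persistDeviceData) {
    //batch save, only when explicitly enabled
    deviceDataService.saveBatch(deviceDataList);
}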
diff --git a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerConfig.java b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerConfig.java
index 24e4884..e494726 100644
--- a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerConfig.java
+++ b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerConfig.java
@@ -1,100 +1,100 @@
-package com.casic.missiles.autoconfig;
-
-import org.apache.kafka.clients.CommonClientConfigs;
-import org.apache.kafka.clients.consumer.ConsumerConfig;
-import org.apache.kafka.common.config.SaslConfigs;
-import org.apache.kafka.common.security.auth.SecurityProtocol;
-import org.apache.kafka.common.serialization.StringDeserializer;
-import org.springframework.beans.factory.annotation.Value;
-import org.springframework.boot.SpringBootConfiguration;
-import org.springframework.context.annotation.Bean;
-import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
-import org.springframework.kafka.config.KafkaListenerContainerFactory;
-import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
-import org.springframework.kafka.listener.ConcurrentMessageListenerContainer;
-import org.springframework.kafka.listener.ContainerProperties;
-
-import java.util.HashMap;
-import java.util.Map;
-
-
-/**
- * @author cz
- */
-@SpringBootConfiguration
-public class KafkaConsumerConfig {
-
-    @Value("${spring.kafka.consumer.bootstrap-servers}")
-    private String bootstrapServers;
-    @Value("${spring.kafka.consumer.group-id}")
-    private String groupId;
-    @Value("${spring.kafka.consumer.enable-auto-commit}")
-    private boolean enableAutoCommit;
-    @Value("${spring.kafka.properties.session.timeout.ms}")
-    private String sessionTimeout;
-    @Value("${spring.kafka.properties.max.poll.interval.ms}")
-    private String maxPollIntervalTime;
-    @Value("${spring.kafka.consumer.max-poll-records}")
-    private String maxPollRecords;
-    @Value("${spring.kafka.consumer.auto-offset-reset}")
-    private String autoOffsetReset;
-    @Value("${spring.kafka.listener.concurrency}")
-    private Integer concurrency;
-    @Value("${spring.kafka.listener.missing-topics-fatal}")
-    private boolean missingTopicsFatal;
-
-    private final long pollTimeout = 600000;
-
-    @Bean
-    public Map<String, Object> consumerConfigs() {
-        Map<String, Object> propsMap = new HashMap<>(16);
-        // Broker address; used as configured
-        propsMap.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
-        // groupId; used as configured
-        propsMap.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
-        //Whether to auto-commit offsets (default true); set it to false and commit manually to avoid duplicate and lost data
-        propsMap.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, enableAutoCommit);
-        //Auto-commit interval; only takes effect when auto-commit is enabled
-        propsMap.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "2000");
-        //What the consumer does when reading a partition with no committed offset, or an invalid one:
-        //we use latest: start from the committed offset when one exists; otherwise consume only newly produced records in the partition
-        propsMap.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, autoOffsetReset);
-        //Maximum interval between two polls, 5 minutes by default; exceeding it triggers a rebalance
-        propsMap.put(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, maxPollIntervalTime);
-        //Maximum number of records a single poll may return, 500 by default
-        propsMap.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, maxPollRecords);
-        //How long the broker waits without a consumer heartbeat before triggering a rebalance, 10s by default
-        propsMap.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, sessionTimeout);
-        //Deserialization (StringDeserializer here, matching the producer)
-        propsMap.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
-        propsMap.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
-        // The next four parameters configure username/password auth; remove them if no credentials are used
-//        propsMap.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, SecurityProtocol.SASL_PLAINTEXT.name);
-//        propsMap.put(SaslConfigs.SASL_MECHANISM, "PLAIN");
-//        propsMap.put("java.security.auth.login.config", "10000");
-        // username/password are hard-coded here; they could be moved to nacos configuration
-//        propsMap.put(SaslConfigs.SASL_JAAS_CONFIG, String.format("org.apache.kafka.common.security.plain.PlainLoginModule required username=\"admin\" password=\"admin1234\";"));
-        return propsMap;
-    }
-
-    // Consumer factory, loading the configuration above
-    @Bean("consumerFactory")
-    public DefaultKafkaConsumerFactory<Object, Object> consumerFactory() {
-        return new DefaultKafkaConsumerFactory<>(consumerConfigs());
-    }
-
-    @Bean("listenerContainerFactory")
-    public KafkaListenerContainerFactory<ConcurrentMessageListenerContainer<Object, Object>> kafkaListenerContainerFactory() {
-        ConcurrentKafkaListenerContainerFactory<Object, Object> factory = new ConcurrentKafkaListenerContainerFactory<>();
-        factory.setConsumerFactory(consumerFactory());
-        //Number of threads in the listener container, generally machines * partitions
-        factory.setConcurrency(concurrency);
-        //By default an error is raised when a listened topic does not exist; false ignores it
-        factory.getContainerProperties().setMissingTopicsFatal(missingTopicsFatal);
-        //Auto-commit is off, so manual message acknowledgment must be configured
-        factory.getContainerProperties().setAckMode(ContainerProperties.AckMode.MANUAL_IMMEDIATE);
-        factory.getContainerProperties().setPollTimeout(pollTimeout);
-        return factory;
-    }
-}
-
+//package com.casic.missiles.autoconfig;
+//
+//import org.apache.kafka.clients.CommonClientConfigs;
+//import org.apache.kafka.clients.consumer.ConsumerConfig;
+//import org.apache.kafka.common.config.SaslConfigs;
+//import org.apache.kafka.common.security.auth.SecurityProtocol;
+//import org.apache.kafka.common.serialization.StringDeserializer;
+//import org.springframework.beans.factory.annotation.Value;
+//import org.springframework.boot.SpringBootConfiguration;
+//import org.springframework.context.annotation.Bean;
+//import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
+//import org.springframework.kafka.config.KafkaListenerContainerFactory;
+//import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
+//import org.springframework.kafka.listener.ConcurrentMessageListenerContainer;
+//import org.springframework.kafka.listener.ContainerProperties;
+//
+//import java.util.HashMap;
+//import java.util.Map;
+//
+//
+///**
+// * @author cz
+// */
+//@SpringBootConfiguration
+//public class KafkaConsumerConfig {
+//
+//    @Value("${spring.kafka.consumer.bootstrap-servers}")
+//    private String bootstrapServers;
+//    @Value("${spring.kafka.consumer.group-id}")
+//    private String groupId;
+//    @Value("${spring.kafka.consumer.enable-auto-commit}")
+//    private boolean enableAutoCommit;
+//    @Value("${spring.kafka.properties.session.timeout.ms}")
+//    private String sessionTimeout;
+//    @Value("${spring.kafka.properties.max.poll.interval.ms}")
+//    private String maxPollIntervalTime;
+//    @Value("${spring.kafka.consumer.max-poll-records}")
+//    private String maxPollRecords;
+//    @Value("${spring.kafka.consumer.auto-offset-reset}")
+//    private String autoOffsetReset;
+//    @Value("${spring.kafka.listener.concurrency}")
+//    private Integer concurrency;
+//    @Value("${spring.kafka.listener.missing-topics-fatal}")
+//    private boolean missingTopicsFatal;
+//
+//    private final long pollTimeout = 600000;
+//
+//    @Bean
+//    public Map<String, Object> consumerConfigs() {
+//        Map<String, Object> propsMap = new HashMap<>(16);
+//        // Broker address; used as configured
+//        propsMap.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
+//        // groupId; used as configured
+//        propsMap.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
+//        //Whether to auto-commit offsets (default true); set it to false and commit manually to avoid duplicate and lost data
+//        propsMap.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, enableAutoCommit);
+//        //Auto-commit interval; only takes effect when auto-commit is enabled
+//        propsMap.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "2000");
+//        //What the consumer does when reading a partition with no committed offset, or an invalid one:
+//        //we use latest: start from the committed offset when one exists; otherwise consume only newly produced records in the partition
+//        propsMap.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, autoOffsetReset);
+//        //Maximum interval between two polls, 5 minutes by default; exceeding it triggers a rebalance
+//        propsMap.put(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, maxPollIntervalTime);
+//        //Maximum number of records a single poll may return, 500 by default
+//        propsMap.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, maxPollRecords);
+//        //How long the broker waits without a consumer heartbeat before triggering a rebalance, 10s by default
+//        propsMap.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, sessionTimeout);
+//        //Deserialization (StringDeserializer here, matching the producer)
+//        propsMap.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
+//        propsMap.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
+//        // The next four parameters configure username/password auth; remove them if no credentials are used
+////        propsMap.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, SecurityProtocol.SASL_PLAINTEXT.name);
+////        propsMap.put(SaslConfigs.SASL_MECHANISM, "PLAIN");
+////        propsMap.put("java.security.auth.login.config", "10000");
+//        // username/password are hard-coded here; they could be moved to nacos configuration
+////        propsMap.put(SaslConfigs.SASL_JAAS_CONFIG, String.format("org.apache.kafka.common.security.plain.PlainLoginModule required username=\"admin\" password=\"admin1234\";"));
+//        return propsMap;
+//    }
+//
+//    // Consumer factory, loading the configuration above
+//    @Bean("consumerFactory")
+//    public DefaultKafkaConsumerFactory<Object, Object> consumerFactory() {
+//        return new DefaultKafkaConsumerFactory<>(consumerConfigs());
+//    }
+//
+//    @Bean("listenerContainerFactory")
+//    public KafkaListenerContainerFactory<ConcurrentMessageListenerContainer<Object, Object>> kafkaListenerContainerFactory() {
+//        ConcurrentKafkaListenerContainerFactory<Object, Object> factory = new ConcurrentKafkaListenerContainerFactory<>();
+//        factory.setConsumerFactory(consumerFactory());
+//        //Number of threads in the listener container, generally machines * partitions
+//        factory.setConcurrency(concurrency);
+//        //By default an error is raised when a listened topic does not exist; false ignores it
+//        factory.getContainerProperties().setMissingTopicsFatal(missingTopicsFatal);
+//        //Auto-commit is off, so manual message acknowledgment must be configured
+//        factory.getContainerProperties().setAckMode(ContainerProperties.AckMode.MANUAL_IMMEDIATE);
+//        factory.getContainerProperties().setPollTimeout(pollTimeout);
+//        return factory;
+//    }
+//}
+//
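For reference, listeners opt into this container factory by name; with it disabled, @KafkaListener methods fall back to Spring Boot's auto-configured factory. A minimal sketch of a consumer bound to it, matching the MANUAL_IMMEDIATE ack mode (topic name is a placeholder):

@KafkaListener(topics = "KAFKA_TEST_TOPICS", containerFactory = "listenerContainerFactory")
public void consume(ConsumerRecord<Object, Object> record, Acknowledgment ack) {
    // process record.value(), then commit this offset immediately
    ack.acknowledge();
}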
diff --git a/sensorhub-core/src/main/java/com/casic/missiles/controller/AepCommandSend.java b/sensorhub-core/src/main/java/com/casic/missiles/controller/AepCommandSend.java
index 2d0aeae..5e3ba38 100644
--- a/sensorhub-core/src/main/java/com/casic/missiles/controller/AepCommandSend.java
+++ b/sensorhub-core/src/main/java/com/casic/missiles/controller/AepCommandSend.java
@@ -64,7 +64,7 @@
         request.setParamMasterKey("0a77886fae4f4ff68d926adeb3a3ef5b"); // single value
         request.setBody(JSONObject.toJSONString(busConfigParam).getBytes());
         CreateCommandLwm2mProfileResponse msgResponse = client.CreateCommandLwm2mProfile(request);
-        log.info("-----" + msgResponse.getMessage());
+        log.info("send status-----" + msgResponse.getMessage());
     } catch (Exception ex) {
     } finally {
         client.shutdown();
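One small follow-up on the new log line: with SLF4J the same message can use a placeholder, which skips the string concatenation whenever INFO is disabled, e.g.:

log.info("send status-----{}", msgResponse.getMessage());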
+import java.util.concurrent.TimeUnit; + +@Slf4j +public class DirectMemoryReporter { + + private Integer _1k = 1024; + private Long directMomery; + + public DirectMemoryReporter() { + Field field = ReflectionUtils.findField(PlatformDependent.class,"DIRECT_MEMORY_COUNTER"); + field.setAccessible(true); + try { + directMomery = PlatformDependent.usedDirectMemory(); + } catch (Exception e) { + } + + GlobalEventExecutor.INSTANCE.scheduleAtFixedRate(this::doReport, 0, 1, TimeUnit.SECONDS); + } + + + + public void doReport() { + log.debug("netty_directMomery_log ,{}k" , directMomery / _1k); + } +} \ No newline at end of file diff --git a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerConfig.java b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerConfig.java index 24e4884..e494726 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerConfig.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerConfig.java @@ -1,100 +1,100 @@ -package com.casic.missiles.autoconfig; - -import org.apache.kafka.clients.CommonClientConfigs; -import org.apache.kafka.clients.consumer.ConsumerConfig; -import org.apache.kafka.common.config.SaslConfigs; -import org.apache.kafka.common.security.auth.SecurityProtocol; -import org.apache.kafka.common.serialization.StringDeserializer; -import org.springframework.beans.factory.annotation.Value; -import org.springframework.boot.SpringBootConfiguration; -import org.springframework.context.annotation.Bean; -import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory; -import org.springframework.kafka.config.KafkaListenerContainerFactory; -import org.springframework.kafka.core.DefaultKafkaConsumerFactory; -import org.springframework.kafka.listener.ConcurrentMessageListenerContainer; -import org.springframework.kafka.listener.ContainerProperties; - -import java.util.HashMap; -import java.util.Map; - - -/** - * @author cz - */ -@SpringBootConfiguration -public class KafkaConsumerConfig { - - @Value("${spring.kafka.consumer.bootstrap-servers}") - private String bootstrapServers; - @Value("${spring.kafka.consumer.group-id}") - private String groupId; - @Value("${spring.kafka.consumer.enable-auto-commit}") - private boolean enableAutoCommit; - @Value("${spring.kafka.properties.session.timeout.ms}") - private String sessionTimeout; - @Value("${spring.kafka.properties.max.poll.interval.ms}") - private String maxPollIntervalTime; - @Value("${spring.kafka.consumer.max-poll-records}") - private String maxPollRecords; - @Value("${spring.kafka.consumer.auto-offset-reset}") - private String autoOffsetReset; - @Value("${spring.kafka.listener.concurrency}") - private Integer concurrency; - @Value("${spring.kafka.listener.missing-topics-fatal}") - private boolean missingTopicsFatal; - - private final long pollTimeout = 600000; - - @Bean - public Map consumerConfigs() { - Map propsMap = new HashMap<>(16); - // 服务器地址,不多说配置直接用 - propsMap.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers); - // groupId不多说,直接用 - propsMap.put(ConsumerConfig.GROUP_ID_CONFIG, groupId); - //是否自动提交偏移量,默认值是true,为了避免出现重复数据和数据丢失,可以把它设置为false,然后手动提交偏移量 - propsMap.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, enableAutoCommit); - //自动提交的时间间隔,自动提交开启时生效 - propsMap.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "2000"); - //该属性指定了消费者在读取一个没有偏移量的分区或者偏移量无效的情况下该作何处理: - //我们使用latest:当各分区下有已提交的offset时,从提交的offset开始消费;无提交的offset时,消费新产生的该分区下的数据 - 
propsMap.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, autoOffsetReset); - //两次poll之间的最大间隔,默认值为5分钟。如果超过这个间隔会触发reBalance - propsMap.put(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, maxPollIntervalTime); - //这个参数定义了poll方法最多可以拉取多少条消息,默认值为500。 - propsMap.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, maxPollRecords); - //当broker多久没有收到consumer的心跳请求后就触发reBalance,默认值是10s - propsMap.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, sessionTimeout); - //序列化(我们这边使用StringDeserializer,与生产者保持一致) - propsMap.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); - propsMap.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); - // 下面四个参数是用户名密码的参数,没有用户名密码可以去掉以下配置 -// propsMap.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, SecurityProtocol.SASL_PLAINTEXT.name); -// propsMap.put(SaslConfigs.SASL_MECHANISM, "PLAIN"); -// propsMap.put("java.security.auth.login.config", "10000"); - // 这里username设置用户名, password设置密码我写死到代码里了,可以更改为nacos配置 -// propsMap.put(SaslConfigs.SASL_JAAS_CONFIG, String.format("org.apache.kafka.common.security.plain.PlainLoginModule required username=\"admin\" password=\"admin1234\";")); - return propsMap; - } - - // 消费者工厂,将配置信息加载进去 - @Bean("consumerFactory") - public DefaultKafkaConsumerFactory consumerFactory() { - return new DefaultKafkaConsumerFactory(consumerConfigs()); - } - - @Bean("listenerContainerFactory") - public KafkaListenerContainerFactory> kafkaListenerContainerFactory() { - ConcurrentKafkaListenerContainerFactory factory = new ConcurrentKafkaListenerContainerFactory<>(); - factory.setConsumerFactory(consumerFactory()); - //在侦听器容器中运行的线程数,一般设置为 机器数*分区数 - factory.setConcurrency(concurrency); - //消费监听接口监听的主题不存在时,默认会报错,所以设置为false忽略错误 - factory.getContainerProperties().setMissingTopicsFatal(missingTopicsFatal); - //自动提交关闭,需要设置手动消息确认 - factory.getContainerProperties().setAckMode(ContainerProperties.AckMode.MANUAL_IMMEDIATE); - factory.getContainerProperties().setPollTimeout(pollTimeout); - return factory; - } -} - +//package com.casic.missiles.autoconfig; +// +//import org.apache.kafka.clients.CommonClientConfigs; +//import org.apache.kafka.clients.consumer.ConsumerConfig; +//import org.apache.kafka.common.config.SaslConfigs; +//import org.apache.kafka.common.security.auth.SecurityProtocol; +//import org.apache.kafka.common.serialization.StringDeserializer; +//import org.springframework.beans.factory.annotation.Value; +//import org.springframework.boot.SpringBootConfiguration; +//import org.springframework.context.annotation.Bean; +//import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory; +//import org.springframework.kafka.config.KafkaListenerContainerFactory; +//import org.springframework.kafka.core.DefaultKafkaConsumerFactory; +//import org.springframework.kafka.listener.ConcurrentMessageListenerContainer; +//import org.springframework.kafka.listener.ContainerProperties; +// +//import java.util.HashMap; +//import java.util.Map; +// +// +///** +// * @author cz +// */ +//@SpringBootConfiguration +//public class KafkaConsumerConfig { +// +// @Value("${spring.kafka.consumer.bootstrap-servers}") +// private String bootstrapServers; +// @Value("${spring.kafka.consumer.group-id}") +// private String groupId; +// @Value("${spring.kafka.consumer.enable-auto-commit}") +// private boolean enableAutoCommit; +// @Value("${spring.kafka.properties.session.timeout.ms}") +// private String sessionTimeout; +// @Value("${spring.kafka.properties.max.poll.interval.ms}") +// private String maxPollIntervalTime; +// 
@Value("${spring.kafka.consumer.max-poll-records}") +// private String maxPollRecords; +// @Value("${spring.kafka.consumer.auto-offset-reset}") +// private String autoOffsetReset; +// @Value("${spring.kafka.listener.concurrency}") +// private Integer concurrency; +// @Value("${spring.kafka.listener.missing-topics-fatal}") +// private boolean missingTopicsFatal; +// +// private final long pollTimeout = 600000; +// +// @Bean +// public Map consumerConfigs() { +// Map propsMap = new HashMap<>(16); +// // 服务器地址,不多说配置直接用 +// propsMap.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers); +// // groupId不多说,直接用 +// propsMap.put(ConsumerConfig.GROUP_ID_CONFIG, groupId); +// //是否自动提交偏移量,默认值是true,为了避免出现重复数据和数据丢失,可以把它设置为false,然后手动提交偏移量 +// propsMap.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, enableAutoCommit); +// //自动提交的时间间隔,自动提交开启时生效 +// propsMap.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "2000"); +// //该属性指定了消费者在读取一个没有偏移量的分区或者偏移量无效的情况下该作何处理: +// //我们使用latest:当各分区下有已提交的offset时,从提交的offset开始消费;无提交的offset时,消费新产生的该分区下的数据 +// propsMap.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, autoOffsetReset); +// //两次poll之间的最大间隔,默认值为5分钟。如果超过这个间隔会触发reBalance +// propsMap.put(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, maxPollIntervalTime); +// //这个参数定义了poll方法最多可以拉取多少条消息,默认值为500。 +// propsMap.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, maxPollRecords); +// //当broker多久没有收到consumer的心跳请求后就触发reBalance,默认值是10s +// propsMap.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, sessionTimeout); +// //序列化(我们这边使用StringDeserializer,与生产者保持一致) +// propsMap.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); +// propsMap.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); +// // 下面四个参数是用户名密码的参数,没有用户名密码可以去掉以下配置 +//// propsMap.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, SecurityProtocol.SASL_PLAINTEXT.name); +//// propsMap.put(SaslConfigs.SASL_MECHANISM, "PLAIN"); +//// propsMap.put("java.security.auth.login.config", "10000"); +// // 这里username设置用户名, password设置密码我写死到代码里了,可以更改为nacos配置 +//// propsMap.put(SaslConfigs.SASL_JAAS_CONFIG, String.format("org.apache.kafka.common.security.plain.PlainLoginModule required username=\"admin\" password=\"admin1234\";")); +// return propsMap; +// } +// +// // 消费者工厂,将配置信息加载进去 +// @Bean("consumerFactory") +// public DefaultKafkaConsumerFactory consumerFactory() { +// return new DefaultKafkaConsumerFactory(consumerConfigs()); +// } +// +// @Bean("listenerContainerFactory") +// public KafkaListenerContainerFactory> kafkaListenerContainerFactory() { +// ConcurrentKafkaListenerContainerFactory factory = new ConcurrentKafkaListenerContainerFactory<>(); +// factory.setConsumerFactory(consumerFactory()); +// //在侦听器容器中运行的线程数,一般设置为 机器数*分区数 +// factory.setConcurrency(concurrency); +// //消费监听接口监听的主题不存在时,默认会报错,所以设置为false忽略错误 +// factory.getContainerProperties().setMissingTopicsFatal(missingTopicsFatal); +// //自动提交关闭,需要设置手动消息确认 +// factory.getContainerProperties().setAckMode(ContainerProperties.AckMode.MANUAL_IMMEDIATE); +// factory.getContainerProperties().setPollTimeout(pollTimeout); +// return factory; +// } +//} +// diff --git a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerListener.java b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerListener.java index c50a4e3..6600312 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerListener.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerListener.java @@ -1,31 +1,31 @@ -package 
com.casic.missiles.autoconfig; - -import org.apache.kafka.clients.consumer.ConsumerRecord; -import org.springframework.kafka.annotation.KafkaListener; -import org.springframework.kafka.config.KafkaListenerEndpointRegistry; -import org.springframework.kafka.support.Acknowledgment; -import org.springframework.web.bind.annotation.RestController; - -import javax.annotation.Resource; - -@RestController() -public class KafkaConsumerListener{ - @Resource - private KafkaListenerEndpointRegistry kafkaListenerEndpointRegistry; - /** - * 监听kafka消息 - * - * 使用autoStartup = "false"必须指定id - */ - @KafkaListener(id = "${spring.kafka.consumer.group-id}", topics = {"KAFKA_TEST_TOPICS"}, autoStartup = "true") - public void listenTopics(ConsumerRecord consumerRecord, Acknowledgment ack) { - try { - System.out.println("listenTopics接受消息:" + consumerRecord.value()); - //手动确认 - ack.acknowledge(); - } catch (Exception e) { - System.out.println("消费失败:" + e); - } - } - -} +//package com.casic.missiles.autoconfig; +// +//import org.apache.kafka.clients.consumer.ConsumerRecord; +//import org.springframework.kafka.annotation.KafkaListener; +//import org.springframework.kafka.config.KafkaListenerEndpointRegistry; +//import org.springframework.kafka.support.Acknowledgment; +//import org.springframework.web.bind.annotation.RestController; +// +//import javax.annotation.Resource; +// +//@RestController() +//public class KafkaConsumerListener{ +// @Resource +// private KafkaListenerEndpointRegistry kafkaListenerEndpointRegistry; +// /** +// * 监听kafka消息 +// * +// * 使用autoStartup = "false"必须指定id +// */ +// @KafkaListener(id = "${spring.kafka.consumer.group-id}", topics = {"KAFKA_TEST_TOPICS"}, autoStartup = "true") +// public void listenTopics(ConsumerRecord consumerRecord, Acknowledgment ack) { +// try { +// System.out.println("listenTopics接受消息:" + consumerRecord.value()); +// //手动确认 +// ack.acknowledge(); +// } catch (Exception e) { +// System.out.println("消费失败:" + e); +// } +// } +// +//} diff --git a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerListenerError.java b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerListenerError.java index d6f771a..dbc561c 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerListenerError.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerListenerError.java @@ -1,29 +1,29 @@ -package com.casic.missiles.autoconfig; - -import edu.umd.cs.findbugs.annotations.NonNull; -import org.apache.kafka.clients.consumer.Consumer; -import org.springframework.kafka.listener.KafkaListenerErrorHandler; -import org.springframework.kafka.listener.ListenerExecutionFailedException; -import org.springframework.messaging.Message; -import org.springframework.stereotype.Component; - -@Component -public class KafkaConsumerListenerError implements KafkaListenerErrorHandler { - - - @Override - @NonNull - public Object handleError(Message message, ListenerExecutionFailedException e) { - return new Object(); - } - - @Override - public Object handleError(Message message, ListenerExecutionFailedException exception, Consumer consumer) { - System.out.println("消息详情:" + message); - System.out.println("异常信息::" + exception); - System.out.println("消费者详情::" + consumer.groupMetadata()); - System.out.println("监听主题::" + consumer.listTopics()); - return KafkaListenerErrorHandler.super.handleError(message, exception, consumer); - } - -} +//package com.casic.missiles.autoconfig; +// +//import 
edu.umd.cs.findbugs.annotations.NonNull; +//import org.apache.kafka.clients.consumer.Consumer; +//import org.springframework.kafka.listener.KafkaListenerErrorHandler; +//import org.springframework.kafka.listener.ListenerExecutionFailedException; +//import org.springframework.messaging.Message; +//import org.springframework.stereotype.Component; +// +//@Component +//public class KafkaConsumerListenerError implements KafkaListenerErrorHandler { +// +// +// @Override +// @NonNull +// public Object handleError(Message message, ListenerExecutionFailedException e) { +// return new Object(); +// } +// +// @Override +// public Object handleError(Message message, ListenerExecutionFailedException exception, Consumer consumer) { +// System.out.println("消息详情:" + message); +// System.out.println("异常信息::" + exception); +// System.out.println("消费者详情::" + consumer.groupMetadata()); +// System.out.println("监听主题::" + consumer.listTopics()); +// return KafkaListenerErrorHandler.super.handleError(message, exception, consumer); +// } +// +//} diff --git a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaProviderConfig.java b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaProviderConfig.java index e8e36ec..1d605ed 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaProviderConfig.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaProviderConfig.java @@ -1,85 +1,85 @@ -package com.casic.missiles.autoconfig; - -import org.apache.kafka.clients.CommonClientConfigs; -import org.apache.kafka.clients.producer.ProducerConfig; -import org.apache.kafka.common.config.SaslConfigs; -import org.apache.kafka.common.security.auth.SecurityProtocol; -import org.apache.kafka.common.serialization.StringSerializer; -import org.springframework.beans.factory.annotation.Value; -import org.springframework.boot.SpringBootConfiguration; -import org.springframework.context.annotation.Bean; -import org.springframework.context.annotation.Primary; -import org.springframework.kafka.core.DefaultKafkaProducerFactory; -import org.springframework.kafka.core.KafkaTemplate; -import org.springframework.kafka.core.ProducerFactory; -import org.springframework.kafka.transaction.KafkaTransactionManager; - -import java.util.HashMap; -import java.util.Map; - -/** - * @author cz - */ -@SpringBootConfiguration -public class KafkaProviderConfig { - - @Value("${spring.kafka.producer.bootstrap-servers}") - private String bootstrapServers; - @Value("${spring.kafka.producer.acks}") - private String acks; - @Value("${spring.kafka.producer.retries}") - private String retries; - @Value("${spring.kafka.producer.batch-size}") - private String batchSize; - @Value("${spring.kafka.producer.buffer-memory}") - private String bufferMemory; - - @Bean - public Map producerConfigs() { - Map props = new HashMap<>(16); - props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers); - //响应模式,我们使用acks=all :只有当所有参与复制的节点全部收到消息时,生产者才会收到一个来自服务器的成功响应。 - props.put(ProducerConfig.ACKS_CONFIG, acks); - //发生错误后,消息重发的次数,开启事务必须大于0 - props.put(ProducerConfig.RETRIES_CONFIG, retries); - //当多个消息发送到相同分区时,生产者会将消息打包到一起,以减少请求交互. 而不是一条条发送 - props.put(ProducerConfig.BATCH_SIZE_CONFIG, batchSize); - //有的时刻消息比较少,过了很久,比如5min也没有凑够16KB,这样延时就很大,所以需要一个参数. 
再设置一个时间,到了这个时间, - props.put(ProducerConfig.LINGER_MS_CONFIG, "5000"); - //生产者内存缓冲区的大小 - props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, bufferMemory); - //序列和消费者对应 - props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class); - props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class); - - //用户名密码配置,没有用户名密码可以去掉以下配置 -// props.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, SecurityProtocol.SASL_PLAINTEXT.name); -// props.put(SaslConfigs.SASL_MECHANISM, "PLAIN"); -// props.put("java.security.auth.login.config", "10000"); - // 可以在nacos配置文件中配置 -// props.put(SaslConfigs.SASL_JAAS_CONFIG, String.format("org.apache.kafka.common.security.plain.PlainLoginModule required username=\"admin\" password=\"admin1234\";")); - return props; - } - - // 生产者工厂 - @Bean("kafkaProduceFactory") - public ProducerFactory producerFactory() { - DefaultKafkaProducerFactory factory = new DefaultKafkaProducerFactory<>(producerConfigs()); - factory.setTransactionIdPrefix("kafkaTx-"); - return factory; - } - - // 事务处理 - // 这里的事务处理会和项目中的其他事务起冲突,所以我一般会把@Bean去掉,不用spring代理 - @Bean("kafkaTransactionManager") - @Primary - public KafkaTransactionManager kafkaTransactionManager(ProducerFactory producerFactory) { - return new KafkaTransactionManager(producerFactory); - } - - @Bean - public KafkaTemplate kafkaTemplate() { - return new KafkaTemplate<>(producerFactory()); - } - -} +//package com.casic.missiles.autoconfig; +// +//import org.apache.kafka.clients.CommonClientConfigs; +//import org.apache.kafka.clients.producer.ProducerConfig; +//import org.apache.kafka.common.config.SaslConfigs; +//import org.apache.kafka.common.security.auth.SecurityProtocol; +//import org.apache.kafka.common.serialization.StringSerializer; +//import org.springframework.beans.factory.annotation.Value; +//import org.springframework.boot.SpringBootConfiguration; +//import org.springframework.context.annotation.Bean; +//import org.springframework.context.annotation.Primary; +//import org.springframework.kafka.core.DefaultKafkaProducerFactory; +//import org.springframework.kafka.core.KafkaTemplate; +//import org.springframework.kafka.core.ProducerFactory; +//import org.springframework.kafka.transaction.KafkaTransactionManager; +// +//import java.util.HashMap; +//import java.util.Map; +// +///** +// * @author cz +// */ +//@SpringBootConfiguration +//public class KafkaProviderConfig { +// +// @Value("${spring.kafka.producer.bootstrap-servers}") +// private String bootstrapServers; +// @Value("${spring.kafka.producer.acks}") +// private String acks; +// @Value("${spring.kafka.producer.retries}") +// private String retries; +// @Value("${spring.kafka.producer.batch-size}") +// private String batchSize; +// @Value("${spring.kafka.producer.buffer-memory}") +// private String bufferMemory; +// +// @Bean +// public Map producerConfigs() { +// Map props = new HashMap<>(16); +// props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers); +// //响应模式,我们使用acks=all :只有当所有参与复制的节点全部收到消息时,生产者才会收到一个来自服务器的成功响应。 +// props.put(ProducerConfig.ACKS_CONFIG, acks); +// //发生错误后,消息重发的次数,开启事务必须大于0 +// props.put(ProducerConfig.RETRIES_CONFIG, retries); +// //当多个消息发送到相同分区时,生产者会将消息打包到一起,以减少请求交互. 而不是一条条发送 +// props.put(ProducerConfig.BATCH_SIZE_CONFIG, batchSize); +// //有的时刻消息比较少,过了很久,比如5min也没有凑够16KB,这样延时就很大,所以需要一个参数. 
diff --git a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaSendResultHandler.java b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaSendResultHandler.java
index 1fdac64..1598ee0 100644
--- a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaSendResultHandler.java
+++ b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaSendResultHandler.java
@@ -1,22 +1,22 @@
-package com.casic.missiles.autoconfig;
-
-import org.apache.kafka.clients.producer.ProducerRecord;
-import org.apache.kafka.clients.producer.RecordMetadata;
-import org.springframework.kafka.support.ProducerListener;
-import org.springframework.stereotype.Component;
-
-import javax.annotation.Nullable;
-
-@Component
-public class KafkaSendResultHandler implements ProducerListener<Object, Object> {
-
-    @Override
-    public void onSuccess(ProducerRecord<Object, Object> producerRecord, RecordMetadata recordMetadata) {
-        System.out.println("Message sent successfully: " + producerRecord.toString());
-    }
-
-    @Override
-    public void onError(ProducerRecord<Object, Object> producerRecord, @Nullable RecordMetadata recordMetadata, Exception exception) {
-        System.out.println("Message send failed: " + producerRecord.toString() + exception.getMessage());
-    }
-}
+//package com.casic.missiles.autoconfig;
+//
+//import org.apache.kafka.clients.producer.ProducerRecord;
+//import org.apache.kafka.clients.producer.RecordMetadata;
+//import org.springframework.kafka.support.ProducerListener;
+//import org.springframework.stereotype.Component;
+//
+//import javax.annotation.Nullable;
+//
+//@Component
+//public class KafkaSendResultHandler implements ProducerListener<Object, Object> {
+//
+//    @Override
+//    public void onSuccess(ProducerRecord<Object, Object> producerRecord, RecordMetadata recordMetadata) {
+//        System.out.println("Message sent successfully: " + producerRecord.toString());
+//    }
+//
+//    @Override
+//    public void onError(ProducerRecord<Object, Object> producerRecord, @Nullable RecordMetadata recordMetadata, Exception exception) {
+//        System.out.println("Message send failed: " + producerRecord.toString() + exception.getMessage());
+//    }
+//}
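KafkaSendResultHandler only fires once it is attached to a template; a minimal wiring sketch, assuming the commented-out template and handler beans above were re-enabled:

import javax.annotation.PostConstruct;

import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.stereotype.Component;

@Component
public class ProducerListenerWiring {
    private final KafkaTemplate<Object, Object> kafkaTemplate;
    private final KafkaSendResultHandler handler; // the listener from the hunk above

    public ProducerListenerWiring(KafkaTemplate<Object, Object> kafkaTemplate, KafkaSendResultHandler handler) {
        this.kafkaTemplate = kafkaTemplate;
        this.handler = handler;
    }

    @PostConstruct
    public void wire() {
        // After this, onSuccess/onError run for every send through the template.
        kafkaTemplate.setProducerListener(handler);
    }
}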
diff --git a/sensorhub-core/src/main/java/com/casic/missiles/cache/ProtocolProcessEventListener.java b/sensorhub-core/src/main/java/com/casic/missiles/cache/ProtocolProcessEventListener.java
index b0bf96d..5144189 100644
--- a/sensorhub-core/src/main/java/com/casic/missiles/cache/ProtocolProcessEventListener.java
+++ b/sensorhub-core/src/main/java/com/casic/missiles/cache/ProtocolProcessEventListener.java
@@ -157,7 +157,7 @@
                 deviceDataList.add(deviceData);
             }
             // Batch save
-            deviceDataService.saveBatch(deviceDataList);
+//            deviceDataService.saveBatch(deviceDataList);
         }
     }
     // Evict the cache
diff --git a/sensorhub-core/src/main/java/com/casic/missiles/controller/AepCommandSend.java b/sensorhub-core/src/main/java/com/casic/missiles/controller/AepCommandSend.java
index 2d0aeae..5e3ba38 100644
--- a/sensorhub-core/src/main/java/com/casic/missiles/controller/AepCommandSend.java
+++ b/sensorhub-core/src/main/java/com/casic/missiles/controller/AepCommandSend.java
@@ -64,7 +64,7 @@
         request.setParamMasterKey("0a77886fae4f4ff68d926adeb3a3ef5b"); // single value
         request.setBody(JSONObject.toJSONString(busConfigParam).getBytes());
         CreateCommandLwm2mProfileResponse msgResponse = client.CreateCommandLwm2mProfile(request);
-        log.info("-----" + msgResponse.getMessage());
+        log.info("send status-----" + msgResponse.getMessage());
     } catch (Exception ex) {
     } finally {
         client.shutdown();
diff --git a/sensorhub-core/src/main/java/com/casic/missiles/controller/KafkaController.java b/sensorhub-core/src/main/java/com/casic/missiles/controller/KafkaController.java
deleted file mode 100644
index d679bf1..0000000
--- a/sensorhub-core/src/main/java/com/casic/missiles/controller/KafkaController.java
+++ /dev/null
@@ -1,59 +0,0 @@
-package com.casic.missiles.controller;
-
-import com.casic.missiles.autoconfig.KafkaSendResultHandler;
-import com.casic.missiles.parser.sender.impl.KafkaSubscribe;
-import org.apache.kafka.clients.producer.ProducerRecord;
-import org.springframework.beans.factory.annotation.Autowired;
-import org.springframework.kafka.core.KafkaTemplate;
-import org.springframework.kafka.support.KafkaHeaders;
-import org.springframework.messaging.MessageHeaders;
-import org.springframework.messaging.support.GenericMessage;
-import org.springframework.transaction.annotation.Transactional;
-import org.springframework.web.bind.annotation.RequestMapping;
-import org.springframework.web.bind.annotation.RestController;
-
-import java.util.HashMap;
-import java.util.Map;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.TimeoutException;
-
-@RestController
-@RequestMapping("/provider")
-// This annotation makes the class transactional in Spring Boot; Kafka transactions are enabled in the Kafka config, and it errors without it.
-public class KafkaController {
-
-    @Autowired
-    private KafkaSubscribe kafkaSubscribe;
-
-    @RequestMapping("/send")
-    @Transactional
-    public String sendMultiple() throws Exception {
-        kafkaSubscribe.publishDataSubscribe(null, null);
-        return "okk";
-    }
-
-
-//    /**
-//     * Kafka offers several ways to build a message.
-//     *
-//     * @throws ExecutionException
-//     * @throws InterruptedException
-//     * @throws TimeoutException
-//     */
-//    public void sendDemo() throws ExecutionException, InterruptedException, TimeoutException {
-//        // The trailing get() makes the send synchronous; the optional timeout throws if exceeded, although the send itself still goes through.
-//        kafkaTemplate.send("topic1", "message for topic1").get();
-//        // Send with a ProducerRecord
-//        ProducerRecord producerRecord = new ProducerRecord<>("topic.quick.demo", "use ProducerRecord to send message");
-//        kafkaTemplate.send(producerRecord);
-//        // Send with a Message
-//        Map map = new HashMap<>();
-//        map.put(KafkaHeaders.TOPIC, "topic.quick.demo");
-//        map.put(KafkaHeaders.PARTITION_ID, 0);
-//        map.put(KafkaHeaders.MESSAGE_KEY, 0);
-//        GenericMessage message = new GenericMessage<>("use Message to send message", new MessageHeaders(map));
-//        kafkaTemplate.send(message);
-//    }
-
-}
-
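The deleted controller paired @Transactional with the transactional producer factory; the same all-or-nothing send can also be done locally with executeInTransaction, sketched here under the assumption that a template like the commented-out one is available:

import org.springframework.kafka.core.KafkaTemplate;

public class TransactionalSendSketch {
    // Both sends commit together or abort together; this requires a
    // ProducerFactory with a transaction-id prefix, as in the disabled
    // KafkaProviderConfig above.
    public static String sendPair(KafkaTemplate<String, String> kafkaTemplate) {
        kafkaTemplate.executeInTransaction(ops -> {
            ops.send("topic1", "first message");
            ops.send("topic1", "second message");
            return null;
        });
        return "ok";
    }
}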
diff --git a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerConfig.java b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerConfig.java
index 24e4884..e494726 100644
--- a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerConfig.java
+++ b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerConfig.java
@@ -1,100 +1,100 @@
-package com.casic.missiles.autoconfig;
-
-import org.apache.kafka.clients.CommonClientConfigs;
-import org.apache.kafka.clients.consumer.ConsumerConfig;
-import org.apache.kafka.common.config.SaslConfigs;
-import org.apache.kafka.common.security.auth.SecurityProtocol;
-import org.apache.kafka.common.serialization.StringDeserializer;
-import org.springframework.beans.factory.annotation.Value;
-import org.springframework.boot.SpringBootConfiguration;
-import org.springframework.context.annotation.Bean;
-import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
-import org.springframework.kafka.config.KafkaListenerContainerFactory;
-import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
-import org.springframework.kafka.listener.ConcurrentMessageListenerContainer;
-import org.springframework.kafka.listener.ContainerProperties;
-
-import java.util.HashMap;
-import java.util.Map;
-
-
-/**
- * @author cz
- */
-@SpringBootConfiguration
-public class KafkaConsumerConfig {
-
-    @Value("${spring.kafka.consumer.bootstrap-servers}")
-    private String bootstrapServers;
-    @Value("${spring.kafka.consumer.group-id}")
-    private String groupId;
-    @Value("${spring.kafka.consumer.enable-auto-commit}")
-    private boolean enableAutoCommit;
-    @Value("${spring.kafka.properties.session.timeout.ms}")
-    private String sessionTimeout;
-    @Value("${spring.kafka.properties.max.poll.interval.ms}")
-    private String maxPollIntervalTime;
-    @Value("${spring.kafka.consumer.max-poll-records}")
-    private String maxPollRecords;
-    @Value("${spring.kafka.consumer.auto-offset-reset}")
-    private String autoOffsetReset;
-    @Value("${spring.kafka.listener.concurrency}")
-    private Integer concurrency;
-    @Value("${spring.kafka.listener.missing-topics-fatal}")
-    private boolean missingTopicsFatal;
-
-    private final long pollTimeout = 600000;
-
-    @Bean
-    public Map<String, Object> consumerConfigs() {
-        Map<String, Object> propsMap = new HashMap<>(16);
-        // Broker address, taken straight from the configuration.
-        propsMap.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
-        // Consumer group id, likewise.
-        propsMap.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
-        // Whether offsets are auto-committed (default true); to avoid duplicated and lost records, set it to false and commit offsets manually.
-        propsMap.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, enableAutoCommit);
-        // Interval between auto-commits; only effective when auto-commit is on.
-        propsMap.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "2000");
-        // What to do when a partition has no committed offset, or the offset is invalid:
-        // latest is used here: with a committed offset, consume from it; without one, consume only records produced from now on.
-        propsMap.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, autoOffsetReset);
-        // Maximum gap between two polls, 5 minutes by default; exceeding it triggers a rebalance.
-        propsMap.put(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, maxPollIntervalTime);
-        // Maximum number of records a single poll() may return; 500 by default.
-        propsMap.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, maxPollRecords);
-        // How long the broker may go without a consumer heartbeat before triggering a rebalance; 10s by default.
-        propsMap.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, sessionTimeout);
-        // Deserialization (StringDeserializer here, matching the producer).
-        propsMap.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
-        propsMap.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
-        // The next four entries configure username/password auth; drop them if no credentials are used.
-//        propsMap.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, SecurityProtocol.SASL_PLAINTEXT.name);
-//        propsMap.put(SaslConfigs.SASL_MECHANISM, "PLAIN");
-//        propsMap.put("java.security.auth.login.config", "10000");
-        // The username and password are hard-coded here; they could be moved to the nacos config.
-//        propsMap.put(SaslConfigs.SASL_JAAS_CONFIG, String.format("org.apache.kafka.common.security.plain.PlainLoginModule required username=\"admin\" password=\"admin1234\";"));
-        return propsMap;
-    }
-
-    // Consumer factory, loaded with the settings above.
-    @Bean("consumerFactory")
-    public DefaultKafkaConsumerFactory<Object, Object> consumerFactory() {
-        return new DefaultKafkaConsumerFactory<>(consumerConfigs());
-    }
-
-    @Bean("listenerContainerFactory")
-    public KafkaListenerContainerFactory<ConcurrentMessageListenerContainer<Object, Object>> kafkaListenerContainerFactory() {
-        ConcurrentKafkaListenerContainerFactory<Object, Object> factory = new ConcurrentKafkaListenerContainerFactory<>();
-        factory.setConsumerFactory(consumerFactory());
-        // Number of threads running in the listener container, usually machines * partitions.
-        factory.setConcurrency(concurrency);
-        // A missing listened-to topic is fatal by default, so set false to ignore it.
-        factory.getContainerProperties().setMissingTopicsFatal(missingTopicsFatal);
-        // Auto-commit is off, so manual message acknowledgment has to be configured.
-        factory.getContainerProperties().setAckMode(ContainerProperties.AckMode.MANUAL_IMMEDIATE);
-        factory.getContainerProperties().setPollTimeout(pollTimeout);
-        return factory;
-    }
-}
-
+//package com.casic.missiles.autoconfig;
+//
+//import org.apache.kafka.clients.CommonClientConfigs;
+//import org.apache.kafka.clients.consumer.ConsumerConfig;
+//import org.apache.kafka.common.config.SaslConfigs;
+//import org.apache.kafka.common.security.auth.SecurityProtocol;
+//import org.apache.kafka.common.serialization.StringDeserializer;
+//import org.springframework.beans.factory.annotation.Value;
+//import org.springframework.boot.SpringBootConfiguration;
+//import org.springframework.context.annotation.Bean;
+//import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
+//import org.springframework.kafka.config.KafkaListenerContainerFactory;
+//import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
+//import org.springframework.kafka.listener.ConcurrentMessageListenerContainer;
+//import org.springframework.kafka.listener.ContainerProperties;
+//
+//import java.util.HashMap;
+//import java.util.Map;
+//
+//
+///**
+// * @author cz
+// */
+//@SpringBootConfiguration
+//public class KafkaConsumerConfig {
+//
+//    @Value("${spring.kafka.consumer.bootstrap-servers}")
+//    private String bootstrapServers;
+//    @Value("${spring.kafka.consumer.group-id}")
+//    private String groupId;
+//    @Value("${spring.kafka.consumer.enable-auto-commit}")
+//    private boolean enableAutoCommit;
+//    @Value("${spring.kafka.properties.session.timeout.ms}")
+//    private String sessionTimeout;
+//    @Value("${spring.kafka.properties.max.poll.interval.ms}")
+//    private String maxPollIntervalTime;
+//    @Value("${spring.kafka.consumer.max-poll-records}")
+//    private String maxPollRecords;
+//    @Value("${spring.kafka.consumer.auto-offset-reset}")
+//    private String autoOffsetReset;
+//    @Value("${spring.kafka.listener.concurrency}")
+//    private Integer concurrency;
+//    @Value("${spring.kafka.listener.missing-topics-fatal}")
+//    private boolean missingTopicsFatal;
+//
+//    private final long pollTimeout = 600000;
+//
+//    @Bean
+//    public Map<String, Object> consumerConfigs() {
+//        Map<String, Object> propsMap = new HashMap<>(16);
+//        // Broker address, taken straight from the configuration.
+//        propsMap.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
+//        // Consumer group id, likewise.
+//        propsMap.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
+//        // Whether offsets are auto-committed (default true); to avoid duplicated and lost records, set it to false and commit offsets manually.
+//        propsMap.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, enableAutoCommit);
+//        // Interval between auto-commits; only effective when auto-commit is on.
+//        propsMap.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "2000");
+//        // What to do when a partition has no committed offset, or the offset is invalid:
+//        // latest is used here: with a committed offset, consume from it; without one, consume only records produced from now on.
+//        propsMap.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, autoOffsetReset);
+//        // Maximum gap between two polls, 5 minutes by default; exceeding it triggers a rebalance.
+//        propsMap.put(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, maxPollIntervalTime);
+//        // Maximum number of records a single poll() may return; 500 by default.
+//        propsMap.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, maxPollRecords);
+//        // How long the broker may go without a consumer heartbeat before triggering a rebalance; 10s by default.
+//        propsMap.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, sessionTimeout);
+//        // Deserialization (StringDeserializer here, matching the producer).
+//        propsMap.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
+//        propsMap.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
+//        // The next four entries configure username/password auth; drop them if no credentials are used.
+////        propsMap.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, SecurityProtocol.SASL_PLAINTEXT.name);
+////        propsMap.put(SaslConfigs.SASL_MECHANISM, "PLAIN");
+////        propsMap.put("java.security.auth.login.config", "10000");
+//        // The username and password are hard-coded here; they could be moved to the nacos config.
+////        propsMap.put(SaslConfigs.SASL_JAAS_CONFIG, String.format("org.apache.kafka.common.security.plain.PlainLoginModule required username=\"admin\" password=\"admin1234\";"));
+//        return propsMap;
+//    }
+//
+//    // Consumer factory, loaded with the settings above.
+//    @Bean("consumerFactory")
+//    public DefaultKafkaConsumerFactory<Object, Object> consumerFactory() {
+//        return new DefaultKafkaConsumerFactory<>(consumerConfigs());
+//    }
+//
+//    @Bean("listenerContainerFactory")
+//    public KafkaListenerContainerFactory<ConcurrentMessageListenerContainer<Object, Object>> kafkaListenerContainerFactory() {
+//        ConcurrentKafkaListenerContainerFactory<Object, Object> factory = new ConcurrentKafkaListenerContainerFactory<>();
+//        factory.setConsumerFactory(consumerFactory());
+//        // Number of threads running in the listener container, usually machines * partitions.
+//        factory.setConcurrency(concurrency);
+//        // A missing listened-to topic is fatal by default, so set false to ignore it.
+//        factory.getContainerProperties().setMissingTopicsFatal(missingTopicsFatal);
+//        // Auto-commit is off, so manual message acknowledgment has to be configured.
+//        factory.getContainerProperties().setAckMode(ContainerProperties.AckMode.MANUAL_IMMEDIATE);
+//        factory.getContainerProperties().setPollTimeout(pollTimeout);
+//        return factory;
+//    }
+//}
+//
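The disabled config turns auto-commit off and acknowledges records one by one (MANUAL_IMMEDIATE). For comparison, the plain-client equivalent is a poll loop that commits offsets itself; broker, group id and topic below are placeholders:

import java.time.Duration;
import java.util.List;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;

public class ManualCommitConsumerSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "demo-group");              // placeholder
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);           // as in the config above
        props.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, 3);
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(List.of("demo-topic"));                        // placeholder topic
            while (true) {
                ConsumerRecords<String, String> records = consumer.poll(Duration.ofSeconds(1));
                for (ConsumerRecord<String, String> record : records) {
                    System.out.println(record.value());
                }
                // Manual commit: the plain-client analogue of MANUAL_IMMEDIATE acknowledgment.
                consumer.commitSync();
            }
        }
    }
}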
diff --git a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerListener.java b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerListener.java
index c50a4e3..6600312 100644
--- a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerListener.java
+++ b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerListener.java
@@ -1,31 +1,31 @@
-package com.casic.missiles.autoconfig;
-
-import org.apache.kafka.clients.consumer.ConsumerRecord;
-import org.springframework.kafka.annotation.KafkaListener;
-import org.springframework.kafka.config.KafkaListenerEndpointRegistry;
-import org.springframework.kafka.support.Acknowledgment;
-import org.springframework.web.bind.annotation.RestController;
-
-import javax.annotation.Resource;
-
-@RestController()
-public class KafkaConsumerListener {
-    @Resource
-    private KafkaListenerEndpointRegistry kafkaListenerEndpointRegistry;
-
-    /**
-     * Listen for Kafka messages.
-     *
-     * An id must be specified when autoStartup = "false" is used.
-     */
-    @KafkaListener(id = "${spring.kafka.consumer.group-id}", topics = {"KAFKA_TEST_TOPICS"}, autoStartup = "true")
-    public void listenTopics(ConsumerRecord<Object, Object> consumerRecord, Acknowledgment ack) {
-        try {
-            System.out.println("listenTopics received message: " + consumerRecord.value());
-            // Manual acknowledgment
-            ack.acknowledge();
-        } catch (Exception e) {
-            System.out.println("Consume failed: " + e);
-        }
-    }
-
-}
+//package com.casic.missiles.autoconfig;
+//
+//import org.apache.kafka.clients.consumer.ConsumerRecord;
+//import org.springframework.kafka.annotation.KafkaListener;
+//import org.springframework.kafka.config.KafkaListenerEndpointRegistry;
+//import org.springframework.kafka.support.Acknowledgment;
+//import org.springframework.web.bind.annotation.RestController;
+//
+//import javax.annotation.Resource;
+//
+//@RestController()
+//public class KafkaConsumerListener {
+//    @Resource
+//    private KafkaListenerEndpointRegistry kafkaListenerEndpointRegistry;
+//
+//    /**
+//     * Listen for Kafka messages.
+//     *
+//     * An id must be specified when autoStartup = "false" is used.
+//     */
+//    @KafkaListener(id = "${spring.kafka.consumer.group-id}", topics = {"KAFKA_TEST_TOPICS"}, autoStartup = "true")
+//    public void listenTopics(ConsumerRecord<Object, Object> consumerRecord, Acknowledgment ack) {
+//        try {
+//            System.out.println("listenTopics received message: " + consumerRecord.value());
+//            // Manual acknowledgment
+//            ack.acknowledge();
+//        } catch (Exception e) {
+//            System.out.println("Consume failed: " + e);
+//        }
+//    }
+//
+//}
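The disabled listener swallows failures with try/catch; the errorHandler attribute of @KafkaListener is the hook that would hand them to the KafkaConsumerListenerError bean instead. A sketch, with the bean name assumed:

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.kafka.support.Acknowledgment;
import org.springframework.stereotype.Component;

@Component
public class ListenerWithErrorHandlerSketch {
    // "kafkaConsumerListenerError" is the assumed bean name of the (currently
    // commented-out) KafkaListenerErrorHandler implementation above.
    @KafkaListener(id = "${spring.kafka.consumer.group-id}", topics = {"KAFKA_TEST_TOPICS"},
            errorHandler = "kafkaConsumerListenerError")
    public void listenTopics(ConsumerRecord<Object, Object> consumerRecord, Acknowledgment ack) {
        System.out.println("listenTopics received message: " + consumerRecord.value());
        // Any exception thrown before this line now reaches the error handler.
        ack.acknowledge();
    }
}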
diff --git a/sensorhub-core/src/main/java/com/casic/missiles/controller/ReceiveController.java b/sensorhub-core/src/main/java/com/casic/missiles/controller/ReceiveController.java
index e5fa505..dfed36a 100644
--- a/sensorhub-core/src/main/java/com/casic/missiles/controller/ReceiveController.java
+++ b/sensorhub-core/src/main/java/com/casic/missiles/controller/ReceiveController.java
@@ -18,13 +18,13 @@
     private final ThreadPoolExecutor threadPoolExecutor = new ThreadPoolExecutor(5, 10, 60, TimeUnit.SECONDS, new LinkedBlockingDeque<>(10));
 
     @RequestMapping("/data")
-    public Object testNbResponse(@RequestBody Map h2sDataMap) {
+    public Object testNbResponse(@RequestBody Map dataMap) {
         ResponseData responseData = new ResponseData();
-        System.out.println(JSON.toJSON(h2sDataMap));
+        System.out.println(JSON.toJSON(dataMap));
         threadPoolExecutor.execute(
                 () -> {
                     AepCommandSend aepCommandSend = new AepCommandSend();
-                    aepCommandSend.sendConfig(h2sDataMap);
+                    aepCommandSend.sendConfig(dataMap);
                 }
         );
         responseData.setCode(200);
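One behavioural note on the pool above: with a 10-slot LinkedBlockingDeque and the default AbortPolicy, an 11th pending task makes execute() throw RejectedExecutionException and the request fails. If that is unwanted (an assumption, not something this patch addresses), a fallback policy can be set:

import java.util.concurrent.LinkedBlockingDeque;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class ExecutorPolicySketch {
    // Same shape as ReceiveController's pool, plus CallerRunsPolicy: when the
    // queue is full, the task runs on the submitting (request) thread instead
    // of being rejected.
    private final ThreadPoolExecutor threadPoolExecutor = new ThreadPoolExecutor(
            5, 10, 60, TimeUnit.SECONDS,
            new LinkedBlockingDeque<>(10),
            new ThreadPoolExecutor.CallerRunsPolicy());
}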
private Integer _1k = 1024; + private Long directMomery; + + public DirectMemoryReporter() { + Field field = ReflectionUtils.findField(PlatformDependent.class,"DIRECT_MEMORY_COUNTER"); + field.setAccessible(true); + try { + directMomery = PlatformDependent.usedDirectMemory(); + } catch (Exception e) { + } + + GlobalEventExecutor.INSTANCE.scheduleAtFixedRate(this::doReport, 0, 1, TimeUnit.SECONDS); + } + + + + public void doReport() { + log.debug("netty_directMomery_log ,{}k" , directMomery / _1k); + } +} \ No newline at end of file diff --git a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerConfig.java b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerConfig.java index 24e4884..e494726 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerConfig.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerConfig.java @@ -1,100 +1,100 @@ -package com.casic.missiles.autoconfig; - -import org.apache.kafka.clients.CommonClientConfigs; -import org.apache.kafka.clients.consumer.ConsumerConfig; -import org.apache.kafka.common.config.SaslConfigs; -import org.apache.kafka.common.security.auth.SecurityProtocol; -import org.apache.kafka.common.serialization.StringDeserializer; -import org.springframework.beans.factory.annotation.Value; -import org.springframework.boot.SpringBootConfiguration; -import org.springframework.context.annotation.Bean; -import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory; -import org.springframework.kafka.config.KafkaListenerContainerFactory; -import org.springframework.kafka.core.DefaultKafkaConsumerFactory; -import org.springframework.kafka.listener.ConcurrentMessageListenerContainer; -import org.springframework.kafka.listener.ContainerProperties; - -import java.util.HashMap; -import java.util.Map; - - -/** - * @author cz - */ -@SpringBootConfiguration -public class KafkaConsumerConfig { - - @Value("${spring.kafka.consumer.bootstrap-servers}") - private String bootstrapServers; - @Value("${spring.kafka.consumer.group-id}") - private String groupId; - @Value("${spring.kafka.consumer.enable-auto-commit}") - private boolean enableAutoCommit; - @Value("${spring.kafka.properties.session.timeout.ms}") - private String sessionTimeout; - @Value("${spring.kafka.properties.max.poll.interval.ms}") - private String maxPollIntervalTime; - @Value("${spring.kafka.consumer.max-poll-records}") - private String maxPollRecords; - @Value("${spring.kafka.consumer.auto-offset-reset}") - private String autoOffsetReset; - @Value("${spring.kafka.listener.concurrency}") - private Integer concurrency; - @Value("${spring.kafka.listener.missing-topics-fatal}") - private boolean missingTopicsFatal; - - private final long pollTimeout = 600000; - - @Bean - public Map consumerConfigs() { - Map propsMap = new HashMap<>(16); - // 服务器地址,不多说配置直接用 - propsMap.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers); - // groupId不多说,直接用 - propsMap.put(ConsumerConfig.GROUP_ID_CONFIG, groupId); - //是否自动提交偏移量,默认值是true,为了避免出现重复数据和数据丢失,可以把它设置为false,然后手动提交偏移量 - propsMap.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, enableAutoCommit); - //自动提交的时间间隔,自动提交开启时生效 - propsMap.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "2000"); - //该属性指定了消费者在读取一个没有偏移量的分区或者偏移量无效的情况下该作何处理: - //我们使用latest:当各分区下有已提交的offset时,从提交的offset开始消费;无提交的offset时,消费新产生的该分区下的数据 - propsMap.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, autoOffsetReset); - //两次poll之间的最大间隔,默认值为5分钟。如果超过这个间隔会触发reBalance - 
propsMap.put(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, maxPollIntervalTime); - //这个参数定义了poll方法最多可以拉取多少条消息,默认值为500。 - propsMap.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, maxPollRecords); - //当broker多久没有收到consumer的心跳请求后就触发reBalance,默认值是10s - propsMap.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, sessionTimeout); - //序列化(我们这边使用StringDeserializer,与生产者保持一致) - propsMap.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); - propsMap.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); - // 下面四个参数是用户名密码的参数,没有用户名密码可以去掉以下配置 -// propsMap.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, SecurityProtocol.SASL_PLAINTEXT.name); -// propsMap.put(SaslConfigs.SASL_MECHANISM, "PLAIN"); -// propsMap.put("java.security.auth.login.config", "10000"); - // 这里username设置用户名, password设置密码我写死到代码里了,可以更改为nacos配置 -// propsMap.put(SaslConfigs.SASL_JAAS_CONFIG, String.format("org.apache.kafka.common.security.plain.PlainLoginModule required username=\"admin\" password=\"admin1234\";")); - return propsMap; - } - - // 消费者工厂,将配置信息加载进去 - @Bean("consumerFactory") - public DefaultKafkaConsumerFactory consumerFactory() { - return new DefaultKafkaConsumerFactory(consumerConfigs()); - } - - @Bean("listenerContainerFactory") - public KafkaListenerContainerFactory> kafkaListenerContainerFactory() { - ConcurrentKafkaListenerContainerFactory factory = new ConcurrentKafkaListenerContainerFactory<>(); - factory.setConsumerFactory(consumerFactory()); - //在侦听器容器中运行的线程数,一般设置为 机器数*分区数 - factory.setConcurrency(concurrency); - //消费监听接口监听的主题不存在时,默认会报错,所以设置为false忽略错误 - factory.getContainerProperties().setMissingTopicsFatal(missingTopicsFatal); - //自动提交关闭,需要设置手动消息确认 - factory.getContainerProperties().setAckMode(ContainerProperties.AckMode.MANUAL_IMMEDIATE); - factory.getContainerProperties().setPollTimeout(pollTimeout); - return factory; - } -} - +//package com.casic.missiles.autoconfig; +// +//import org.apache.kafka.clients.CommonClientConfigs; +//import org.apache.kafka.clients.consumer.ConsumerConfig; +//import org.apache.kafka.common.config.SaslConfigs; +//import org.apache.kafka.common.security.auth.SecurityProtocol; +//import org.apache.kafka.common.serialization.StringDeserializer; +//import org.springframework.beans.factory.annotation.Value; +//import org.springframework.boot.SpringBootConfiguration; +//import org.springframework.context.annotation.Bean; +//import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory; +//import org.springframework.kafka.config.KafkaListenerContainerFactory; +//import org.springframework.kafka.core.DefaultKafkaConsumerFactory; +//import org.springframework.kafka.listener.ConcurrentMessageListenerContainer; +//import org.springframework.kafka.listener.ContainerProperties; +// +//import java.util.HashMap; +//import java.util.Map; +// +// +///** +// * @author cz +// */ +//@SpringBootConfiguration +//public class KafkaConsumerConfig { +// +// @Value("${spring.kafka.consumer.bootstrap-servers}") +// private String bootstrapServers; +// @Value("${spring.kafka.consumer.group-id}") +// private String groupId; +// @Value("${spring.kafka.consumer.enable-auto-commit}") +// private boolean enableAutoCommit; +// @Value("${spring.kafka.properties.session.timeout.ms}") +// private String sessionTimeout; +// @Value("${spring.kafka.properties.max.poll.interval.ms}") +// private String maxPollIntervalTime; +// @Value("${spring.kafka.consumer.max-poll-records}") +// private String maxPollRecords; +// @Value("${spring.kafka.consumer.auto-offset-reset}") 
+// private String autoOffsetReset; +// @Value("${spring.kafka.listener.concurrency}") +// private Integer concurrency; +// @Value("${spring.kafka.listener.missing-topics-fatal}") +// private boolean missingTopicsFatal; +// +// private final long pollTimeout = 600000; +// +// @Bean +// public Map consumerConfigs() { +// Map propsMap = new HashMap<>(16); +// // 服务器地址,不多说配置直接用 +// propsMap.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers); +// // groupId不多说,直接用 +// propsMap.put(ConsumerConfig.GROUP_ID_CONFIG, groupId); +// //是否自动提交偏移量,默认值是true,为了避免出现重复数据和数据丢失,可以把它设置为false,然后手动提交偏移量 +// propsMap.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, enableAutoCommit); +// //自动提交的时间间隔,自动提交开启时生效 +// propsMap.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "2000"); +// //该属性指定了消费者在读取一个没有偏移量的分区或者偏移量无效的情况下该作何处理: +// //我们使用latest:当各分区下有已提交的offset时,从提交的offset开始消费;无提交的offset时,消费新产生的该分区下的数据 +// propsMap.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, autoOffsetReset); +// //两次poll之间的最大间隔,默认值为5分钟。如果超过这个间隔会触发reBalance +// propsMap.put(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, maxPollIntervalTime); +// //这个参数定义了poll方法最多可以拉取多少条消息,默认值为500。 +// propsMap.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, maxPollRecords); +// //当broker多久没有收到consumer的心跳请求后就触发reBalance,默认值是10s +// propsMap.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, sessionTimeout); +// //序列化(我们这边使用StringDeserializer,与生产者保持一致) +// propsMap.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); +// propsMap.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); +// // 下面四个参数是用户名密码的参数,没有用户名密码可以去掉以下配置 +//// propsMap.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, SecurityProtocol.SASL_PLAINTEXT.name); +//// propsMap.put(SaslConfigs.SASL_MECHANISM, "PLAIN"); +//// propsMap.put("java.security.auth.login.config", "10000"); +// // 这里username设置用户名, password设置密码我写死到代码里了,可以更改为nacos配置 +//// propsMap.put(SaslConfigs.SASL_JAAS_CONFIG, String.format("org.apache.kafka.common.security.plain.PlainLoginModule required username=\"admin\" password=\"admin1234\";")); +// return propsMap; +// } +// +// // 消费者工厂,将配置信息加载进去 +// @Bean("consumerFactory") +// public DefaultKafkaConsumerFactory consumerFactory() { +// return new DefaultKafkaConsumerFactory(consumerConfigs()); +// } +// +// @Bean("listenerContainerFactory") +// public KafkaListenerContainerFactory> kafkaListenerContainerFactory() { +// ConcurrentKafkaListenerContainerFactory factory = new ConcurrentKafkaListenerContainerFactory<>(); +// factory.setConsumerFactory(consumerFactory()); +// //在侦听器容器中运行的线程数,一般设置为 机器数*分区数 +// factory.setConcurrency(concurrency); +// //消费监听接口监听的主题不存在时,默认会报错,所以设置为false忽略错误 +// factory.getContainerProperties().setMissingTopicsFatal(missingTopicsFatal); +// //自动提交关闭,需要设置手动消息确认 +// factory.getContainerProperties().setAckMode(ContainerProperties.AckMode.MANUAL_IMMEDIATE); +// factory.getContainerProperties().setPollTimeout(pollTimeout); +// return factory; +// } +//} +// diff --git a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerListener.java b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerListener.java index c50a4e3..6600312 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerListener.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerListener.java @@ -1,31 +1,31 @@ -package com.casic.missiles.autoconfig; - -import org.apache.kafka.clients.consumer.ConsumerRecord; -import 
org.springframework.kafka.annotation.KafkaListener; -import org.springframework.kafka.config.KafkaListenerEndpointRegistry; -import org.springframework.kafka.support.Acknowledgment; -import org.springframework.web.bind.annotation.RestController; - -import javax.annotation.Resource; - -@RestController() -public class KafkaConsumerListener{ - @Resource - private KafkaListenerEndpointRegistry kafkaListenerEndpointRegistry; - /** - * 监听kafka消息 - * - * 使用autoStartup = "false"必须指定id - */ - @KafkaListener(id = "${spring.kafka.consumer.group-id}", topics = {"KAFKA_TEST_TOPICS"}, autoStartup = "true") - public void listenTopics(ConsumerRecord consumerRecord, Acknowledgment ack) { - try { - System.out.println("listenTopics接受消息:" + consumerRecord.value()); - //手动确认 - ack.acknowledge(); - } catch (Exception e) { - System.out.println("消费失败:" + e); - } - } - -} +//package com.casic.missiles.autoconfig; +// +//import org.apache.kafka.clients.consumer.ConsumerRecord; +//import org.springframework.kafka.annotation.KafkaListener; +//import org.springframework.kafka.config.KafkaListenerEndpointRegistry; +//import org.springframework.kafka.support.Acknowledgment; +//import org.springframework.web.bind.annotation.RestController; +// +//import javax.annotation.Resource; +// +//@RestController() +//public class KafkaConsumerListener{ +// @Resource +// private KafkaListenerEndpointRegistry kafkaListenerEndpointRegistry; +// /** +// * 监听kafka消息 +// * +// * 使用autoStartup = "false"必须指定id +// */ +// @KafkaListener(id = "${spring.kafka.consumer.group-id}", topics = {"KAFKA_TEST_TOPICS"}, autoStartup = "true") +// public void listenTopics(ConsumerRecord consumerRecord, Acknowledgment ack) { +// try { +// System.out.println("listenTopics接受消息:" + consumerRecord.value()); +// //手动确认 +// ack.acknowledge(); +// } catch (Exception e) { +// System.out.println("消费失败:" + e); +// } +// } +// +//} diff --git a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerListenerError.java b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerListenerError.java index d6f771a..dbc561c 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerListenerError.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerListenerError.java @@ -1,29 +1,29 @@ -package com.casic.missiles.autoconfig; - -import edu.umd.cs.findbugs.annotations.NonNull; -import org.apache.kafka.clients.consumer.Consumer; -import org.springframework.kafka.listener.KafkaListenerErrorHandler; -import org.springframework.kafka.listener.ListenerExecutionFailedException; -import org.springframework.messaging.Message; -import org.springframework.stereotype.Component; - -@Component -public class KafkaConsumerListenerError implements KafkaListenerErrorHandler { - - - @Override - @NonNull - public Object handleError(Message message, ListenerExecutionFailedException e) { - return new Object(); - } - - @Override - public Object handleError(Message message, ListenerExecutionFailedException exception, Consumer consumer) { - System.out.println("消息详情:" + message); - System.out.println("异常信息::" + exception); - System.out.println("消费者详情::" + consumer.groupMetadata()); - System.out.println("监听主题::" + consumer.listTopics()); - return KafkaListenerErrorHandler.super.handleError(message, exception, consumer); - } - -} +//package com.casic.missiles.autoconfig; +// +//import edu.umd.cs.findbugs.annotations.NonNull; +//import org.apache.kafka.clients.consumer.Consumer; +//import 
diff --git a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaProviderConfig.java b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaProviderConfig.java
index e8e36ec..1d605ed 100644
--- a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaProviderConfig.java
+++ b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaProviderConfig.java
@@ -1,85 +1,85 @@
-package com.casic.missiles.autoconfig;
-
-import org.apache.kafka.clients.CommonClientConfigs;
-import org.apache.kafka.clients.producer.ProducerConfig;
-import org.apache.kafka.common.config.SaslConfigs;
-import org.apache.kafka.common.security.auth.SecurityProtocol;
-import org.apache.kafka.common.serialization.StringSerializer;
-import org.springframework.beans.factory.annotation.Value;
-import org.springframework.boot.SpringBootConfiguration;
-import org.springframework.context.annotation.Bean;
-import org.springframework.context.annotation.Primary;
-import org.springframework.kafka.core.DefaultKafkaProducerFactory;
-import org.springframework.kafka.core.KafkaTemplate;
-import org.springframework.kafka.core.ProducerFactory;
-import org.springframework.kafka.transaction.KafkaTransactionManager;
-
-import java.util.HashMap;
-import java.util.Map;
-
-/**
- * @author cz
- */
-@SpringBootConfiguration
-public class KafkaProviderConfig {
-
-    @Value("${spring.kafka.producer.bootstrap-servers}")
-    private String bootstrapServers;
-    @Value("${spring.kafka.producer.acks}")
-    private String acks;
-    @Value("${spring.kafka.producer.retries}")
-    private String retries;
-    @Value("${spring.kafka.producer.batch-size}")
-    private String batchSize;
-    @Value("${spring.kafka.producer.buffer-memory}")
-    private String bufferMemory;
-
-    @Bean
-    public Map producerConfigs() {
-        Map props = new HashMap<>(16);
-        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
-        // response mode: with acks=all the producer gets a success response only after every replica has received the message
-        props.put(ProducerConfig.ACKS_CONFIG, acks);
-        // number of resends after an error; must be > 0 when transactions are enabled
-        props.put(ProducerConfig.RETRIES_CONFIG, retries);
-        // messages headed for the same partition are packed into one batch instead of being sent one by one, reducing round trips
-        props.put(ProducerConfig.BATCH_SIZE_CONFIG, batchSize);
-        // with little traffic a batch may not reach 16KB for a long time (say 5 min), adding latency, so a time cap is also set: once it elapses the batch is sent anyway
-        props.put(ProducerConfig.LINGER_MS_CONFIG, "5000");
-        // size of the producer's memory buffer
-        props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, bufferMemory);
-        // serializers, matching the consumer's deserializers
-        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
-        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
-
-        // SASL username/password settings; remove them if the cluster has no authentication
-//        props.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, SecurityProtocol.SASL_PLAINTEXT.name);
-//        props.put(SaslConfigs.SASL_MECHANISM, "PLAIN");
-//        props.put("java.security.auth.login.config", "10000");
-        // could instead be kept in the nacos config file
-//        props.put(SaslConfigs.SASL_JAAS_CONFIG, String.format("org.apache.kafka.common.security.plain.PlainLoginModule required username=\"admin\" password=\"admin1234\";"));
-        return props;
-    }
-
-    // producer factory
-    @Bean("kafkaProduceFactory")
-    public ProducerFactory producerFactory() {
-        DefaultKafkaProducerFactory factory = new DefaultKafkaProducerFactory<>(producerConfigs());
-        factory.setTransactionIdPrefix("kafkaTx-");
-        return factory;
-    }
-
-    // transaction handling
-    // this transaction manager can conflict with the project's other transactions, so I usually drop the @Bean and skip the Spring proxy
-    @Bean("kafkaTransactionManager")
-    @Primary
-    public KafkaTransactionManager kafkaTransactionManager(ProducerFactory producerFactory) {
-        return new KafkaTransactionManager(producerFactory);
-    }
-
-    @Bean
-    public KafkaTemplate kafkaTemplate() {
-        return new KafkaTemplate<>(producerFactory());
-    }
-
-}
+//package com.casic.missiles.autoconfig;
+//
+//import org.apache.kafka.clients.CommonClientConfigs;
+//import org.apache.kafka.clients.producer.ProducerConfig;
+//import org.apache.kafka.common.config.SaslConfigs;
+//import org.apache.kafka.common.security.auth.SecurityProtocol;
+//import org.apache.kafka.common.serialization.StringSerializer;
+//import org.springframework.beans.factory.annotation.Value;
+//import org.springframework.boot.SpringBootConfiguration;
+//import org.springframework.context.annotation.Bean;
+//import org.springframework.context.annotation.Primary;
+//import org.springframework.kafka.core.DefaultKafkaProducerFactory;
+//import org.springframework.kafka.core.KafkaTemplate;
+//import org.springframework.kafka.core.ProducerFactory;
+//import org.springframework.kafka.transaction.KafkaTransactionManager;
+//
+//import java.util.HashMap;
+//import java.util.Map;
+//
+///**
+// * @author cz
+// */
+//@SpringBootConfiguration
+//public class KafkaProviderConfig {
+//
+//    @Value("${spring.kafka.producer.bootstrap-servers}")
+//    private String bootstrapServers;
+//    @Value("${spring.kafka.producer.acks}")
+//    private String acks;
+//    @Value("${spring.kafka.producer.retries}")
+//    private String retries;
+//    @Value("${spring.kafka.producer.batch-size}")
+//    private String batchSize;
+//    @Value("${spring.kafka.producer.buffer-memory}")
+//    private String bufferMemory;
+//
+//    @Bean
+//    public Map producerConfigs() {
+//        Map props = new HashMap<>(16);
+//        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
+//        // response mode: with acks=all the producer gets a success response only after every replica has received the message
+//        props.put(ProducerConfig.ACKS_CONFIG, acks);
+//        // number of resends after an error; must be > 0 when transactions are enabled
+//        props.put(ProducerConfig.RETRIES_CONFIG, retries);
+//        // messages headed for the same partition are packed into one batch instead of being sent one by one, reducing round trips
+//        props.put(ProducerConfig.BATCH_SIZE_CONFIG, batchSize);
+//        // with little traffic a batch may not reach 16KB for a long time (say 5 min), adding latency, so a time cap is also set: once it elapses the batch is sent anyway
+//        props.put(ProducerConfig.LINGER_MS_CONFIG, "5000");
+//        // size of the producer's memory buffer
+//        props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, bufferMemory);
+//        // serializers, matching the consumer's deserializers
+//        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
+//        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
+//
+//        // SASL username/password settings; remove them if the cluster has no authentication
+////        props.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, SecurityProtocol.SASL_PLAINTEXT.name);
+////        props.put(SaslConfigs.SASL_MECHANISM, "PLAIN");
+////        props.put("java.security.auth.login.config", "10000");
+//        // could instead be kept in the nacos config file
+////        props.put(SaslConfigs.SASL_JAAS_CONFIG, String.format("org.apache.kafka.common.security.plain.PlainLoginModule required username=\"admin\" password=\"admin1234\";"));
+//        return props;
+//    }
+//
+//    // producer factory
+//    @Bean("kafkaProduceFactory")
+//    public ProducerFactory producerFactory() {
+//        DefaultKafkaProducerFactory factory = new DefaultKafkaProducerFactory<>(producerConfigs());
+//        factory.setTransactionIdPrefix("kafkaTx-");
+//        return factory;
+//    }
+//
+//    // transaction handling
+//    // this transaction manager can conflict with the project's other transactions, so I usually drop the @Bean and skip the Spring proxy
+//    @Bean("kafkaTransactionManager")
+//    @Primary
+//    public KafkaTransactionManager kafkaTransactionManager(ProducerFactory producerFactory) {
+//        return new KafkaTransactionManager(producerFactory);
+//    }
+//
+//    @Bean
+//    public KafkaTemplate kafkaTemplate() {
+//        return new KafkaTemplate<>(producerFactory());
+//    }
+//
+//}
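The batching comments above boil down to two triggers: a batch ships when it reaches batch.size bytes or when it has lingered for linger.ms, whichever comes first. A standalone sketch with the plain Kafka client that makes the interplay visible (broker address and topic name are placeholders):

```java
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringSerializer;

import java.util.Properties;

public class BatchingDemo {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder broker
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        props.put(ProducerConfig.BATCH_SIZE_CONFIG, 16384); // send once a batch reaches 16 KB...
        props.put(ProducerConfig.LINGER_MS_CONFIG, 5000);   // ...or after 5 s, whichever comes first

        try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
            for (int i = 0; i < 10; i++) {
                // ten small records fall far short of 16 KB, so they leave as one batch after ~5 s
                producer.send(new ProducerRecord<>("demo-topic", "key-" + i, "value-" + i));
            }
        } // close() flushes any open batch immediately
    }
}
```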
diff --git a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaSendResultHandler.java b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaSendResultHandler.java
index 1fdac64..1598ee0 100644
--- a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaSendResultHandler.java
+++ b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaSendResultHandler.java
@@ -1,22 +1,22 @@
-package com.casic.missiles.autoconfig;
-
-import org.apache.kafka.clients.producer.ProducerRecord;
-import org.apache.kafka.clients.producer.RecordMetadata;
-import org.springframework.kafka.support.ProducerListener;
-import org.springframework.stereotype.Component;
-
-import javax.annotation.Nullable;
-
-@Component
-public class KafkaSendResultHandler implements ProducerListener {
-
-    @Override
-    public void onSuccess(ProducerRecord producerRecord, RecordMetadata recordMetadata) {
-        System.out.println("message sent successfully: " + producerRecord.toString());
-    }
-
-    @Override
-    public void onError(ProducerRecord producerRecord, @Nullable RecordMetadata recordMetadata, Exception exception) {
-        System.out.println("message send failed: " + producerRecord.toString() + exception.getMessage());
-    }
-}
+//package com.casic.missiles.autoconfig;
+//
+//import org.apache.kafka.clients.producer.ProducerRecord;
+//import org.apache.kafka.clients.producer.RecordMetadata;
+//import org.springframework.kafka.support.ProducerListener;
+//import org.springframework.stereotype.Component;
+//
+//import javax.annotation.Nullable;
+//
+//@Component
+//public class KafkaSendResultHandler implements ProducerListener {
+//
+//    @Override
+//    public void onSuccess(ProducerRecord producerRecord, RecordMetadata recordMetadata) {
+//        System.out.println("message sent successfully: " + producerRecord.toString());
+//    }
+//
+//    @Override
+//    public void onError(ProducerRecord producerRecord, @Nullable RecordMetadata recordMetadata, Exception exception) {
+//        System.out.println("message send failed: " + producerRecord.toString() + exception.getMessage());
+//    }
+//}
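A ProducerListener only fires if it is registered on the KafkaTemplate; declaring the @Component alone is not enough. A hedged sketch of that wiring, assuming the handler and producer factory above are re-enabled (class and bean names are illustrative):

```java
package com.casic.missiles.autoconfig;

import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.core.ProducerFactory;

@Configuration
public class KafkaTemplateConfig {

    // Without setProducerListener the onSuccess/onError callbacks above are never invoked.
    @Bean
    public KafkaTemplate<Object, Object> kafkaTemplate(ProducerFactory<Object, Object> producerFactory,
                                                       KafkaSendResultHandler resultHandler) {
        KafkaTemplate<Object, Object> template = new KafkaTemplate<>(producerFactory);
        template.setProducerListener(resultHandler); // callback per completed send
        return template;
    }
}
```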
diff --git a/sensorhub-core/src/main/java/com/casic/missiles/cache/ProtocolProcessEventListener.java b/sensorhub-core/src/main/java/com/casic/missiles/cache/ProtocolProcessEventListener.java
index b0bf96d..5144189 100644
--- a/sensorhub-core/src/main/java/com/casic/missiles/cache/ProtocolProcessEventListener.java
+++ b/sensorhub-core/src/main/java/com/casic/missiles/cache/ProtocolProcessEventListener.java
@@ -157,7 +157,7 @@
                 deviceDataList.add(deviceData);
             }
             // batch save
-            deviceDataService.saveBatch(deviceDataList);
+//            deviceDataService.saveBatch(deviceDataList);
         }
     }
     // evict the cache
diff --git a/sensorhub-core/src/main/java/com/casic/missiles/controller/AepCommandSend.java b/sensorhub-core/src/main/java/com/casic/missiles/controller/AepCommandSend.java
index 2d0aeae..5e3ba38 100644
--- a/sensorhub-core/src/main/java/com/casic/missiles/controller/AepCommandSend.java
+++ b/sensorhub-core/src/main/java/com/casic/missiles/controller/AepCommandSend.java
@@ -64,7 +64,7 @@
         request.setParamMasterKey("0a77886fae4f4ff68d926adeb3a3ef5b"); // single value
         request.setBody(JSONObject.toJSONString(busConfigParam).getBytes());
         CreateCommandLwm2mProfileResponse msgResponse = client.CreateCommandLwm2mProfile(request);
-        log.info("-----" + msgResponse.getMessage());
+        log.info("send status-----" + msgResponse.getMessage());
     } catch (Exception ex) {
     } finally {
         client.shutdown();
diff --git a/sensorhub-core/src/main/java/com/casic/missiles/controller/KafkaController.java b/sensorhub-core/src/main/java/com/casic/missiles/controller/KafkaController.java
deleted file mode 100644
index d679bf1..0000000
--- a/sensorhub-core/src/main/java/com/casic/missiles/controller/KafkaController.java
+++ /dev/null
@@ -1,59 +0,0 @@
-package com.casic.missiles.controller;
-
-import com.casic.missiles.autoconfig.KafkaSendResultHandler;
-import com.casic.missiles.parser.sender.impl.KafkaSubscribe;
-import org.apache.kafka.clients.producer.ProducerRecord;
-import org.springframework.beans.factory.annotation.Autowired;
-import org.springframework.kafka.core.KafkaTemplate;
-import org.springframework.kafka.support.KafkaHeaders;
-import org.springframework.messaging.MessageHeaders;
-import org.springframework.messaging.support.GenericMessage;
-import org.springframework.transaction.annotation.Transactional;
-import org.springframework.web.bind.annotation.RequestMapping;
-import org.springframework.web.bind.annotation.RestController;
-
-import java.util.HashMap;
-import java.util.Map;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.TimeoutException;
-
-@RestController
-@RequestMapping("/provider")
-// this annotation opts the class into Spring transactions; Kafka transactions are enabled in the Kafka config, and it errors without it
-public class KafkaController {
-
-    @Autowired
-    private KafkaSubscribe kafkaSubscribe;
-
-    @RequestMapping("/send")
-    @Transactional
-    public String sendMultiple() throws Exception {
-        kafkaSubscribe.publishDataSubscribe(null, null);
-        return "okk";
-    }
-
-
-//    /**
-//     * Kafka offers several ways to build a message
-//     *
-//     * @throws ExecutionException
-//     * @throws InterruptedException
-//     * @throws TimeoutException
-//     */
-//    public void sendDemo() throws ExecutionException, InterruptedException, TimeoutException {
-//        // the trailing get() makes the send synchronous; the optional timeout throws once exceeded, though the send itself may still succeed
-//        kafkaTemplate.send("topic1", "message for topic1").get();
-//        // send with a ProducerRecord
-//        ProducerRecord producerRecord = new ProducerRecord<>("topic.quick.demo", "use ProducerRecord to send message");
-//        kafkaTemplate.send(producerRecord);
-//        // send with a Message
-//        Map map = new HashMap<>();
-//        map.put(KafkaHeaders.TOPIC, "topic.quick.demo");
-//        map.put(KafkaHeaders.PARTITION_ID, 0);
-//        map.put(KafkaHeaders.MESSAGE_KEY, 0);
-//        GenericMessage message = new GenericMessage<>("use Message to send message", new MessageHeaders(map));
-//        kafkaTemplate.send(message);
-//    }
-
-}
-
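The ProtocolProcessEventListener hunk above silences persistence by commenting out the saveBatch call. If the intent is to toggle storage per environment rather than delete it, a configuration guard keeps the code path alive; a hypothetical sketch, not part of the commit (the sensorhub.persist-data property and wrapper class are invented, DeviceData/DeviceDataService are the project's existing types with their imports omitted):

```java
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Component;

import java.util.List;

@Component
public class DeviceDataPersister {

    // Defaults to false, preserving this commit's "don't store" behavior.
    @Value("${sensorhub.persist-data:false}")
    private boolean persistData;

    private final DeviceDataService deviceDataService;

    public DeviceDataPersister(DeviceDataService deviceDataService) {
        this.deviceDataService = deviceDataService;
    }

    public void persist(List<DeviceData> deviceDataList) {
        if (persistData) {
            deviceDataService.saveBatch(deviceDataList); // MyBatis-Plus IService batch insert
        }
    }
}
```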
diff --git a/sensorhub-core/src/main/java/com/casic/missiles/controller/ReceiveController.java b/sensorhub-core/src/main/java/com/casic/missiles/controller/ReceiveController.java
index e5fa505..dfed36a 100644
--- a/sensorhub-core/src/main/java/com/casic/missiles/controller/ReceiveController.java
+++ b/sensorhub-core/src/main/java/com/casic/missiles/controller/ReceiveController.java
@@ -18,13 +18,13 @@
     private final ThreadPoolExecutor threadPoolExecutor = new ThreadPoolExecutor(5, 10, 60, TimeUnit.SECONDS, new LinkedBlockingDeque<>(10));
 
     @RequestMapping("/data")
-    public Object testNbResponse(@RequestBody Map h2sDataMap) {
+    public Object testNbResponse(@RequestBody Map dataMap) {
         ResponseData responseData = new ResponseData();
-        System.out.println(JSON.toJSON(h2sDataMap));
+        System.out.println(JSON.toJSON(dataMap));
         threadPoolExecutor.execute(
                 () -> {
                     AepCommandSend aepCommandSend = new AepCommandSend();
-                    aepCommandSend.sendConfig(h2sDataMap);
+                    aepCommandSend.sendConfig(dataMap);
                 }
         );
         responseData.setCode(200);
diff --git a/sensorhub-core/src/main/java/com/casic/missiles/netty/SensorhubServer.java b/sensorhub-core/src/main/java/com/casic/missiles/netty/SensorhubServer.java
index c4a534c..a946173 100644
--- a/sensorhub-core/src/main/java/com/casic/missiles/netty/SensorhubServer.java
+++ b/sensorhub-core/src/main/java/com/casic/missiles/netty/SensorhubServer.java
@@ -3,6 +3,7 @@
 import com.casic.missiles.autoconfig.SensorhubProperties;
 import com.casic.missiles.netty.handler.SensorhubServerChannelInitialHandler;
 import io.netty.bootstrap.ServerBootstrap;
+import io.netty.buffer.PooledByteBufAllocator;
 import io.netty.channel.ChannelFuture;
 import io.netty.channel.ChannelOption;
 import io.netty.channel.EventLoopGroup;
@@ -71,6 +72,8 @@
         b.group(bossGroup, workerGroup) // set the EventLoopGroup
                 .channel(NioServerSocketChannel.class) // channel type for new connections
                 .childHandler(new SensorhubServerChannelInitialHandler()) // the ChannelHandler
+                .option(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT)
+                .childOption(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT)
                 .option(ChannelOption.SO_BACKLOG, 1024) // ServerChannel options
                 .childOption(ChannelOption.SO_KEEPALIVE, true); // with SO_KEEPALIVE=true the server can detect whether a client connection is still alive and close dead ones to free resources
         // add detection
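Switching both allocators to PooledByteBufAllocator.DEFAULT means un-released ByteBufs show up as steadily growing direct memory, so it helps to watch that counter over time. A minimal sketch using Netty's own utilities (the one-second period and class name are arbitrary choices; the counter must be read inside the task so each tick logs current usage rather than a value captured once at startup):

```java
import io.netty.util.concurrent.GlobalEventExecutor;
import io.netty.util.internal.PlatformDependent;

import java.util.concurrent.TimeUnit;

public final class DirectMemoryWatch {

    private DirectMemoryWatch() {
    }

    public static void start() {
        GlobalEventExecutor.INSTANCE.scheduleAtFixedRate(() -> {
            // Netty-tracked reserved direct memory; -1 when tracking is disabled.
            long used = PlatformDependent.usedDirectMemory();
            if (used >= 0) {
                System.out.printf("netty direct memory: %d KB%n", used / 1024);
            }
        }, 0, 1, TimeUnit.SECONDS);
    }
}
```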
diff --git a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerConfig.java b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerConfig.java
index 24e4884..e494726 100644
--- a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerConfig.java
+++ b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerConfig.java
@@ -1,100 +1,100 @@
-package com.casic.missiles.autoconfig;
-
-import org.apache.kafka.clients.CommonClientConfigs;
-import org.apache.kafka.clients.consumer.ConsumerConfig;
-import org.apache.kafka.common.config.SaslConfigs;
-import org.apache.kafka.common.security.auth.SecurityProtocol;
-import org.apache.kafka.common.serialization.StringDeserializer;
-import org.springframework.beans.factory.annotation.Value;
-import org.springframework.boot.SpringBootConfiguration;
-import org.springframework.context.annotation.Bean;
-import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
-import org.springframework.kafka.config.KafkaListenerContainerFactory;
-import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
-import org.springframework.kafka.listener.ConcurrentMessageListenerContainer;
-import org.springframework.kafka.listener.ContainerProperties;
-
-import java.util.HashMap;
-import java.util.Map;
-
-
-/**
- * @author cz
- */
-@SpringBootConfiguration
-public class KafkaConsumerConfig {
-
-    @Value("${spring.kafka.consumer.bootstrap-servers}")
-    private String bootstrapServers;
-    @Value("${spring.kafka.consumer.group-id}")
-    private String groupId;
-    @Value("${spring.kafka.consumer.enable-auto-commit}")
-    private boolean enableAutoCommit;
-    @Value("${spring.kafka.properties.session.timeout.ms}")
-    private String sessionTimeout;
-    @Value("${spring.kafka.properties.max.poll.interval.ms}")
-    private String maxPollIntervalTime;
-    @Value("${spring.kafka.consumer.max-poll-records}")
-    private String maxPollRecords;
-    @Value("${spring.kafka.consumer.auto-offset-reset}")
-    private String autoOffsetReset;
-    @Value("${spring.kafka.listener.concurrency}")
-    private Integer concurrency;
-    @Value("${spring.kafka.listener.missing-topics-fatal}")
-    private boolean missingTopicsFatal;
-
-    private final long pollTimeout = 600000;
-
-    @Bean
-    public Map consumerConfigs() {
-        Map propsMap = new HashMap<>(16);
-        // broker address, used as-is from the config
-        propsMap.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
-        // consumer group id, used as-is
-        propsMap.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
-        // whether to auto-commit offsets; defaults to true, but set it to false and commit manually to avoid duplicates and lost records
-        propsMap.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, enableAutoCommit);
-        // auto-commit interval, effective only when auto-commit is enabled
-        propsMap.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "2000");
-        // what the consumer does when a partition has no committed offset, or an invalid one:
-        // we use latest: resume from the committed offset when there is one, otherwise consume only records produced from now on
-        propsMap.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, autoOffsetReset);
-        // maximum interval between two polls, default 5 minutes; exceeding it triggers a rebalance
-        propsMap.put(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, maxPollIntervalTime);
-        // maximum number of records a single poll may return, default 500
-        propsMap.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, maxPollRecords);
-        // how long the broker may go without a consumer heartbeat before triggering a rebalance, default 10s
-        propsMap.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, sessionTimeout);
-        // deserialization (StringDeserializer here, matching the producer)
-        propsMap.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
-        propsMap.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
-        // the next four properties set the SASL username/password; remove them if the cluster has no authentication
-//        propsMap.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, SecurityProtocol.SASL_PLAINTEXT.name);
-//        propsMap.put(SaslConfigs.SASL_MECHANISM, "PLAIN");
-//        propsMap.put("java.security.auth.login.config", "10000");
-        // username/password are hard-coded here; they could be moved to nacos config
-//        propsMap.put(SaslConfigs.SASL_JAAS_CONFIG, String.format("org.apache.kafka.common.security.plain.PlainLoginModule required username=\"admin\" password=\"admin1234\";"));
-        return propsMap;
-    }
-
-    // consumer factory, loads the config above
-    @Bean("consumerFactory")
-    public DefaultKafkaConsumerFactory consumerFactory() {
-        return new DefaultKafkaConsumerFactory(consumerConfigs());
-    }
-
-    @Bean("listenerContainerFactory")
-    public KafkaListenerContainerFactory<ConcurrentMessageListenerContainer<Object, Object>> kafkaListenerContainerFactory() {
-        ConcurrentKafkaListenerContainerFactory factory = new ConcurrentKafkaListenerContainerFactory<>();
-        factory.setConsumerFactory(consumerFactory());
-        // number of threads in the listener container, usually machines * partitions
-        factory.setConcurrency(concurrency);
-        // a missing topic is fatal by default, so set false to ignore it
-        factory.getContainerProperties().setMissingTopicsFatal(missingTopicsFatal);
-        // auto-commit is off, so manual message acknowledgment must be configured
-        factory.getContainerProperties().setAckMode(ContainerProperties.AckMode.MANUAL_IMMEDIATE);
-        factory.getContainerProperties().setPollTimeout(pollTimeout);
-        return factory;
-    }
-}
-
+//package com.casic.missiles.autoconfig;
+//
+//import org.apache.kafka.clients.CommonClientConfigs;
+//import org.apache.kafka.clients.consumer.ConsumerConfig;
+//import org.apache.kafka.common.config.SaslConfigs;
+//import org.apache.kafka.common.security.auth.SecurityProtocol;
+//import org.apache.kafka.common.serialization.StringDeserializer;
+//import org.springframework.beans.factory.annotation.Value;
+//import org.springframework.boot.SpringBootConfiguration;
+//import org.springframework.context.annotation.Bean;
+//import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
+//import org.springframework.kafka.config.KafkaListenerContainerFactory;
+//import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
+//import org.springframework.kafka.listener.ConcurrentMessageListenerContainer;
+//import org.springframework.kafka.listener.ContainerProperties;
+//
+//import java.util.HashMap;
+//import java.util.Map;
+//
+//
+///**
+// * @author cz
+// */
+//@SpringBootConfiguration
+//public class KafkaConsumerConfig {
+//
+//    @Value("${spring.kafka.consumer.bootstrap-servers}")
+//    private String bootstrapServers;
+//    @Value("${spring.kafka.consumer.group-id}")
+//    private String groupId;
+//    @Value("${spring.kafka.consumer.enable-auto-commit}")
+//    private boolean enableAutoCommit;
+//    @Value("${spring.kafka.properties.session.timeout.ms}")
+//    private String sessionTimeout;
+//    @Value("${spring.kafka.properties.max.poll.interval.ms}")
+//    private String maxPollIntervalTime;
+//    @Value("${spring.kafka.consumer.max-poll-records}")
+//    private String maxPollRecords;
+//    @Value("${spring.kafka.consumer.auto-offset-reset}")
+//    private String autoOffsetReset;
+//    @Value("${spring.kafka.listener.concurrency}")
+//    private Integer concurrency;
+//    @Value("${spring.kafka.listener.missing-topics-fatal}")
+//    private boolean missingTopicsFatal;
+//
+//    private final long pollTimeout = 600000;
+//
+//    @Bean
+//    public Map consumerConfigs() {
+//        Map propsMap = new HashMap<>(16);
+//        // broker address, used as-is from the config
+//        propsMap.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
+//        // consumer group id, used as-is
+//        propsMap.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
+//        // whether to auto-commit offsets; defaults to true, but set it to false and commit manually to avoid duplicates and lost records
+//        propsMap.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, enableAutoCommit);
+//        // auto-commit interval, effective only when auto-commit is enabled
+//        propsMap.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "2000");
+//        // what the consumer does when a partition has no committed offset, or an invalid one:
+//        // we use latest: resume from the committed offset when there is one, otherwise consume only records produced from now on
+//        propsMap.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, autoOffsetReset);
+//        // maximum interval between two polls, default 5 minutes; exceeding it triggers a rebalance
+//        propsMap.put(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, maxPollIntervalTime);
+//        // maximum number of records a single poll may return, default 500
+//        propsMap.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, maxPollRecords);
+//        // how long the broker may go without a consumer heartbeat before triggering a rebalance, default 10s
+//        propsMap.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, sessionTimeout);
+//        // deserialization (StringDeserializer here, matching the producer)
+//        propsMap.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
+//        propsMap.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
+//        // the next four properties set the SASL username/password; remove them if the cluster has no authentication
+////        propsMap.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, SecurityProtocol.SASL_PLAINTEXT.name);
+////        propsMap.put(SaslConfigs.SASL_MECHANISM, "PLAIN");
+////        propsMap.put("java.security.auth.login.config", "10000");
+//        // username/password are hard-coded here; they could be moved to nacos config
+////        propsMap.put(SaslConfigs.SASL_JAAS_CONFIG, String.format("org.apache.kafka.common.security.plain.PlainLoginModule required username=\"admin\" password=\"admin1234\";"));
+//        return propsMap;
+//    }
+//
+//    // consumer factory, loads the config above
+//    @Bean("consumerFactory")
+//    public DefaultKafkaConsumerFactory consumerFactory() {
+//        return new DefaultKafkaConsumerFactory(consumerConfigs());
+//    }
+//
+//    @Bean("listenerContainerFactory")
+//    public KafkaListenerContainerFactory<ConcurrentMessageListenerContainer<Object, Object>> kafkaListenerContainerFactory() {
+//        ConcurrentKafkaListenerContainerFactory factory = new ConcurrentKafkaListenerContainerFactory<>();
+//        factory.setConsumerFactory(consumerFactory());
+//        // number of threads in the listener container, usually machines * partitions
+//        factory.setConcurrency(concurrency);
+//        // a missing topic is fatal by default, so set false to ignore it
+//        factory.getContainerProperties().setMissingTopicsFatal(missingTopicsFatal);
+//        // auto-commit is off, so manual message acknowledgment must be configured
+//        factory.getContainerProperties().setAckMode(ContainerProperties.AckMode.MANUAL_IMMEDIATE);
+//        factory.getContainerProperties().setPollTimeout(pollTimeout);
+//        return factory;
+//    }
+//}
+//
diff --git a/sensorhub-core/src/main/java/com/casic/missiles/parser/GenericProtocolParser.java b/sensorhub-core/src/main/java/com/casic/missiles/parser/GenericProtocolParser.java
index bb41939..ec9a04f 100644
--- a/sensorhub-core/src/main/java/com/casic/missiles/parser/GenericProtocolParser.java
+++ b/sensorhub-core/src/main/java/com/casic/missiles/parser/GenericProtocolParser.java
@@ -122,7 +122,7 @@
         }
         ProtocolProcessEventListener.setTask(devcode, bizDataMap, 3);
         // store the data
-        datagramEventProvider.storeData(bizDataMap);
+//        datagramEventProvider.storeData(bizDataMap);
     } catch (RuntimeException rex) {
         log.error("parsing exception, details: {}", rex);
         // data dispatch: async, with exception interception
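One side note on the unchanged log line above: SLF4J treats a trailing Throwable as the stack-trace argument rather than as a value for {}, so that placeholder prints literally. A small sketch of the two idiomatic forms (class and message wording are illustrative):

```java
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class LoggingStyleDemo {

    private static final Logger log = LoggerFactory.getLogger(LoggingStyleDemo.class);

    public static void main(String[] args) {
        RuntimeException rex = new RuntimeException("boom");

        // Trailing Throwable: logs the message plus the full stack trace; no {} needed.
        log.error("parsing exception", rex);

        // If only the summary line is wanted, fill the placeholder explicitly.
        log.error("parsing exception, details: {}", rex.getMessage());
    }
}
```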
org.apache.kafka.clients.CommonClientConfigs; -import org.apache.kafka.clients.consumer.ConsumerConfig; -import org.apache.kafka.common.config.SaslConfigs; -import org.apache.kafka.common.security.auth.SecurityProtocol; -import org.apache.kafka.common.serialization.StringDeserializer; -import org.springframework.beans.factory.annotation.Value; -import org.springframework.boot.SpringBootConfiguration; -import org.springframework.context.annotation.Bean; -import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory; -import org.springframework.kafka.config.KafkaListenerContainerFactory; -import org.springframework.kafka.core.DefaultKafkaConsumerFactory; -import org.springframework.kafka.listener.ConcurrentMessageListenerContainer; -import org.springframework.kafka.listener.ContainerProperties; - -import java.util.HashMap; -import java.util.Map; - - -/** - * @author cz - */ -@SpringBootConfiguration -public class KafkaConsumerConfig { - - @Value("${spring.kafka.consumer.bootstrap-servers}") - private String bootstrapServers; - @Value("${spring.kafka.consumer.group-id}") - private String groupId; - @Value("${spring.kafka.consumer.enable-auto-commit}") - private boolean enableAutoCommit; - @Value("${spring.kafka.properties.session.timeout.ms}") - private String sessionTimeout; - @Value("${spring.kafka.properties.max.poll.interval.ms}") - private String maxPollIntervalTime; - @Value("${spring.kafka.consumer.max-poll-records}") - private String maxPollRecords; - @Value("${spring.kafka.consumer.auto-offset-reset}") - private String autoOffsetReset; - @Value("${spring.kafka.listener.concurrency}") - private Integer concurrency; - @Value("${spring.kafka.listener.missing-topics-fatal}") - private boolean missingTopicsFatal; - - private final long pollTimeout = 600000; - - @Bean - public Map consumerConfigs() { - Map propsMap = new HashMap<>(16); - // 服务器地址,不多说配置直接用 - propsMap.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers); - // groupId不多说,直接用 - propsMap.put(ConsumerConfig.GROUP_ID_CONFIG, groupId); - //是否自动提交偏移量,默认值是true,为了避免出现重复数据和数据丢失,可以把它设置为false,然后手动提交偏移量 - propsMap.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, enableAutoCommit); - //自动提交的时间间隔,自动提交开启时生效 - propsMap.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "2000"); - //该属性指定了消费者在读取一个没有偏移量的分区或者偏移量无效的情况下该作何处理: - //我们使用latest:当各分区下有已提交的offset时,从提交的offset开始消费;无提交的offset时,消费新产生的该分区下的数据 - propsMap.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, autoOffsetReset); - //两次poll之间的最大间隔,默认值为5分钟。如果超过这个间隔会触发reBalance - propsMap.put(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, maxPollIntervalTime); - //这个参数定义了poll方法最多可以拉取多少条消息,默认值为500。 - propsMap.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, maxPollRecords); - //当broker多久没有收到consumer的心跳请求后就触发reBalance,默认值是10s - propsMap.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, sessionTimeout); - //序列化(我们这边使用StringDeserializer,与生产者保持一致) - propsMap.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); - propsMap.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); - // 下面四个参数是用户名密码的参数,没有用户名密码可以去掉以下配置 -// propsMap.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, SecurityProtocol.SASL_PLAINTEXT.name); -// propsMap.put(SaslConfigs.SASL_MECHANISM, "PLAIN"); -// propsMap.put("java.security.auth.login.config", "10000"); - // 这里username设置用户名, password设置密码我写死到代码里了,可以更改为nacos配置 -// propsMap.put(SaslConfigs.SASL_JAAS_CONFIG, String.format("org.apache.kafka.common.security.plain.PlainLoginModule required username=\"admin\" password=\"admin1234\";")); 
- return propsMap; - } - - // 消费者工厂,将配置信息加载进去 - @Bean("consumerFactory") - public DefaultKafkaConsumerFactory consumerFactory() { - return new DefaultKafkaConsumerFactory(consumerConfigs()); - } - - @Bean("listenerContainerFactory") - public KafkaListenerContainerFactory> kafkaListenerContainerFactory() { - ConcurrentKafkaListenerContainerFactory factory = new ConcurrentKafkaListenerContainerFactory<>(); - factory.setConsumerFactory(consumerFactory()); - //在侦听器容器中运行的线程数,一般设置为 机器数*分区数 - factory.setConcurrency(concurrency); - //消费监听接口监听的主题不存在时,默认会报错,所以设置为false忽略错误 - factory.getContainerProperties().setMissingTopicsFatal(missingTopicsFatal); - //自动提交关闭,需要设置手动消息确认 - factory.getContainerProperties().setAckMode(ContainerProperties.AckMode.MANUAL_IMMEDIATE); - factory.getContainerProperties().setPollTimeout(pollTimeout); - return factory; - } -} - +//package com.casic.missiles.autoconfig; +// +//import org.apache.kafka.clients.CommonClientConfigs; +//import org.apache.kafka.clients.consumer.ConsumerConfig; +//import org.apache.kafka.common.config.SaslConfigs; +//import org.apache.kafka.common.security.auth.SecurityProtocol; +//import org.apache.kafka.common.serialization.StringDeserializer; +//import org.springframework.beans.factory.annotation.Value; +//import org.springframework.boot.SpringBootConfiguration; +//import org.springframework.context.annotation.Bean; +//import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory; +//import org.springframework.kafka.config.KafkaListenerContainerFactory; +//import org.springframework.kafka.core.DefaultKafkaConsumerFactory; +//import org.springframework.kafka.listener.ConcurrentMessageListenerContainer; +//import org.springframework.kafka.listener.ContainerProperties; +// +//import java.util.HashMap; +//import java.util.Map; +// +// +///** +// * @author cz +// */ +//@SpringBootConfiguration +//public class KafkaConsumerConfig { +// +// @Value("${spring.kafka.consumer.bootstrap-servers}") +// private String bootstrapServers; +// @Value("${spring.kafka.consumer.group-id}") +// private String groupId; +// @Value("${spring.kafka.consumer.enable-auto-commit}") +// private boolean enableAutoCommit; +// @Value("${spring.kafka.properties.session.timeout.ms}") +// private String sessionTimeout; +// @Value("${spring.kafka.properties.max.poll.interval.ms}") +// private String maxPollIntervalTime; +// @Value("${spring.kafka.consumer.max-poll-records}") +// private String maxPollRecords; +// @Value("${spring.kafka.consumer.auto-offset-reset}") +// private String autoOffsetReset; +// @Value("${spring.kafka.listener.concurrency}") +// private Integer concurrency; +// @Value("${spring.kafka.listener.missing-topics-fatal}") +// private boolean missingTopicsFatal; +// +// private final long pollTimeout = 600000; +// +// @Bean +// public Map consumerConfigs() { +// Map propsMap = new HashMap<>(16); +// // 服务器地址,不多说配置直接用 +// propsMap.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers); +// // groupId不多说,直接用 +// propsMap.put(ConsumerConfig.GROUP_ID_CONFIG, groupId); +// //是否自动提交偏移量,默认值是true,为了避免出现重复数据和数据丢失,可以把它设置为false,然后手动提交偏移量 +// propsMap.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, enableAutoCommit); +// //自动提交的时间间隔,自动提交开启时生效 +// propsMap.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "2000"); +// //该属性指定了消费者在读取一个没有偏移量的分区或者偏移量无效的情况下该作何处理: +// //我们使用latest:当各分区下有已提交的offset时,从提交的offset开始消费;无提交的offset时,消费新产生的该分区下的数据 +// propsMap.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, autoOffsetReset); +// 
//两次poll之间的最大间隔,默认值为5分钟。如果超过这个间隔会触发reBalance +// propsMap.put(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, maxPollIntervalTime); +// //这个参数定义了poll方法最多可以拉取多少条消息,默认值为500。 +// propsMap.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, maxPollRecords); +// //当broker多久没有收到consumer的心跳请求后就触发reBalance,默认值是10s +// propsMap.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, sessionTimeout); +// //序列化(我们这边使用StringDeserializer,与生产者保持一致) +// propsMap.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); +// propsMap.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); +// // 下面四个参数是用户名密码的参数,没有用户名密码可以去掉以下配置 +//// propsMap.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, SecurityProtocol.SASL_PLAINTEXT.name); +//// propsMap.put(SaslConfigs.SASL_MECHANISM, "PLAIN"); +//// propsMap.put("java.security.auth.login.config", "10000"); +// // 这里username设置用户名, password设置密码我写死到代码里了,可以更改为nacos配置 +//// propsMap.put(SaslConfigs.SASL_JAAS_CONFIG, String.format("org.apache.kafka.common.security.plain.PlainLoginModule required username=\"admin\" password=\"admin1234\";")); +// return propsMap; +// } +// +// // 消费者工厂,将配置信息加载进去 +// @Bean("consumerFactory") +// public DefaultKafkaConsumerFactory consumerFactory() { +// return new DefaultKafkaConsumerFactory(consumerConfigs()); +// } +// +// @Bean("listenerContainerFactory") +// public KafkaListenerContainerFactory> kafkaListenerContainerFactory() { +// ConcurrentKafkaListenerContainerFactory factory = new ConcurrentKafkaListenerContainerFactory<>(); +// factory.setConsumerFactory(consumerFactory()); +// //在侦听器容器中运行的线程数,一般设置为 机器数*分区数 +// factory.setConcurrency(concurrency); +// //消费监听接口监听的主题不存在时,默认会报错,所以设置为false忽略错误 +// factory.getContainerProperties().setMissingTopicsFatal(missingTopicsFatal); +// //自动提交关闭,需要设置手动消息确认 +// factory.getContainerProperties().setAckMode(ContainerProperties.AckMode.MANUAL_IMMEDIATE); +// factory.getContainerProperties().setPollTimeout(pollTimeout); +// return factory; +// } +//} +// diff --git a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerListener.java b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerListener.java index c50a4e3..6600312 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerListener.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerListener.java @@ -1,31 +1,31 @@ -package com.casic.missiles.autoconfig; - -import org.apache.kafka.clients.consumer.ConsumerRecord; -import org.springframework.kafka.annotation.KafkaListener; -import org.springframework.kafka.config.KafkaListenerEndpointRegistry; -import org.springframework.kafka.support.Acknowledgment; -import org.springframework.web.bind.annotation.RestController; - -import javax.annotation.Resource; - -@RestController() -public class KafkaConsumerListener{ - @Resource - private KafkaListenerEndpointRegistry kafkaListenerEndpointRegistry; - /** - * 监听kafka消息 - * - * 使用autoStartup = "false"必须指定id - */ - @KafkaListener(id = "${spring.kafka.consumer.group-id}", topics = {"KAFKA_TEST_TOPICS"}, autoStartup = "true") - public void listenTopics(ConsumerRecord consumerRecord, Acknowledgment ack) { - try { - System.out.println("listenTopics接受消息:" + consumerRecord.value()); - //手动确认 - ack.acknowledge(); - } catch (Exception e) { - System.out.println("消费失败:" + e); - } - } - -} +//package com.casic.missiles.autoconfig; +// +//import org.apache.kafka.clients.consumer.ConsumerRecord; +//import org.springframework.kafka.annotation.KafkaListener; 
+//import org.springframework.kafka.config.KafkaListenerEndpointRegistry;
+//import org.springframework.kafka.support.Acknowledgment;
+//import org.springframework.web.bind.annotation.RestController;
+//
+//import javax.annotation.Resource;
+//
+//@RestController()
+//public class KafkaConsumerListener{
+//    @Resource
+//    private KafkaListenerEndpointRegistry kafkaListenerEndpointRegistry;
+//    /**
+//     * 监听kafka消息
+//     *
+//     * 使用autoStartup = "false"必须指定id
+//     */
+//    @KafkaListener(id = "${spring.kafka.consumer.group-id}", topics = {"KAFKA_TEST_TOPICS"}, autoStartup = "true")
+//    public void listenTopics(ConsumerRecord consumerRecord, Acknowledgment ack) {
+//        try {
+//            System.out.println("listenTopics接受消息:" + consumerRecord.value());
+//            //手动确认
+//            ack.acknowledge();
+//        } catch (Exception e) {
+//            System.out.println("消费失败:" + e);
+//        }
+//    }
+//
+//}
diff --git a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerListenerError.java b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerListenerError.java
index d6f771a..dbc561c 100644
--- a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerListenerError.java
+++ b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerListenerError.java
@@ -1,29 +1,29 @@
-package com.casic.missiles.autoconfig;
-
-import edu.umd.cs.findbugs.annotations.NonNull;
-import org.apache.kafka.clients.consumer.Consumer;
-import org.springframework.kafka.listener.KafkaListenerErrorHandler;
-import org.springframework.kafka.listener.ListenerExecutionFailedException;
-import org.springframework.messaging.Message;
-import org.springframework.stereotype.Component;
-
-@Component
-public class KafkaConsumerListenerError implements KafkaListenerErrorHandler {
-
-
-    @Override
-    @NonNull
-    public Object handleError(Message message, ListenerExecutionFailedException e) {
-        return new Object();
-    }
-
-    @Override
-    public Object handleError(Message message, ListenerExecutionFailedException exception, Consumer consumer) {
-        System.out.println("消息详情:" + message);
-        System.out.println("异常信息::" + exception);
-        System.out.println("消费者详情::" + consumer.groupMetadata());
-        System.out.println("监听主题::" + consumer.listTopics());
-        return KafkaListenerErrorHandler.super.handleError(message, exception, consumer);
-    }
-
-}
+//package com.casic.missiles.autoconfig;
+//
+//import edu.umd.cs.findbugs.annotations.NonNull;
+//import org.apache.kafka.clients.consumer.Consumer;
+//import org.springframework.kafka.listener.KafkaListenerErrorHandler;
+//import org.springframework.kafka.listener.ListenerExecutionFailedException;
+//import org.springframework.messaging.Message;
+//import org.springframework.stereotype.Component;
+//
+//@Component
+//public class KafkaConsumerListenerError implements KafkaListenerErrorHandler {
+//
+//
+//    @Override
+//    @NonNull
+//    public Object handleError(Message message, ListenerExecutionFailedException e) {
+//        return new Object();
+//    }
+//
+//    @Override
+//    public Object handleError(Message message, ListenerExecutionFailedException exception, Consumer consumer) {
+//        System.out.println("消息详情:" + message);
+//        System.out.println("异常信息::" + exception);
+//        System.out.println("消费者详情::" + consumer.groupMetadata());
+//        System.out.println("监听主题::" + consumer.listTopics());
+//        return KafkaListenerErrorHandler.super.handleError(message, exception, consumer);
+//    }
+//
+//}
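Note on the two files above: the error handler only fires if a listener references it by bean name via the errorHandler attribute of @KafkaListener, which the commented-out listener never did. A minimal sketch of that wiring, assuming the beans above were re-enabled (group id and topic are illustrative, process() is a hypothetical business method):

    @KafkaListener(id = "demo-group", topics = {"KAFKA_TEST_TOPICS"},
            containerFactory = "listenerContainerFactory",
            errorHandler = "kafkaConsumerListenerError") // bean name of the KafkaListenerErrorHandler above
    public void listen(ConsumerRecord<String, String> record, Acknowledgment ack) {
        process(record.value()); // an exception thrown here is routed to handleError(...)
        ack.acknowledge();       // manual commit, matching AckMode.MANUAL_IMMEDIATE
    }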
diff --git a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaProviderConfig.java b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaProviderConfig.java
index e8e36ec..1d605ed 100644
--- a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaProviderConfig.java
+++ b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaProviderConfig.java
@@ -1,85 +1,85 @@
-package com.casic.missiles.autoconfig;
-
-import org.apache.kafka.clients.CommonClientConfigs;
-import org.apache.kafka.clients.producer.ProducerConfig;
-import org.apache.kafka.common.config.SaslConfigs;
-import org.apache.kafka.common.security.auth.SecurityProtocol;
-import org.apache.kafka.common.serialization.StringSerializer;
-import org.springframework.beans.factory.annotation.Value;
-import org.springframework.boot.SpringBootConfiguration;
-import org.springframework.context.annotation.Bean;
-import org.springframework.context.annotation.Primary;
-import org.springframework.kafka.core.DefaultKafkaProducerFactory;
-import org.springframework.kafka.core.KafkaTemplate;
-import org.springframework.kafka.core.ProducerFactory;
-import org.springframework.kafka.transaction.KafkaTransactionManager;
-
-import java.util.HashMap;
-import java.util.Map;
-
-/**
- * @author cz
- */
-@SpringBootConfiguration
-public class KafkaProviderConfig {
-
-    @Value("${spring.kafka.producer.bootstrap-servers}")
-    private String bootstrapServers;
-    @Value("${spring.kafka.producer.acks}")
-    private String acks;
-    @Value("${spring.kafka.producer.retries}")
-    private String retries;
-    @Value("${spring.kafka.producer.batch-size}")
-    private String batchSize;
-    @Value("${spring.kafka.producer.buffer-memory}")
-    private String bufferMemory;
-
-    @Bean
-    public Map<String, Object> producerConfigs() {
-        Map<String, Object> props = new HashMap<>(16);
-        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
-        //响应模式,我们使用acks=all :只有当所有参与复制的节点全部收到消息时,生产者才会收到一个来自服务器的成功响应。
-        props.put(ProducerConfig.ACKS_CONFIG, acks);
-        //发生错误后,消息重发的次数,开启事务必须大于0
-        props.put(ProducerConfig.RETRIES_CONFIG, retries);
-        //当多个消息发送到相同分区时,生产者会将消息打包到一起,以减少请求交互. 而不是一条条发送
-        props.put(ProducerConfig.BATCH_SIZE_CONFIG, batchSize);
-        //有的时刻消息比较少,过了很久,比如5min也没有凑够16KB,这样延时就很大,所以需要一个参数. 再设置一个时间,到了这个时间,
-        props.put(ProducerConfig.LINGER_MS_CONFIG, "5000");
-        //生产者内存缓冲区的大小
-        props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, bufferMemory);
-        //序列和消费者对应
-        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
-        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
-
-        //用户名密码配置,没有用户名密码可以去掉以下配置
-//        props.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, SecurityProtocol.SASL_PLAINTEXT.name);
-//        props.put(SaslConfigs.SASL_MECHANISM, "PLAIN");
-//        props.put("java.security.auth.login.config", "10000");
-        // 可以在nacos配置文件中配置
-//        props.put(SaslConfigs.SASL_JAAS_CONFIG, String.format("org.apache.kafka.common.security.plain.PlainLoginModule required username=\"admin\" password=\"admin1234\";"));
-        return props;
-    }
-
-    // 生产者工厂
-    @Bean("kafkaProduceFactory")
-    public ProducerFactory producerFactory() {
-        DefaultKafkaProducerFactory factory = new DefaultKafkaProducerFactory<>(producerConfigs());
-        factory.setTransactionIdPrefix("kafkaTx-");
-        return factory;
-    }
-
-    // 事务处理
-    // 这里的事务处理会和项目中的其他事务起冲突,所以我一般会把@Bean去掉,不用spring代理
-    @Bean("kafkaTransactionManager")
-    @Primary
-    public KafkaTransactionManager kafkaTransactionManager(ProducerFactory producerFactory) {
-        return new KafkaTransactionManager(producerFactory);
-    }
-
-    @Bean
-    public KafkaTemplate kafkaTemplate() {
-        return new KafkaTemplate<>(producerFactory());
-    }
-
-}
+//package com.casic.missiles.autoconfig;
+//
+//import org.apache.kafka.clients.CommonClientConfigs;
+//import org.apache.kafka.clients.producer.ProducerConfig;
+//import org.apache.kafka.common.config.SaslConfigs;
+//import org.apache.kafka.common.security.auth.SecurityProtocol;
+//import org.apache.kafka.common.serialization.StringSerializer;
+//import org.springframework.beans.factory.annotation.Value;
+//import org.springframework.boot.SpringBootConfiguration;
+//import org.springframework.context.annotation.Bean;
+//import org.springframework.context.annotation.Primary;
+//import org.springframework.kafka.core.DefaultKafkaProducerFactory;
+//import org.springframework.kafka.core.KafkaTemplate;
+//import org.springframework.kafka.core.ProducerFactory;
+//import org.springframework.kafka.transaction.KafkaTransactionManager;
+//
+//import java.util.HashMap;
+//import java.util.Map;
+//
+///**
+// * @author cz
+// */
+//@SpringBootConfiguration
+//public class KafkaProviderConfig {
+//
+//    @Value("${spring.kafka.producer.bootstrap-servers}")
+//    private String bootstrapServers;
+//    @Value("${spring.kafka.producer.acks}")
+//    private String acks;
+//    @Value("${spring.kafka.producer.retries}")
+//    private String retries;
+//    @Value("${spring.kafka.producer.batch-size}")
+//    private String batchSize;
+//    @Value("${spring.kafka.producer.buffer-memory}")
+//    private String bufferMemory;
+//
+//    @Bean
+//    public Map<String, Object> producerConfigs() {
+//        Map<String, Object> props = new HashMap<>(16);
+//        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
+//        //响应模式,我们使用acks=all :只有当所有参与复制的节点全部收到消息时,生产者才会收到一个来自服务器的成功响应。
+//        props.put(ProducerConfig.ACKS_CONFIG, acks);
+//        //发生错误后,消息重发的次数,开启事务必须大于0
+//        props.put(ProducerConfig.RETRIES_CONFIG, retries);
+//        //当多个消息发送到相同分区时,生产者会将消息打包到一起,以减少请求交互. 而不是一条条发送
+//        props.put(ProducerConfig.BATCH_SIZE_CONFIG, batchSize);
+//        //有的时刻消息比较少,过了很久,比如5min也没有凑够16KB,这样延时就很大,所以需要一个参数. 再设置一个时间,到了这个时间,
+//        props.put(ProducerConfig.LINGER_MS_CONFIG, "5000");
+//        //生产者内存缓冲区的大小
+//        props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, bufferMemory);
+//        //序列和消费者对应
+//        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
+//        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
+//
+//        //用户名密码配置,没有用户名密码可以去掉以下配置
+////        props.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, SecurityProtocol.SASL_PLAINTEXT.name);
+////        props.put(SaslConfigs.SASL_MECHANISM, "PLAIN");
+////        props.put("java.security.auth.login.config", "10000");
+//        // 可以在nacos配置文件中配置
+////        props.put(SaslConfigs.SASL_JAAS_CONFIG, String.format("org.apache.kafka.common.security.plain.PlainLoginModule required username=\"admin\" password=\"admin1234\";"));
+//        return props;
+//    }
+//
+//    // 生产者工厂
+//    @Bean("kafkaProduceFactory")
+//    public ProducerFactory producerFactory() {
+//        DefaultKafkaProducerFactory factory = new DefaultKafkaProducerFactory<>(producerConfigs());
+//        factory.setTransactionIdPrefix("kafkaTx-");
+//        return factory;
+//    }
+//
+//    // 事务处理
+//    // 这里的事务处理会和项目中的其他事务起冲突,所以我一般会把@Bean去掉,不用spring代理
+//    @Bean("kafkaTransactionManager")
+//    @Primary
+//    public KafkaTransactionManager kafkaTransactionManager(ProducerFactory producerFactory) {
+//        return new KafkaTransactionManager(producerFactory);
+//    }
+//
+//    @Bean
+//    public KafkaTemplate kafkaTemplate() {
+//        return new KafkaTemplate<>(producerFactory());
+//    }
+//
+//}
diff --git a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaSendResultHandler.java b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaSendResultHandler.java
index 1fdac64..1598ee0 100644
--- a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaSendResultHandler.java
+++ b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaSendResultHandler.java
@@ -1,22 +1,22 @@
-package com.casic.missiles.autoconfig;
-
-import org.apache.kafka.clients.producer.ProducerRecord;
-import org.apache.kafka.clients.producer.RecordMetadata;
-import org.springframework.kafka.support.ProducerListener;
-import org.springframework.stereotype.Component;
-
-import javax.annotation.Nullable;
-
-@Component
-public class KafkaSendResultHandler implements ProducerListener {
-
-    @Override
-    public void onSuccess(ProducerRecord producerRecord, RecordMetadata recordMetadata) {
-        System.out.println("消息发送成功:" + producerRecord.toString());
-    }
-
-    @Override
-    public void onError(ProducerRecord producerRecord, @Nullable RecordMetadata recordMetadata, Exception exception) {
-        System.out.println("消息发送失败:" + producerRecord.toString() + exception.getMessage());
-    }
-}
+//package com.casic.missiles.autoconfig;
+//
+//import org.apache.kafka.clients.producer.ProducerRecord;
+//import org.apache.kafka.clients.producer.RecordMetadata;
+//import org.springframework.kafka.support.ProducerListener;
+//import org.springframework.stereotype.Component;
+//
+//import javax.annotation.Nullable;
+//
+//@Component
+//public class KafkaSendResultHandler implements ProducerListener {
+//
+//    @Override
+//    public void onSuccess(ProducerRecord producerRecord, RecordMetadata recordMetadata) {
+//        System.out.println("消息发送成功:" + producerRecord.toString());
+//    }
+//
+//    @Override
+//    public void onError(ProducerRecord producerRecord, @Nullable RecordMetadata recordMetadata, Exception exception) {
+//        System.out.println("消息发送失败:" + producerRecord.toString() + exception.getMessage());
+//    }
+//}
diff --git a/sensorhub-core/src/main/java/com/casic/missiles/cache/ProtocolProcessEventListener.java b/sensorhub-core/src/main/java/com/casic/missiles/cache/ProtocolProcessEventListener.java
index b0bf96d..5144189 100644
--- a/sensorhub-core/src/main/java/com/casic/missiles/cache/ProtocolProcessEventListener.java
+++ b/sensorhub-core/src/main/java/com/casic/missiles/cache/ProtocolProcessEventListener.java
@@ -157,7 +157,7 @@
                 deviceDataList.add(deviceData);
             }
             //批量保存
-            deviceDataService.saveBatch(deviceDataList);
+//            deviceDataService.saveBatch(deviceDataList);
         }
     }
     //移除缓存
diff --git a/sensorhub-core/src/main/java/com/casic/missiles/controller/AepCommandSend.java b/sensorhub-core/src/main/java/com/casic/missiles/controller/AepCommandSend.java
index 2d0aeae..5e3ba38 100644
--- a/sensorhub-core/src/main/java/com/casic/missiles/controller/AepCommandSend.java
+++ b/sensorhub-core/src/main/java/com/casic/missiles/controller/AepCommandSend.java
@@ -64,7 +64,7 @@
             request.setParamMasterKey("0a77886fae4f4ff68d926adeb3a3ef5b"); // single value
             request.setBody(JSONObject.toJSONString(busConfigParam).getBytes());
             CreateCommandLwm2mProfileResponse msgResponse = client.CreateCommandLwm2mProfile(request);
-            log.info("-----" + msgResponse.getMessage());
+            log.info("send status-----" + msgResponse.getMessage());
         } catch (Exception ex) {
         } finally {
             client.shutdown();
diff --git a/sensorhub-core/src/main/java/com/casic/missiles/controller/KafkaController.java b/sensorhub-core/src/main/java/com/casic/missiles/controller/KafkaController.java
deleted file mode 100644
index d679bf1..0000000
--- a/sensorhub-core/src/main/java/com/casic/missiles/controller/KafkaController.java
+++ /dev/null
@@ -1,59 +0,0 @@
-package com.casic.missiles.controller;
-
-import com.casic.missiles.autoconfig.KafkaSendResultHandler;
-import com.casic.missiles.parser.sender.impl.KafkaSubscribe;
-import org.apache.kafka.clients.producer.ProducerRecord;
-import org.springframework.beans.factory.annotation.Autowired;
-import org.springframework.kafka.core.KafkaTemplate;
-import org.springframework.kafka.support.KafkaHeaders;
-import org.springframework.messaging.MessageHeaders;
-import org.springframework.messaging.support.GenericMessage;
-import org.springframework.transaction.annotation.Transactional;
-import org.springframework.web.bind.annotation.RequestMapping;
-import org.springframework.web.bind.annotation.RestController;
-
-import java.util.HashMap;
-import java.util.Map;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.TimeoutException;
-
-@RestController
-@RequestMapping("/provider")
-//这个注解代表这个类开启Springboot事务,因为我们在Kafka的配置文件开启了Kafka事务,不然会报错
-public class KafkaController {
-
-    @Autowired
-    private KafkaSubscribe kafkaSubscribe;
-
-    @RequestMapping("/send")
-    @Transactional
-    public String sendMultiple() throws Exception {
-        kafkaSubscribe.publishDataSubscribe(null, null);
-        return "okk";
-    }
-
-
-//    /**
-//     * Kafka提供了多种构建消息的方式
-//     *
-//     * @throws ExecutionException
-//     * @throws InterruptedException
-//     * @throws TimeoutException
-//     */
-//    public void sendDemo() throws ExecutionException, InterruptedException, TimeoutException {
-//        //后面的get代表同步发送,括号内时间可选,代表超过这个时间会抛出超时异常,但是仍会发送成功
-//        kafkaTemplate.send("topic1", "发给topic1").get();
-//        //使用ProducerRecord发送消息
-//        ProducerRecord producerRecord = new ProducerRecord<>("topic.quick.demo", "use ProducerRecord to send message");
-//        kafkaTemplate.send(producerRecord);
-//        //使用Message发送消息
-//        Map map = new HashMap<>();
-//        map.put(KafkaHeaders.TOPIC, "topic.quick.demo");
-//        map.put(KafkaHeaders.PARTITION_ID, 0);
-//        map.put(KafkaHeaders.MESSAGE_KEY, 0);
-//        GenericMessage message = new GenericMessage<>("use Message to send message", new MessageHeaders(map));
-//        kafkaTemplate.send(message);
-//    }
-
-}
-
diff --git a/sensorhub-core/src/main/java/com/casic/missiles/controller/ReceiveController.java b/sensorhub-core/src/main/java/com/casic/missiles/controller/ReceiveController.java
index e5fa505..dfed36a 100644
--- a/sensorhub-core/src/main/java/com/casic/missiles/controller/ReceiveController.java
+++ b/sensorhub-core/src/main/java/com/casic/missiles/controller/ReceiveController.java
@@ -18,13 +18,13 @@
     private final ThreadPoolExecutor threadPoolExecutor = new ThreadPoolExecutor(5, 10, 60, TimeUnit.SECONDS, new LinkedBlockingDeque<>(10));

     @RequestMapping("/data")
-    public Object testNbResponse(@RequestBody Map h2sDataMap) {
+    public Object testNbResponse(@RequestBody Map dataMap) {
         ResponseData responseData = new ResponseData();
-        System.out.println(JSON.toJSON(h2sDataMap));
+        System.out.println(JSON.toJSON(dataMap));
         threadPoolExecutor.execute(
                 () -> {
                     AepCommandSend aepCommandSend = new AepCommandSend();
-                    aepCommandSend.sendConfig(h2sDataMap);
+                    aepCommandSend.sendConfig(dataMap);
                 }
         );
         responseData.setCode(200);
diff --git a/sensorhub-core/src/main/java/com/casic/missiles/netty/SensorhubServer.java b/sensorhub-core/src/main/java/com/casic/missiles/netty/SensorhubServer.java
index c4a534c..a946173 100644
--- a/sensorhub-core/src/main/java/com/casic/missiles/netty/SensorhubServer.java
+++ b/sensorhub-core/src/main/java/com/casic/missiles/netty/SensorhubServer.java
@@ -3,6 +3,7 @@
 import com.casic.missiles.autoconfig.SensorhubProperties;
 import com.casic.missiles.netty.handler.SensorhubServerChannelInitialHandler;
 import io.netty.bootstrap.ServerBootstrap;
+import io.netty.buffer.PooledByteBufAllocator;
 import io.netty.channel.ChannelFuture;
 import io.netty.channel.ChannelOption;
 import io.netty.channel.EventLoopGroup;
@@ -71,6 +72,8 @@
         b.group(bossGroup, workerGroup) // 设置EventLoopGroup
                 .channel(NioServerSocketChannel.class) // 指明新的Channel的类型
                 .childHandler(new SensorhubServerChannelInitialHandler()) // 指定ChannelHandler
+                .option(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT)
+                .childOption(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT)
                 .option(ChannelOption.SO_BACKLOG, 1024) // 设置的ServerChannel的一些选项
                 .childOption(ChannelOption.SO_KEEPALIVE, true); // 当SO_KEEPALIVE=true的时候,服务端可以探测客户端的连接是否还存活着,如果客户端关闭了,那么服务端的连接可以关闭掉,释放资源
         //增加检测
diff --git a/sensorhub-core/src/main/java/com/casic/missiles/parser/GenericProtocolParser.java b/sensorhub-core/src/main/java/com/casic/missiles/parser/GenericProtocolParser.java
index bb41939..ec9a04f 100644
--- a/sensorhub-core/src/main/java/com/casic/missiles/parser/GenericProtocolParser.java
+++ b/sensorhub-core/src/main/java/com/casic/missiles/parser/GenericProtocolParser.java
@@ -122,7 +122,7 @@
         }
         ProtocolProcessEventListener.setTask(devcode, bizDataMap, 3);
         //存储数据
-        datagramEventProvider.storeData(bizDataMap);
+//        datagramEventProvider.storeData(bizDataMap);
     } catch (RuntimeException rex) {
         log.error("解析出现异常,异常信息为{}", rex);
         //数据发送,异步,异常拦截
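Note on the SensorhubServer change above: switching both the server channel and the child channels to PooledByteBufAllocator makes any missing release() surface as direct-memory growth rather than GC pressure, which is exactly what the direct-memory reporting in this patch is chasing. A small sketch for flushing such leaks out in a test, assuming Netty 4.x (the level can also be set with -Dio.netty.leakDetection.level=paranoid):

    import io.netty.buffer.ByteBuf;
    import io.netty.buffer.PooledByteBufAllocator;
    import io.netty.util.ResourceLeakDetector;

    public class LeakCheckDemo {
        public static void main(String[] args) {
            // paranoid = track every buffer and report leaks when they are GC'd; test-only setting
            ResourceLeakDetector.setLevel(ResourceLeakDetector.Level.PARANOID);
            ByteBuf buf = PooledByteBufAllocator.DEFAULT.directBuffer(256);
            try {
                buf.writeLong(42L); // ... use the buffer ...
            } finally {
                buf.release(); // without this, Netty logs a LEAK record pointing at the allocation site
            }
        }
    }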
diff --git a/sensorhub-core/src/main/java/com/casic/missiles/parser/ProtocolParserSupport.java b/sensorhub-core/src/main/java/com/casic/missiles/parser/ProtocolParserSupport.java
index e4a79ae..c886d25 100644
--- a/sensorhub-core/src/main/java/com/casic/missiles/parser/ProtocolParserSupport.java
+++ b/sensorhub-core/src/main/java/com/casic/missiles/parser/ProtocolParserSupport.java
@@ -29,6 +29,13 @@
 public class ProtocolParserSupport {

+    /**
+     * 获取协议的固定数据信息
+     *
+     * @param protocolFactory
+     * @param wholeDatagramByte
+     * @return
+     */
     protected Map getParseFixedDataMap(AbstractProtocolConfigFactory protocolFactory, ByteBuf wholeDatagramByte) {
         ProtocolFieldConfigProvider protocolFieldConfigProvider = protocolFactory.getProtocolFieldConfigProvider();
         RuleConfigProvider ruleConfigProvider = protocolFactory.getRuleConfigProvider();
@@ -41,7 +48,7 @@
         //打印源数据,设备编号
         ProcessEventTask processEventTask = ProcessEventTask.builder()
                 .devcode((String) parseFixedDataMap.get("devcode"))
-                .decryptSoureData(ByteBufUtil.hexDump(wholeDatagramByte))
+                .decryptSoureData(ByteBufUtil.hexDump(wholeDatagramByte).substring(wholeDatagramByte.readerIndex()))
                 .build();
         ProtocolProcessEventListener.asynAddTask(processEventTask);
         return parseFixedDataMap;
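One thing worth double-checking on the substring change above: ByteBufUtil.hexDump(ByteBuf) already starts at the buffer's readerIndex, and each byte becomes two hex characters, so an offset taken straight from readerIndex() is in the wrong unit. A hedged illustration of the semantics, assuming Netty's ByteBufUtil:

    import io.netty.buffer.ByteBuf;
    import io.netty.buffer.ByteBufUtil;
    import io.netty.buffer.Unpooled;

    public class HexDumpDemo {
        public static void main(String[] args) {
            ByteBuf buf = Unpooled.wrappedBuffer(new byte[]{0x01, 0x02, 0x03, 0x04});
            buf.readByte(); // advance readerIndex to 1
            System.out.println(ByteBufUtil.hexDump(buf));                       // "020304" — dump of readable bytes only
            System.out.println(ByteBufUtil.hexDump(buf, 0, buf.writerIndex())); // "01020304" — whole buffer
            System.out.println(ByteBufUtil.hexDump(buf, 0, buf.writerIndex())
                    .substring(buf.readerIndex() * 2));                         // "020304" — two hex chars per byte, hence *2
        }
    }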
b/sensorhub-core/src/main/java/com/casic/missiles/parser/ProtocolParserSupport.java @@ -29,6 +29,13 @@ public class ProtocolParserSupport { + /** + * 获取协议的固定数据信息 + * + * @param protocolFactory + * @param wholeDatagramByte + * @return + */ protected Map getParseFixedDataMap(AbstractProtocolConfigFactory protocolFactory, ByteBuf wholeDatagramByte) { ProtocolFieldConfigProvider protocolFieldConfigProvider = protocolFactory.getProtocolFieldConfigProvider(); RuleConfigProvider ruleConfigProvider = protocolFactory.getRuleConfigProvider(); @@ -41,7 +48,7 @@ //打印源数据,设备编号 ProcessEventTask processEventTask = ProcessEventTask.builder() .devcode((String) parseFixedDataMap.get("devcode")) - .decryptSoureData(ByteBufUtil.hexDump(wholeDatagramByte)) + .decryptSoureData(ByteBufUtil.hexDump(wholeDatagramByte).substring(wholeDatagramByte.readerIndex())) .build(); ProtocolProcessEventListener.asynAddTask(processEventTask); return parseFixedDataMap; diff --git a/sensorhub-core/src/main/java/com/casic/missiles/parser/SensorhubDecoder.java b/sensorhub-core/src/main/java/com/casic/missiles/parser/SensorhubDecoder.java index c8c54f8..fccfa7c 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/parser/SensorhubDecoder.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/parser/SensorhubDecoder.java @@ -1,14 +1,17 @@ package com.casic.missiles.parser; +import com.casic.missiles.autoconfig.DirectMemoryReporter; import com.casic.missiles.parser.predecodec.AbstractPretreatment; import com.casic.missiles.pojo.ParseResult; import com.casic.missiles.provider.ProtocolConfigProvider; import com.casic.missiles.util.ClazzUtil; import io.netty.buffer.ByteBuf; +import io.netty.buffer.ByteBufAllocator; import io.netty.buffer.ByteBufUtil; import io.netty.channel.ChannelHandlerContext; import io.netty.handler.codec.ByteToMessageDecoder; import lombok.extern.slf4j.Slf4j; +import org.apache.commons.codec.binary.Hex; import java.io.BufferedInputStream; import java.io.File; @@ -33,29 +36,43 @@ * 3、将从通用的协议解析器得到的结果进行保存到list,传递给回复的handler,进行相关的回复命令操作 */ @Override - public void decode(ChannelHandlerContext ctx, ByteBuf buffer, List list){ + public void decode(ChannelHandlerContext ctx, ByteBuf buffer, List list) { //添加前置处理器 List abstractPreProcessingList = ClazzUtil.getSubClassList(AbstractPretreatment.class, true); ByteBuf standardByteBuf = buffer; - String oldBuff=ByteBufUtil.hexDump(standardByteBuf); + String oldBuff = ByteBufUtil.hexDump(standardByteBuf); for (AbstractPretreatment abstractPretreatment : abstractPreProcessingList) { standardByteBuf = abstractPretreatment.decode(standardByteBuf); } - boolean pretreatmentStatus=!oldBuff.equals(ByteBufUtil.hexDump(standardByteBuf)); + boolean pretreatmentStatus = !oldBuff.equals(ByteBufUtil.hexDump(standardByteBuf)); log.info(ByteBufUtil.hexDump(standardByteBuf)); //这里可以增加一些前置处理,例如判断拆包合包等网络流的操作 ProtocolParser protocolParser = new GenericProtocolParser(); - System.out.println(protocolParser); ProtocolConfigProvider protocolConfigProvider = new ProtocolConfigProvider(); ParseResult parseResult = null; +// DirectMemoryReporter memoryReporter=new DirectMemoryReporter(); //无论什么情况都交给,这里组装的内容,在回复的时候有效使用 + Integer pre = 0; while (parseResult == null && standardByteBuf.readerIndex() != standardByteBuf.writerIndex()) { + pre = standardByteBuf.readerIndex(); parseResult = protocolParser.doParseProtocol(standardByteBuf, protocolConfigProvider); + //避免死循环 + if (pre == standardByteBuf.readerIndex()) { + break; + } } +// destroy(standardByteBuf); if (parseResult != null) { 
diff --git a/sensorhub-core/src/main/java/com/casic/missiles/parser/SensorhubDecoder.java b/sensorhub-core/src/main/java/com/casic/missiles/parser/SensorhubDecoder.java
index c8c54f8..fccfa7c 100644
--- a/sensorhub-core/src/main/java/com/casic/missiles/parser/SensorhubDecoder.java
+++ b/sensorhub-core/src/main/java/com/casic/missiles/parser/SensorhubDecoder.java
@@ -1,14 +1,17 @@
 package com.casic.missiles.parser;
 
+import com.casic.missiles.autoconfig.DirectMemoryReporter;
 import com.casic.missiles.parser.predecodec.AbstractPretreatment;
 import com.casic.missiles.pojo.ParseResult;
 import com.casic.missiles.provider.ProtocolConfigProvider;
 import com.casic.missiles.util.ClazzUtil;
 import io.netty.buffer.ByteBuf;
+import io.netty.buffer.ByteBufAllocator;
 import io.netty.buffer.ByteBufUtil;
 import io.netty.channel.ChannelHandlerContext;
 import io.netty.handler.codec.ByteToMessageDecoder;
 import lombok.extern.slf4j.Slf4j;
+import org.apache.commons.codec.binary.Hex;
 
 import java.io.BufferedInputStream;
 import java.io.File;
@@ -33,29 +36,43 @@
      * 3. save the results from the generic protocol parser into the list and pass them to the reply handler, which issues the corresponding reply commands
      */
     @Override
-    public void decode(ChannelHandlerContext ctx, ByteBuf buffer, List<Object> list){
+    public void decode(ChannelHandlerContext ctx, ByteBuf buffer, List<Object> list) {
         //add the pretreatment processors
         List<AbstractPretreatment> abstractPreProcessingList = ClazzUtil.getSubClassList(AbstractPretreatment.class, true);
         ByteBuf standardByteBuf = buffer;
-        String oldBuff=ByteBufUtil.hexDump(standardByteBuf);
+        String oldBuff = ByteBufUtil.hexDump(standardByteBuf);
         for (AbstractPretreatment abstractPretreatment : abstractPreProcessingList) {
             standardByteBuf = abstractPretreatment.decode(standardByteBuf);
         }
-        boolean pretreatmentStatus=!oldBuff.equals(ByteBufUtil.hexDump(standardByteBuf));
+        boolean pretreatmentStatus = !oldBuff.equals(ByteBufUtil.hexDump(standardByteBuf));
         log.info(ByteBufUtil.hexDump(standardByteBuf));
         //further pre-processing can be added here, e.g. handling split/merged TCP frames
         ProtocolParser protocolParser = new GenericProtocolParser();
-        System.out.println(protocolParser);
         ProtocolConfigProvider protocolConfigProvider = new ProtocolConfigProvider();
         ParseResult parseResult = null;
+//        DirectMemoryReporter memoryReporter = new DirectMemoryReporter();
         //in every case the content assembled here is handed over; it is used when building the reply
+        Integer pre = 0;
         while (parseResult == null && standardByteBuf.readerIndex() != standardByteBuf.writerIndex()) {
+            pre = standardByteBuf.readerIndex();
             parseResult = protocolParser.doParseProtocol(standardByteBuf, protocolConfigProvider);
+            //avoid an infinite loop when the parser makes no progress
+            if (pre == standardByteBuf.readerIndex()) {
+                break;
+            }
         }
+//        destroy(standardByteBuf);
         if (parseResult != null) {
             parseResult.setRequestCode(pretreatmentStatus);
             list.add(parseResult);
         }
     }
+
+//    public void destroy(ByteBuf byteBuf) {
+//        if (byteBuf != null && byteBuf.refCnt() > 0) {
+//            byteBuf.release();
+//            byteBuf = null;
+//        }
+//    }
+
 }
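The reader-index stall guard added above generalizes to any ByteToMessageDecoder: if the inner parser can return without consuming bytes, a loop conditioned only on "buffer not empty" spins forever on a malformed frame. A minimal self-contained sketch of the pattern (class and method names are ours):

import io.netty.buffer.ByteBuf;
import io.netty.channel.ChannelHandlerContext;
import io.netty.handler.codec.ByteToMessageDecoder;

import java.util.List;

// Minimal sketch of the stall guard: stop looping as soon as one pass
// over the buffer consumes nothing, instead of spinning forever.
public class StallGuardDecoder extends ByteToMessageDecoder {
    @Override
    protected void decode(ChannelHandlerContext ctx, ByteBuf in, List<Object> out) {
        while (in.isReadable()) {
            int before = in.readerIndex();
            Object frame = tryParse(in);   // may consume zero bytes
            if (frame != null) {
                out.add(frame);
            }
            if (in.readerIndex() == before) {
                // no progress: wait for more bytes rather than loop forever
                break;
            }
        }
    }

    private Object tryParse(ByteBuf in) {
        // placeholder for a real protocol parser
        return null;
    }
}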
diff --git a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerConfig.java b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerConfig.java
index 24e4884..e494726 100644
--- a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerConfig.java
+++ b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerConfig.java
@@ -1,100 +1,100 @@
-package com.casic.missiles.autoconfig;
-
-import org.apache.kafka.clients.CommonClientConfigs;
-import org.apache.kafka.clients.consumer.ConsumerConfig;
-import org.apache.kafka.common.config.SaslConfigs;
-import org.apache.kafka.common.security.auth.SecurityProtocol;
-import org.apache.kafka.common.serialization.StringDeserializer;
-import org.springframework.beans.factory.annotation.Value;
-import org.springframework.boot.SpringBootConfiguration;
-import org.springframework.context.annotation.Bean;
-import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
-import org.springframework.kafka.config.KafkaListenerContainerFactory;
-import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
-import org.springframework.kafka.listener.ConcurrentMessageListenerContainer;
-import org.springframework.kafka.listener.ContainerProperties;
-
-import java.util.HashMap;
-import java.util.Map;
-
-
-/**
- * @author cz
- */
-@SpringBootConfiguration
-public class KafkaConsumerConfig {
-
-    @Value("${spring.kafka.consumer.bootstrap-servers}")
-    private String bootstrapServers;
-    @Value("${spring.kafka.consumer.group-id}")
-    private String groupId;
-    @Value("${spring.kafka.consumer.enable-auto-commit}")
-    private boolean enableAutoCommit;
-    @Value("${spring.kafka.properties.session.timeout.ms}")
-    private String sessionTimeout;
-    @Value("${spring.kafka.properties.max.poll.interval.ms}")
-    private String maxPollIntervalTime;
-    @Value("${spring.kafka.consumer.max-poll-records}")
-    private String maxPollRecords;
-    @Value("${spring.kafka.consumer.auto-offset-reset}")
-    private String autoOffsetReset;
-    @Value("${spring.kafka.listener.concurrency}")
-    private Integer concurrency;
-    @Value("${spring.kafka.listener.missing-topics-fatal}")
-    private boolean missingTopicsFatal;
-
-    private final long pollTimeout = 600000;
-
-    @Bean
-    public Map<String, Object> consumerConfigs() {
-        Map<String, Object> propsMap = new HashMap<>(16);
-        // broker address, taken straight from the config
-        propsMap.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
-        // consumer group id, likewise
-        propsMap.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
-        //whether to auto-commit offsets: default is true; set false and commit manually to avoid duplicated and lost records
-        propsMap.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, enableAutoCommit);
-        //auto-commit interval, effective only when auto-commit is enabled
-        propsMap.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "2000");
-        //what to do when a partition has no committed offset or the offset is invalid:
-        //we use latest: resume from the committed offset when one exists, otherwise consume only records produced from now on
-        propsMap.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, autoOffsetReset);
-        //maximum interval between two polls, default 5 minutes; exceeding it triggers a rebalance
-        propsMap.put(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, maxPollIntervalTime);
-        //maximum number of records a single poll may return, default 500
-        propsMap.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, maxPollRecords);
-        //how long the broker may miss consumer heartbeats before triggering a rebalance, default 10s
-        propsMap.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, sessionTimeout);
-        //deserialization (StringDeserializer here, matching the producer)
-        propsMap.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
-        propsMap.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
-        // the four settings below are for username/password auth; remove them when auth is unused
-//        propsMap.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, SecurityProtocol.SASL_PLAINTEXT.name);
-//        propsMap.put(SaslConfigs.SASL_MECHANISM, "PLAIN");
-//        propsMap.put("java.security.auth.login.config", "10000");
-        // username and password are hard-coded here; they could be moved to the nacos config instead
-//        propsMap.put(SaslConfigs.SASL_JAAS_CONFIG, String.format("org.apache.kafka.common.security.plain.PlainLoginModule required username=\"admin\" password=\"admin1234\";"));
-        return propsMap;
-    }
-
-    // consumer factory, loading the configuration above
-    @Bean("consumerFactory")
-    public DefaultKafkaConsumerFactory<String, String> consumerFactory() {
-        return new DefaultKafkaConsumerFactory<>(consumerConfigs());
-    }
-
-    @Bean("listenerContainerFactory")
-    public KafkaListenerContainerFactory<ConcurrentMessageListenerContainer<String, String>> kafkaListenerContainerFactory() {
-        ConcurrentKafkaListenerContainerFactory<String, String> factory = new ConcurrentKafkaListenerContainerFactory<>();
-        factory.setConsumerFactory(consumerFactory());
-        //number of threads in the listener container, usually machines * partitions
-        factory.setConcurrency(concurrency);
-        //a missing topic is fatal to the listener by default; set false to ignore it
-        factory.getContainerProperties().setMissingTopicsFatal(missingTopicsFatal);
-        //auto-commit is off, so manual acknowledgment must be configured
-        factory.getContainerProperties().setAckMode(ContainerProperties.AckMode.MANUAL_IMMEDIATE);
-        factory.getContainerProperties().setPollTimeout(pollTimeout);
-        return factory;
-    }
-}
-
+//package com.casic.missiles.autoconfig;
+//
+//import org.apache.kafka.clients.CommonClientConfigs;
+//import org.apache.kafka.clients.consumer.ConsumerConfig;
+//import org.apache.kafka.common.config.SaslConfigs;
+//import org.apache.kafka.common.security.auth.SecurityProtocol;
+//import org.apache.kafka.common.serialization.StringDeserializer;
+//import org.springframework.beans.factory.annotation.Value;
+//import org.springframework.boot.SpringBootConfiguration;
+//import org.springframework.context.annotation.Bean;
+//import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
+//import org.springframework.kafka.config.KafkaListenerContainerFactory;
+//import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
+//import org.springframework.kafka.listener.ConcurrentMessageListenerContainer;
+//import org.springframework.kafka.listener.ContainerProperties;
+//
+//import java.util.HashMap;
+//import java.util.Map;
+//
+//
+///**
+// * @author cz
+// */
+//@SpringBootConfiguration
+//public class KafkaConsumerConfig {
+//
+//    @Value("${spring.kafka.consumer.bootstrap-servers}")
+//    private String bootstrapServers;
+//    @Value("${spring.kafka.consumer.group-id}")
+//    private String groupId;
+//    @Value("${spring.kafka.consumer.enable-auto-commit}")
+//    private boolean enableAutoCommit;
+//    @Value("${spring.kafka.properties.session.timeout.ms}")
+//    private String sessionTimeout;
+//    @Value("${spring.kafka.properties.max.poll.interval.ms}")
+//    private String maxPollIntervalTime;
+//    @Value("${spring.kafka.consumer.max-poll-records}")
+//    private String maxPollRecords;
+//    @Value("${spring.kafka.consumer.auto-offset-reset}")
+//    private String autoOffsetReset;
+//    @Value("${spring.kafka.listener.concurrency}")
+//    private Integer concurrency;
+//    @Value("${spring.kafka.listener.missing-topics-fatal}")
+//    private boolean missingTopicsFatal;
+//
+//    private final long pollTimeout = 600000;
+//
+//    @Bean
+//    public Map<String, Object> consumerConfigs() {
+//        Map<String, Object> propsMap = new HashMap<>(16);
+//        // broker address, taken straight from the config
+//        propsMap.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
+//        // consumer group id, likewise
+//        propsMap.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
+//        //whether to auto-commit offsets: default is true; set false and commit manually to avoid duplicated and lost records
+//        propsMap.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, enableAutoCommit);
+//        //auto-commit interval, effective only when auto-commit is enabled
+//        propsMap.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "2000");
+//        //what to do when a partition has no committed offset or the offset is invalid:
+//        //we use latest: resume from the committed offset when one exists, otherwise consume only records produced from now on
+//        propsMap.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, autoOffsetReset);
+//        //maximum interval between two polls, default 5 minutes; exceeding it triggers a rebalance
+//        propsMap.put(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, maxPollIntervalTime);
+//        //maximum number of records a single poll may return, default 500
+//        propsMap.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, maxPollRecords);
+//        //how long the broker may miss consumer heartbeats before triggering a rebalance, default 10s
+//        propsMap.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, sessionTimeout);
+//        //deserialization (StringDeserializer here, matching the producer)
+//        propsMap.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
+//        propsMap.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
+//        // the four settings below are for username/password auth; remove them when auth is unused
+////        propsMap.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, SecurityProtocol.SASL_PLAINTEXT.name);
+////        propsMap.put(SaslConfigs.SASL_MECHANISM, "PLAIN");
+////        propsMap.put("java.security.auth.login.config", "10000");
+//        // username and password are hard-coded here; they could be moved to the nacos config instead
+////        propsMap.put(SaslConfigs.SASL_JAAS_CONFIG, String.format("org.apache.kafka.common.security.plain.PlainLoginModule required username=\"admin\" password=\"admin1234\";"));
+//        return propsMap;
+//    }
+//
+//    // consumer factory, loading the configuration above
+//    @Bean("consumerFactory")
+//    public DefaultKafkaConsumerFactory<String, String> consumerFactory() {
+//        return new DefaultKafkaConsumerFactory<>(consumerConfigs());
+//    }
+//
+//    @Bean("listenerContainerFactory")
+//    public KafkaListenerContainerFactory<ConcurrentMessageListenerContainer<String, String>> kafkaListenerContainerFactory() {
+//        ConcurrentKafkaListenerContainerFactory<String, String> factory = new ConcurrentKafkaListenerContainerFactory<>();
+//        factory.setConsumerFactory(consumerFactory());
+//        //number of threads in the listener container, usually machines * partitions
+//        factory.setConcurrency(concurrency);
+//        //a missing topic is fatal to the listener by default; set false to ignore it
+//        factory.getContainerProperties().setMissingTopicsFatal(missingTopicsFatal);
+//        //auto-commit is off, so manual acknowledgment must be configured
+//        factory.getContainerProperties().setAckMode(ContainerProperties.AckMode.MANUAL_IMMEDIATE);
+//        factory.getContainerProperties().setPollTimeout(pollTimeout);
+//        return factory;
+//    }
+//}
+//
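Commenting out the whole class (together with the matching spring.kafka block in application.yml) is the bluntest way to disable the consumer, since every @Value binding above fails fast once the properties disappear. A lighter alternative, sketched here as our suggestion rather than anything in the patch, is to gate the configuration on the presence of the bootstrap-servers property so the class can stay in the build:

import org.springframework.beans.factory.annotation.Value;
import org.springframework.boot.SpringBootConfiguration;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.context.annotation.Bean;

// Sketch: these bean definitions only load when
// spring.kafka.consumer.bootstrap-servers is present, so the class no
// longer needs to be commented out to run without Kafka.
@SpringBootConfiguration
@ConditionalOnProperty(name = "spring.kafka.consumer.bootstrap-servers")
public class KafkaConsumerConfigGate {

    @Value("${spring.kafka.consumer.bootstrap-servers}")
    private String bootstrapServers;

    @Bean
    public String kafkaBootstrapServers() {
        // stand-in bean: the real consumer factory definitions would go here
        return bootstrapServers;
    }
}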
diff --git a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerListener.java b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerListener.java
index c50a4e3..6600312 100644
--- a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerListener.java
+++ b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerListener.java
@@ -1,31 +1,31 @@
-package com.casic.missiles.autoconfig;
-
-import org.apache.kafka.clients.consumer.ConsumerRecord;
-import org.springframework.kafka.annotation.KafkaListener;
-import org.springframework.kafka.config.KafkaListenerEndpointRegistry;
-import org.springframework.kafka.support.Acknowledgment;
-import org.springframework.web.bind.annotation.RestController;
-
-import javax.annotation.Resource;
-
-@RestController()
-public class KafkaConsumerListener{
-    @Resource
-    private KafkaListenerEndpointRegistry kafkaListenerEndpointRegistry;
-    /**
-     * listen for kafka messages
-     *
-     * with autoStartup = "false" an id must be specified
-     */
-    @KafkaListener(id = "${spring.kafka.consumer.group-id}", topics = {"KAFKA_TEST_TOPICS"}, autoStartup = "true")
-    public void listenTopics(ConsumerRecord<String, String> consumerRecord, Acknowledgment ack) {
-        try {
-            System.out.println("listenTopics received: " + consumerRecord.value());
-            //manual acknowledgment
-            ack.acknowledge();
-        } catch (Exception e) {
-            System.out.println("consume failed: " + e);
-        }
-    }
-
-}
+//package com.casic.missiles.autoconfig;
+//
+//import org.apache.kafka.clients.consumer.ConsumerRecord;
+//import org.springframework.kafka.annotation.KafkaListener;
+//import org.springframework.kafka.config.KafkaListenerEndpointRegistry;
+//import org.springframework.kafka.support.Acknowledgment;
+//import org.springframework.web.bind.annotation.RestController;
+//
+//import javax.annotation.Resource;
+//
+//@RestController()
+//public class KafkaConsumerListener{
+//    @Resource
+//    private KafkaListenerEndpointRegistry kafkaListenerEndpointRegistry;
+//    /**
+//     * listen for kafka messages
+//     *
+//     * with autoStartup = "false" an id must be specified
+//     */
+//    @KafkaListener(id = "${spring.kafka.consumer.group-id}", topics = {"KAFKA_TEST_TOPICS"}, autoStartup = "true")
+//    public void listenTopics(ConsumerRecord<String, String> consumerRecord, Acknowledgment ack) {
+//        try {
+//            System.out.println("listenTopics received: " + consumerRecord.value());
+//            //manual acknowledgment
+//            ack.acknowledge();
+//        } catch (Exception e) {
+//            System.out.println("consume failed: " + e);
+//        }
+//    }
+//
+//}
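The listener above acknowledges manually, and the next hunk disables a KafkaListenerErrorHandler; in spring-kafka the two are tied together through @KafkaListener's errorHandler attribute, which takes the handler's bean name. A minimal sketch of the wiring (topic, class, and method names are ours):

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.kafka.support.Acknowledgment;
import org.springframework.stereotype.Component;

@Component
public class DemoListener {

    // containerFactory points at the manual-ack factory defined earlier;
    // errorHandler names a KafkaListenerErrorHandler bean such as
    // kafkaConsumerListenerError from the next hunk.
    @KafkaListener(topics = "demo-topic",
            containerFactory = "listenerContainerFactory",
            errorHandler = "kafkaConsumerListenerError")
    public void onMessage(ConsumerRecord<String, String> record, Acknowledgment ack) {
        try {
            process(record.value());
            ack.acknowledge(); // commit only after successful processing
        } catch (Exception e) {
            throw new RuntimeException(e); // let the error handler see it
        }
    }

    private void process(String value) {
        // placeholder business logic
    }
}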
diff --git a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerListenerError.java b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerListenerError.java
index d6f771a..dbc561c 100644
--- a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerListenerError.java
+++ b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerListenerError.java
@@ -1,29 +1,29 @@
-package com.casic.missiles.autoconfig;
-
-import edu.umd.cs.findbugs.annotations.NonNull;
-import org.apache.kafka.clients.consumer.Consumer;
-import org.springframework.kafka.listener.KafkaListenerErrorHandler;
-import org.springframework.kafka.listener.ListenerExecutionFailedException;
-import org.springframework.messaging.Message;
-import org.springframework.stereotype.Component;
-
-@Component
-public class KafkaConsumerListenerError implements KafkaListenerErrorHandler {
-
-
-    @Override
-    @NonNull
-    public Object handleError(Message<?> message, ListenerExecutionFailedException e) {
-        return new Object();
-    }
-
-    @Override
-    public Object handleError(Message<?> message, ListenerExecutionFailedException exception, Consumer<?, ?> consumer) {
-        System.out.println("message details: " + message);
-        System.out.println("exception: " + exception);
-        System.out.println("consumer details: " + consumer.groupMetadata());
-        System.out.println("listening topics: " + consumer.listTopics());
-        return KafkaListenerErrorHandler.super.handleError(message, exception, consumer);
-    }
-
-}
+//package com.casic.missiles.autoconfig;
+//
+//import edu.umd.cs.findbugs.annotations.NonNull;
+//import org.apache.kafka.clients.consumer.Consumer;
+//import org.springframework.kafka.listener.KafkaListenerErrorHandler;
+//import org.springframework.kafka.listener.ListenerExecutionFailedException;
+//import org.springframework.messaging.Message;
+//import org.springframework.stereotype.Component;
+//
+//@Component
+//public class KafkaConsumerListenerError implements KafkaListenerErrorHandler {
+//
+//
+//    @Override
+//    @NonNull
+//    public Object handleError(Message<?> message, ListenerExecutionFailedException e) {
+//        return new Object();
+//    }
+//
+//    @Override
+//    public Object handleError(Message<?> message, ListenerExecutionFailedException exception, Consumer<?, ?> consumer) {
+//        System.out.println("message details: " + message);
+//        System.out.println("exception: " + exception);
+//        System.out.println("consumer details: " + consumer.groupMetadata());
+//        System.out.println("listening topics: " + consumer.listTopics());
+//        return KafkaListenerErrorHandler.super.handleError(message, exception, consumer);
+//    }
+//
+//}
diff --git a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaProviderConfig.java b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaProviderConfig.java
index e8e36ec..1d605ed 100644
--- a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaProviderConfig.java
+++ b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaProviderConfig.java
@@ -1,85 +1,85 @@
-package com.casic.missiles.autoconfig;
-
-import org.apache.kafka.clients.CommonClientConfigs;
-import org.apache.kafka.clients.producer.ProducerConfig;
-import org.apache.kafka.common.config.SaslConfigs;
-import org.apache.kafka.common.security.auth.SecurityProtocol;
-import org.apache.kafka.common.serialization.StringSerializer;
-import org.springframework.beans.factory.annotation.Value;
-import org.springframework.boot.SpringBootConfiguration;
-import org.springframework.context.annotation.Bean;
-import org.springframework.context.annotation.Primary;
-import org.springframework.kafka.core.DefaultKafkaProducerFactory;
-import org.springframework.kafka.core.KafkaTemplate;
-import org.springframework.kafka.core.ProducerFactory;
-import org.springframework.kafka.transaction.KafkaTransactionManager;
-
-import java.util.HashMap;
-import java.util.Map;
-
-/**
- * @author cz
- */
-@SpringBootConfiguration
-public class KafkaProviderConfig {
-
-    @Value("${spring.kafka.producer.bootstrap-servers}")
-    private String bootstrapServers;
-    @Value("${spring.kafka.producer.acks}")
-    private String acks;
-    @Value("${spring.kafka.producer.retries}")
-    private String retries;
-    @Value("${spring.kafka.producer.batch-size}")
-    private String batchSize;
-    @Value("${spring.kafka.producer.buffer-memory}")
-    private String bufferMemory;
-
-    @Bean
-    public Map<String, Object> producerConfigs() {
-        Map<String, Object> props = new HashMap<>(16);
-        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
-        //acknowledgment mode; with acks=all the producer only gets a success response once every replica has received the message
-        props.put(ProducerConfig.ACKS_CONFIG, acks);
-        //number of resends after an error; must be greater than 0 when transactions are enabled
-        props.put(ProducerConfig.RETRIES_CONFIG, retries);
-        //messages for the same partition are batched together instead of being sent one by one, reducing round trips
-        props.put(ProducerConfig.BATCH_SIZE_CONFIG, batchSize);
-        //when traffic is light a batch may take very long (say 5 min) to reach 16KB, causing high latency, so a time limit is set as well: once it elapses the batch is sent anyway
-        props.put(ProducerConfig.LINGER_MS_CONFIG, "5000");
-        //size of the producer's memory buffer
-        props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, bufferMemory);
-        //serializers, matching the consumer side
-        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
-        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
-
-        //username/password settings; remove them when auth is not used
-//        props.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, SecurityProtocol.SASL_PLAINTEXT.name);
-//        props.put(SaslConfigs.SASL_MECHANISM, "PLAIN");
-//        props.put("java.security.auth.login.config", "10000");
-        // could be configured in the nacos config file instead
-//        props.put(SaslConfigs.SASL_JAAS_CONFIG, String.format("org.apache.kafka.common.security.plain.PlainLoginModule required username=\"admin\" password=\"admin1234\";"));
-        return props;
-    }
-
-    // producer factory
-    @Bean("kafkaProduceFactory")
-    public ProducerFactory<String, Object> producerFactory() {
-        DefaultKafkaProducerFactory<String, Object> factory = new DefaultKafkaProducerFactory<>(producerConfigs());
-        factory.setTransactionIdPrefix("kafkaTx-");
-        return factory;
-    }
-
-    // transaction handling
-    // this transaction manager can conflict with the project's other transactions, so the @Bean is often removed to keep it out of the Spring proxy
-    @Bean("kafkaTransactionManager")
-    @Primary
-    public KafkaTransactionManager<String, Object> kafkaTransactionManager(ProducerFactory<String, Object> producerFactory) {
-        return new KafkaTransactionManager<>(producerFactory);
-    }
-
-    @Bean
-    public KafkaTemplate<String, Object> kafkaTemplate() {
-        return new KafkaTemplate<>(producerFactory());
-    }
-
-}
+//package com.casic.missiles.autoconfig;
+//
+//import org.apache.kafka.clients.CommonClientConfigs;
+//import org.apache.kafka.clients.producer.ProducerConfig;
+//import org.apache.kafka.common.config.SaslConfigs;
+//import org.apache.kafka.common.security.auth.SecurityProtocol;
+//import org.apache.kafka.common.serialization.StringSerializer;
+//import org.springframework.beans.factory.annotation.Value;
+//import org.springframework.boot.SpringBootConfiguration;
+//import org.springframework.context.annotation.Bean;
+//import org.springframework.context.annotation.Primary;
+//import org.springframework.kafka.core.DefaultKafkaProducerFactory;
+//import org.springframework.kafka.core.KafkaTemplate;
+//import org.springframework.kafka.core.ProducerFactory;
+//import org.springframework.kafka.transaction.KafkaTransactionManager;
+//
+//import java.util.HashMap;
+//import java.util.Map;
+//
+///**
+// * @author cz
+// */
+//@SpringBootConfiguration
+//public class KafkaProviderConfig {
+//
+//    @Value("${spring.kafka.producer.bootstrap-servers}")
+//    private String bootstrapServers;
+//    @Value("${spring.kafka.producer.acks}")
+//    private String acks;
+//    @Value("${spring.kafka.producer.retries}")
+//    private String retries;
+//    @Value("${spring.kafka.producer.batch-size}")
+//    private String batchSize;
+//    @Value("${spring.kafka.producer.buffer-memory}")
+//    private String bufferMemory;
+//
+//    @Bean
+//    public Map<String, Object> producerConfigs() {
+//        Map<String, Object> props = new HashMap<>(16);
+//        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
+//        //acknowledgment mode; with acks=all the producer only gets a success response once every replica has received the message
+//        props.put(ProducerConfig.ACKS_CONFIG, acks);
+//        //number of resends after an error; must be greater than 0 when transactions are enabled
+//        props.put(ProducerConfig.RETRIES_CONFIG, retries);
+//        //messages for the same partition are batched together instead of being sent one by one, reducing round trips
+//        props.put(ProducerConfig.BATCH_SIZE_CONFIG, batchSize);
+//        //when traffic is light a batch may take very long (say 5 min) to reach 16KB, causing high latency, so a time limit is set as well: once it elapses the batch is sent anyway
+//        props.put(ProducerConfig.LINGER_MS_CONFIG, "5000");
+//        //size of the producer's memory buffer
+//        props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, bufferMemory);
+//        //serializers, matching the consumer side
+//        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
+//        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
+//
+//        //username/password settings; remove them when auth is not used
+////        props.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, SecurityProtocol.SASL_PLAINTEXT.name);
+////        props.put(SaslConfigs.SASL_MECHANISM, "PLAIN");
+////        props.put("java.security.auth.login.config", "10000");
+//        // could be configured in the nacos config file instead
+////        props.put(SaslConfigs.SASL_JAAS_CONFIG, String.format("org.apache.kafka.common.security.plain.PlainLoginModule required username=\"admin\" password=\"admin1234\";"));
+//        return props;
+//    }
+//
+//    // producer factory
+//    @Bean("kafkaProduceFactory")
+//    public ProducerFactory<String, Object> producerFactory() {
+//        DefaultKafkaProducerFactory<String, Object> factory = new DefaultKafkaProducerFactory<>(producerConfigs());
+//        factory.setTransactionIdPrefix("kafkaTx-");
+//        return factory;
+//    }
+//
+//    // transaction handling
+//    // this transaction manager can conflict with the project's other transactions, so the @Bean is often removed to keep it out of the Spring proxy
+//    @Bean("kafkaTransactionManager")
+//    @Primary
+//    public KafkaTransactionManager<String, Object> kafkaTransactionManager(ProducerFactory<String, Object> producerFactory) {
+//        return new KafkaTransactionManager<>(producerFactory);
+//    }
+//
+//    @Bean
+//    public KafkaTemplate<String, Object> kafkaTemplate() {
+//        return new KafkaTemplate<>(producerFactory());
+//    }
+//
+//}
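Because the producer factory sets a transaction-id prefix, spring-kafka treats the producer as transactional and a plain send outside a transaction fails with an IllegalStateException. executeInTransaction runs a callback inside a local Kafka transaction without needing the Spring-proxied transaction manager that the comment above flags as conflict-prone; a minimal sketch (topic name is ours):

import org.springframework.kafka.core.KafkaTemplate;

public class TransactionalSend {

    private final KafkaTemplate<String, Object> kafkaTemplate;

    public TransactionalSend(KafkaTemplate<String, Object> kafkaTemplate) {
        this.kafkaTemplate = kafkaTemplate;
    }

    public void send(String payload) {
        // runs the callback inside a Kafka transaction; commits on return,
        // aborts if the callback throws
        kafkaTemplate.executeInTransaction(ops -> {
            ops.send("demo-topic", payload);
            return null;
        });
    }
}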
diff --git a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaSendResultHandler.java b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaSendResultHandler.java
index 1fdac64..1598ee0 100644
--- a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaSendResultHandler.java
+++ b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaSendResultHandler.java
@@ -1,22 +1,22 @@
-package com.casic.missiles.autoconfig;
-
-import org.apache.kafka.clients.producer.ProducerRecord;
-import org.apache.kafka.clients.producer.RecordMetadata;
-import org.springframework.kafka.support.ProducerListener;
-import org.springframework.stereotype.Component;
-
-import javax.annotation.Nullable;
-
-@Component
-public class KafkaSendResultHandler implements ProducerListener {
-
-    @Override
-    public void onSuccess(ProducerRecord producerRecord, RecordMetadata recordMetadata) {
-        System.out.println("message sent successfully: " + producerRecord.toString());
-    }
-
-    @Override
-    public void onError(ProducerRecord producerRecord, @Nullable RecordMetadata recordMetadata, Exception exception) {
-        System.out.println("message send failed: " + producerRecord.toString() + exception.getMessage());
-    }
-}
+//package com.casic.missiles.autoconfig;
+//
+//import org.apache.kafka.clients.producer.ProducerRecord;
+//import org.apache.kafka.clients.producer.RecordMetadata;
+//import org.springframework.kafka.support.ProducerListener;
+//import org.springframework.stereotype.Component;
+//
+//import javax.annotation.Nullable;
+//
+//@Component
+//public class KafkaSendResultHandler implements ProducerListener {
+//
+//    @Override
+//    public void onSuccess(ProducerRecord producerRecord, RecordMetadata recordMetadata) {
+//        System.out.println("message sent successfully: " + producerRecord.toString());
+//    }
+//
+//    @Override
+//    public void onError(ProducerRecord producerRecord, @Nullable RecordMetadata recordMetadata, Exception exception) {
+//        System.out.println("message send failed: " + producerRecord.toString() + exception.getMessage());
+//    }
+//}
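A ProducerListener like KafkaSendResultHandler only fires once it is registered on the template; spring-kafka then invokes onSuccess/onError after each send completes. A minimal wiring sketch, assuming the handler bean above (the helper class is ours):

import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.support.ProducerListener;

public final class KafkaTemplateWiring {

    // attach the ProducerListener so every send reports success or failure
    @SuppressWarnings("unchecked")
    public static void attach(KafkaTemplate<String, Object> template, ProducerListener listener) {
        template.setProducerListener(listener);
    }
}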
diff --git a/sensorhub-core/src/main/java/com/casic/missiles/cache/ProtocolProcessEventListener.java b/sensorhub-core/src/main/java/com/casic/missiles/cache/ProtocolProcessEventListener.java
index b0bf96d..5144189 100644
--- a/sensorhub-core/src/main/java/com/casic/missiles/cache/ProtocolProcessEventListener.java
+++ b/sensorhub-core/src/main/java/com/casic/missiles/cache/ProtocolProcessEventListener.java
@@ -157,7 +157,7 @@
                 deviceDataList.add(deviceData);
             }
             //batch save
-            deviceDataService.saveBatch(deviceDataList);
+//            deviceDataService.saveBatch(deviceDataList);
         }
     }
     //remove from cache
diff --git a/sensorhub-core/src/main/java/com/casic/missiles/controller/AepCommandSend.java b/sensorhub-core/src/main/java/com/casic/missiles/controller/AepCommandSend.java
index 2d0aeae..5e3ba38 100644
--- a/sensorhub-core/src/main/java/com/casic/missiles/controller/AepCommandSend.java
+++ b/sensorhub-core/src/main/java/com/casic/missiles/controller/AepCommandSend.java
@@ -64,7 +64,7 @@
             request.setParamMasterKey("0a77886fae4f4ff68d926adeb3a3ef5b"); // single value
             request.setBody(JSONObject.toJSONString(busConfigParam).getBytes());
             CreateCommandLwm2mProfileResponse msgResponse = client.CreateCommandLwm2mProfile(request);
-            log.info("-----" + msgResponse.getMessage());
+            log.info("send status-----" + msgResponse.getMessage());
         } catch (Exception ex) {
         } finally {
             client.shutdown();
diff --git a/sensorhub-core/src/main/java/com/casic/missiles/parser/processor/reply/DevcieConfigConfirm.java b/sensorhub-core/src/main/java/com/casic/missiles/parser/processor/reply/DevcieConfigConfirm.java
index 1fd58f2..5ea30a9 100644
--- a/sensorhub-core/src/main/java/com/casic/missiles/parser/processor/reply/DevcieConfigConfirm.java
+++ b/sensorhub-core/src/main/java/com/casic/missiles/parser/processor/reply/DevcieConfigConfirm.java
@@ -41,7 +41,7 @@
             }
         }
         //config delivered successfully
-//        redisCommon.removeKeyByDevcode(devcode);
+        redisCommon.removeKeyByDevcode(devcode);
         configDataMap.put("config", "1");
     }
     return result;
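The re-enabled removeKeyByDevcode call clears the pending-config marker once the device confirms the delivered configuration. RedisCommon's internals are not shown in this patch; the sketch below redoes the same confirm-then-cleanup step with Spring's StringRedisTemplate, with the key layout purely our assumption:

import org.springframework.data.redis.core.StringRedisTemplate;
import org.springframework.stereotype.Component;

@Component
public class PendingConfigCleaner {

    private final StringRedisTemplate redis;

    public PendingConfigCleaner(StringRedisTemplate redis) {
        this.redis = redis;
    }

    // hypothetical key layout: "config:pending:<devcode>"
    public void onConfigConfirmed(String devcode) {
        redis.delete("config:pending:" + devcode);
    }
}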
diff --git
a/sensorhub-core/src/main/java/com/casic/missiles/cache/ProtocolProcessEventListener.java b/sensorhub-core/src/main/java/com/casic/missiles/cache/ProtocolProcessEventListener.java index b0bf96d..5144189 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/cache/ProtocolProcessEventListener.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/cache/ProtocolProcessEventListener.java @@ -157,7 +157,7 @@ deviceDataList.add(deviceData); } //批量保存 - deviceDataService.saveBatch(deviceDataList); +// deviceDataService.saveBatch(deviceDataList); } } //移除缓存 diff --git a/sensorhub-core/src/main/java/com/casic/missiles/controller/AepCommandSend.java b/sensorhub-core/src/main/java/com/casic/missiles/controller/AepCommandSend.java index 2d0aeae..5e3ba38 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/controller/AepCommandSend.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/controller/AepCommandSend.java @@ -64,7 +64,7 @@ request.setParamMasterKey("0a77886fae4f4ff68d926adeb3a3ef5b"); // single value request.setBody(JSONObject.toJSONString(busConfigParam).getBytes()); CreateCommandLwm2mProfileResponse msgResponse = client.CreateCommandLwm2mProfile(request); - log.info("-----" + msgResponse.getMessage()); + log.info("send status-----" + msgResponse.getMessage()); } catch (Exception ex) { } finally { client.shutdown(); diff --git a/sensorhub-core/src/main/java/com/casic/missiles/controller/KafkaController.java b/sensorhub-core/src/main/java/com/casic/missiles/controller/KafkaController.java deleted file mode 100644 index d679bf1..0000000 --- a/sensorhub-core/src/main/java/com/casic/missiles/controller/KafkaController.java +++ /dev/null @@ -1,59 +0,0 @@ -package com.casic.missiles.controller; - -import com.casic.missiles.autoconfig.KafkaSendResultHandler; -import com.casic.missiles.parser.sender.impl.KafkaSubscribe; -import org.apache.kafka.clients.producer.ProducerRecord; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.kafka.core.KafkaTemplate; -import org.springframework.kafka.support.KafkaHeaders; -import org.springframework.messaging.MessageHeaders; -import org.springframework.messaging.support.GenericMessage; -import org.springframework.transaction.annotation.Transactional; -import org.springframework.web.bind.annotation.RequestMapping; -import org.springframework.web.bind.annotation.RestController; - -import java.util.HashMap; -import java.util.Map; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.TimeoutException; - -@RestController -@RequestMapping("/provider") -//这个注解代表这个类开启Springboot事务,因为我们在Kafka的配置文件开启了Kafka事务,不然会报错 -public class KafkaController { - - @Autowired - private KafkaSubscribe kafkaSubscribe; - - @RequestMapping("/send") - @Transactional - public String sendMultiple() throws Exception { - kafkaSubscribe.publishDataSubscribe(null, null); - return "okk"; - } - - -// /** -// * Kafka提供了多种构建消息的方式 -// * -// * @throws ExecutionException -// * @throws InterruptedException -// * @throws TimeoutException -// */ -// public void sendDemo() throws ExecutionException, InterruptedException, TimeoutException { -// //后面的get代表同步发送,括号内时间可选,代表超过这个时间会抛出超时异常,但是仍会发送成功 -// kafkaTemplate.send("topic1", "发给topic1").get(); -// //使用ProducerRecord发送消息 -// ProducerRecord producerRecord = new ProducerRecord<>("topic.quick.demo", "use ProducerRecord to send message"); -// kafkaTemplate.send(producerRecord); -// //使用Message发送消息 -// Map map = new HashMap<>(); -// map.put(KafkaHeaders.TOPIC, "topic.quick.demo"); 
-// map.put(KafkaHeaders.PARTITION_ID, 0); -// map.put(KafkaHeaders.MESSAGE_KEY, 0); -// GenericMessage message = new GenericMessage<>("use Message to send message", new MessageHeaders(map)); -// kafkaTemplate.send(message); -// } - -} - diff --git a/sensorhub-core/src/main/java/com/casic/missiles/controller/ReceiveController.java b/sensorhub-core/src/main/java/com/casic/missiles/controller/ReceiveController.java index e5fa505..dfed36a 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/controller/ReceiveController.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/controller/ReceiveController.java @@ -18,13 +18,13 @@ private final ThreadPoolExecutor threadPoolExecutor = new ThreadPoolExecutor(5, 10, 60, TimeUnit.SECONDS, new LinkedBlockingDeque<>(10)); @RequestMapping("/data") - public Object testNbResponse(@RequestBody Map h2sDataMap) { + public Object testNbResponse(@RequestBody Map dataMap) { ResponseData responseData = new ResponseData(); - System.out.println(JSON.toJSON(h2sDataMap)); + System.out.println(JSON.toJSON(dataMap)); threadPoolExecutor.execute( () -> { AepCommandSend aepCommandSend = new AepCommandSend(); - aepCommandSend.sendConfig(h2sDataMap); + aepCommandSend.sendConfig(dataMap); } ); responseData.setCode(200); diff --git a/sensorhub-core/src/main/java/com/casic/missiles/netty/SensorhubServer.java b/sensorhub-core/src/main/java/com/casic/missiles/netty/SensorhubServer.java index c4a534c..a946173 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/netty/SensorhubServer.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/netty/SensorhubServer.java @@ -3,6 +3,7 @@ import com.casic.missiles.autoconfig.SensorhubProperties; import com.casic.missiles.netty.handler.SensorhubServerChannelInitialHandler; import io.netty.bootstrap.ServerBootstrap; +import io.netty.buffer.PooledByteBufAllocator; import io.netty.channel.ChannelFuture; import io.netty.channel.ChannelOption; import io.netty.channel.EventLoopGroup; @@ -71,6 +72,8 @@ b.group(bossGroup, workerGroup) // 设置EventLoopGroup .channel(NioServerSocketChannel.class) // 指明新的Channel的类型 .childHandler(new SensorhubServerChannelInitialHandler()) // 指定ChannelHandler + .option(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT) + .childOption(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT) .option(ChannelOption.SO_BACKLOG, 1024) // 设置的ServerChannel的一些选项 .childOption(ChannelOption.SO_KEEPALIVE, true); // 当SO_KEEPALIVE=true的时候,服务端可以探测客户端的连接是否还存活着,如果客户端关闭了,那么服务端的连接可以关闭掉,释放资源 //增加检测 diff --git a/sensorhub-core/src/main/java/com/casic/missiles/parser/GenericProtocolParser.java b/sensorhub-core/src/main/java/com/casic/missiles/parser/GenericProtocolParser.java index bb41939..ec9a04f 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/parser/GenericProtocolParser.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/parser/GenericProtocolParser.java @@ -122,7 +122,7 @@ } ProtocolProcessEventListener.setTask(devcode, bizDataMap, 3); //存储数据 - datagramEventProvider.storeData(bizDataMap); +// datagramEventProvider.storeData(bizDataMap); } catch (RuntimeException rex) { log.error("解析出现异常,异常信息为{}", rex); //数据发送,异步,异常拦截 diff --git a/sensorhub-core/src/main/java/com/casic/missiles/parser/ProtocolParserSupport.java b/sensorhub-core/src/main/java/com/casic/missiles/parser/ProtocolParserSupport.java index e4a79ae..c886d25 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/parser/ProtocolParserSupport.java +++ 
b/sensorhub-core/src/main/java/com/casic/missiles/parser/ProtocolParserSupport.java @@ -29,6 +29,13 @@ public class ProtocolParserSupport { + /** + * 获取协议的固定数据信息 + * + * @param protocolFactory + * @param wholeDatagramByte + * @return + */ protected Map getParseFixedDataMap(AbstractProtocolConfigFactory protocolFactory, ByteBuf wholeDatagramByte) { ProtocolFieldConfigProvider protocolFieldConfigProvider = protocolFactory.getProtocolFieldConfigProvider(); RuleConfigProvider ruleConfigProvider = protocolFactory.getRuleConfigProvider(); @@ -41,7 +48,7 @@ //打印源数据,设备编号 ProcessEventTask processEventTask = ProcessEventTask.builder() .devcode((String) parseFixedDataMap.get("devcode")) - .decryptSoureData(ByteBufUtil.hexDump(wholeDatagramByte)) + .decryptSoureData(ByteBufUtil.hexDump(wholeDatagramByte).substring(wholeDatagramByte.readerIndex())) .build(); ProtocolProcessEventListener.asynAddTask(processEventTask); return parseFixedDataMap; diff --git a/sensorhub-core/src/main/java/com/casic/missiles/parser/SensorhubDecoder.java b/sensorhub-core/src/main/java/com/casic/missiles/parser/SensorhubDecoder.java index c8c54f8..fccfa7c 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/parser/SensorhubDecoder.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/parser/SensorhubDecoder.java @@ -1,14 +1,17 @@ package com.casic.missiles.parser; +import com.casic.missiles.autoconfig.DirectMemoryReporter; import com.casic.missiles.parser.predecodec.AbstractPretreatment; import com.casic.missiles.pojo.ParseResult; import com.casic.missiles.provider.ProtocolConfigProvider; import com.casic.missiles.util.ClazzUtil; import io.netty.buffer.ByteBuf; +import io.netty.buffer.ByteBufAllocator; import io.netty.buffer.ByteBufUtil; import io.netty.channel.ChannelHandlerContext; import io.netty.handler.codec.ByteToMessageDecoder; import lombok.extern.slf4j.Slf4j; +import org.apache.commons.codec.binary.Hex; import java.io.BufferedInputStream; import java.io.File; @@ -33,29 +36,43 @@ * 3、将从通用的协议解析器得到的结果进行保存到list,传递给回复的handler,进行相关的回复命令操作 */ @Override - public void decode(ChannelHandlerContext ctx, ByteBuf buffer, List list){ + public void decode(ChannelHandlerContext ctx, ByteBuf buffer, List list) { //添加前置处理器 List abstractPreProcessingList = ClazzUtil.getSubClassList(AbstractPretreatment.class, true); ByteBuf standardByteBuf = buffer; - String oldBuff=ByteBufUtil.hexDump(standardByteBuf); + String oldBuff = ByteBufUtil.hexDump(standardByteBuf); for (AbstractPretreatment abstractPretreatment : abstractPreProcessingList) { standardByteBuf = abstractPretreatment.decode(standardByteBuf); } - boolean pretreatmentStatus=!oldBuff.equals(ByteBufUtil.hexDump(standardByteBuf)); + boolean pretreatmentStatus = !oldBuff.equals(ByteBufUtil.hexDump(standardByteBuf)); log.info(ByteBufUtil.hexDump(standardByteBuf)); //这里可以增加一些前置处理,例如判断拆包合包等网络流的操作 ProtocolParser protocolParser = new GenericProtocolParser(); - System.out.println(protocolParser); ProtocolConfigProvider protocolConfigProvider = new ProtocolConfigProvider(); ParseResult parseResult = null; +// DirectMemoryReporter memoryReporter=new DirectMemoryReporter(); //无论什么情况都交给,这里组装的内容,在回复的时候有效使用 + Integer pre = 0; while (parseResult == null && standardByteBuf.readerIndex() != standardByteBuf.writerIndex()) { + pre = standardByteBuf.readerIndex(); parseResult = protocolParser.doParseProtocol(standardByteBuf, protocolConfigProvider); + //避免死循环 + if (pre == standardByteBuf.readerIndex()) { + break; + } } +// destroy(standardByteBuf); if (parseResult != null) { 
parseResult.setRequestCode(pretreatmentStatus); list.add(parseResult); } } +// public void destroy(ByteBuf byteBuf) { +// if (byteBuf != null && byteBuf.refCnt() > 0) { +// byteBuf.release(); +// byteBuf = null; +// } +// } + } diff --git a/sensorhub-core/src/main/java/com/casic/missiles/parser/processor/reply/DevcieConfigConfirm.java b/sensorhub-core/src/main/java/com/casic/missiles/parser/processor/reply/DevcieConfigConfirm.java index 1fd58f2..5ea30a9 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/parser/processor/reply/DevcieConfigConfirm.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/parser/processor/reply/DevcieConfigConfirm.java @@ -41,7 +41,7 @@ } } //下发配置成功 -// redisCommon.removeKeyByDevcode(devcode); + redisCommon.removeKeyByDevcode(devcode); configDataMap.put("config", "1"); } return result; diff --git a/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/combined/CombinedFieldSupport.java b/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/combined/CombinedFieldSupport.java index b49c027..9226dc3 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/combined/CombinedFieldSupport.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/combined/CombinedFieldSupport.java @@ -20,13 +20,12 @@ */ public class CombinedFieldSupport { - //深拷贝不影响存储的对象 + //通过深拷贝获取字段配置 protected FieldConfig getFieldConfigById(Long fieldId, Map fieldConfigsMap) { if (ObjectUtils.isNotEmpty(fieldId)) { FieldConfig fieldConfig = fieldConfigsMap.get(fieldId); FieldConfig newDeepCopyFieldConfig = new FieldConfig(); //通过深拷贝传入 -// System.out.println("fieldId:"+fieldId+" json:"+ JSON.toJSON(fieldConfig)); BeanUtils.copyProperties(fieldConfig, newDeepCopyFieldConfig); return newDeepCopyFieldConfig; } else {
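The SensorhubDecoder hunk above is the most consequential fix in this patch: Netty's ByteToMessageDecoder keeps calling decode() while the buffer still has readable bytes, so a parse pass that consumes nothing used to spin the decode loop forever on a malformed datagram. The patch records the reader index before each attempt and breaks out when it has not advanced. Below is a minimal, self-contained sketch of the same guard; the class name and tryParseOneFrame() are illustrative stand-ins, not the project's parser.

```java
import io.netty.buffer.ByteBuf;
import io.netty.channel.ChannelHandlerContext;
import io.netty.handler.codec.ByteToMessageDecoder;

import java.util.List;

// Sketch of the progress-guard pattern used in SensorhubDecoder: stop looping
// as soon as one pass over the buffer consumes no bytes, otherwise a malformed
// frame would spin the decode loop forever.
public class ProgressGuardedDecoder extends ByteToMessageDecoder {

    @Override
    protected void decode(ChannelHandlerContext ctx, ByteBuf in, List<Object> out) {
        Object result = null;
        while (result == null && in.readerIndex() != in.writerIndex()) {
            int before = in.readerIndex();   // position before this attempt
            result = tryParseOneFrame(in);   // may consume bytes and/or return a frame
            if (before == in.readerIndex()) {
                break;                       // no progress: wait for more bytes instead of spinning
            }
        }
        if (result != null) {
            out.add(result);
        }
    }

    // Placeholder for the real protocol parser; returns null while the frame is incomplete.
    private Object tryParseOneFrame(ByteBuf in) {
        return null;
    }
}
```

A related detail worth double-checking in the ProtocolParserSupport hunk: ByteBufUtil.hexDump(ByteBuf) already dumps starting at the buffer's readerIndex, and each byte expands to two hex characters, so the added .substring(wholeDatagramByte.readerIndex()) both skips bytes that hexDump has already excluded and uses a byte offset where a character offset (2 × bytes) would be needed.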
diff --git a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerConfig.java b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerConfig.java index 24e4884..e494726 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerConfig.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerConfig.java @@ -1,100 +1,100 @@ -package com.casic.missiles.autoconfig; - -import org.apache.kafka.clients.CommonClientConfigs; -import org.apache.kafka.clients.consumer.ConsumerConfig; -import org.apache.kafka.common.config.SaslConfigs; -import org.apache.kafka.common.security.auth.SecurityProtocol; -import org.apache.kafka.common.serialization.StringDeserializer; -import org.springframework.beans.factory.annotation.Value; -import org.springframework.boot.SpringBootConfiguration; -import org.springframework.context.annotation.Bean; -import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory; -import org.springframework.kafka.config.KafkaListenerContainerFactory; -import org.springframework.kafka.core.DefaultKafkaConsumerFactory; -import org.springframework.kafka.listener.ConcurrentMessageListenerContainer; -import org.springframework.kafka.listener.ContainerProperties; - -import java.util.HashMap; -import java.util.Map; - - -/** - * @author cz - */ -@SpringBootConfiguration -public class KafkaConsumerConfig { - - @Value("${spring.kafka.consumer.bootstrap-servers}") - private String bootstrapServers; - @Value("${spring.kafka.consumer.group-id}") - private String groupId; - @Value("${spring.kafka.consumer.enable-auto-commit}") - private boolean enableAutoCommit; - @Value("${spring.kafka.properties.session.timeout.ms}") - private String sessionTimeout; - @Value("${spring.kafka.properties.max.poll.interval.ms}") - private String maxPollIntervalTime; - @Value("${spring.kafka.consumer.max-poll-records}") - private String maxPollRecords; - @Value("${spring.kafka.consumer.auto-offset-reset}") - private String autoOffsetReset; - @Value("${spring.kafka.listener.concurrency}") - private Integer concurrency; - @Value("${spring.kafka.listener.missing-topics-fatal}") - private boolean missingTopicsFatal; - - private final long pollTimeout = 600000; - - @Bean - public Map consumerConfigs() { - Map propsMap = new HashMap<>(16); - // 服务器地址,不多说配置直接用 - propsMap.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers); - // groupId不多说,直接用 -
propsMap.put(ConsumerConfig.GROUP_ID_CONFIG, groupId); - //是否自动提交偏移量,默认值是true,为了避免出现重复数据和数据丢失,可以把它设置为false,然后手动提交偏移量 - propsMap.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, enableAutoCommit); - //自动提交的时间间隔,自动提交开启时生效 - propsMap.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "2000"); - //该属性指定了消费者在读取一个没有偏移量的分区或者偏移量无效的情况下该作何处理: - //我们使用latest:当各分区下有已提交的offset时,从提交的offset开始消费;无提交的offset时,消费新产生的该分区下的数据 - propsMap.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, autoOffsetReset); - //两次poll之间的最大间隔,默认值为5分钟。如果超过这个间隔会触发reBalance - propsMap.put(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, maxPollIntervalTime); - //这个参数定义了poll方法最多可以拉取多少条消息,默认值为500。 - propsMap.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, maxPollRecords); - //当broker多久没有收到consumer的心跳请求后就触发reBalance,默认值是10s - propsMap.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, sessionTimeout); - //序列化(我们这边使用StringDeserializer,与生产者保持一致) - propsMap.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); - propsMap.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); - // 下面四个参数是用户名密码的参数,没有用户名密码可以去掉以下配置 -// propsMap.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, SecurityProtocol.SASL_PLAINTEXT.name); -// propsMap.put(SaslConfigs.SASL_MECHANISM, "PLAIN"); -// propsMap.put("java.security.auth.login.config", "10000"); - // 这里username设置用户名, password设置密码我写死到代码里了,可以更改为nacos配置 -// propsMap.put(SaslConfigs.SASL_JAAS_CONFIG, String.format("org.apache.kafka.common.security.plain.PlainLoginModule required username=\"admin\" password=\"admin1234\";")); - return propsMap; - } - - // 消费者工厂,将配置信息加载进去 - @Bean("consumerFactory") - public DefaultKafkaConsumerFactory consumerFactory() { - return new DefaultKafkaConsumerFactory(consumerConfigs()); - } - - @Bean("listenerContainerFactory") - public KafkaListenerContainerFactory> kafkaListenerContainerFactory() { - ConcurrentKafkaListenerContainerFactory factory = new ConcurrentKafkaListenerContainerFactory<>(); - factory.setConsumerFactory(consumerFactory()); - //在侦听器容器中运行的线程数,一般设置为 机器数*分区数 - factory.setConcurrency(concurrency); - //消费监听接口监听的主题不存在时,默认会报错,所以设置为false忽略错误 - factory.getContainerProperties().setMissingTopicsFatal(missingTopicsFatal); - //自动提交关闭,需要设置手动消息确认 - factory.getContainerProperties().setAckMode(ContainerProperties.AckMode.MANUAL_IMMEDIATE); - factory.getContainerProperties().setPollTimeout(pollTimeout); - return factory; - } -} - +//package com.casic.missiles.autoconfig; +// +//import org.apache.kafka.clients.CommonClientConfigs; +//import org.apache.kafka.clients.consumer.ConsumerConfig; +//import org.apache.kafka.common.config.SaslConfigs; +//import org.apache.kafka.common.security.auth.SecurityProtocol; +//import org.apache.kafka.common.serialization.StringDeserializer; +//import org.springframework.beans.factory.annotation.Value; +//import org.springframework.boot.SpringBootConfiguration; +//import org.springframework.context.annotation.Bean; +//import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory; +//import org.springframework.kafka.config.KafkaListenerContainerFactory; +//import org.springframework.kafka.core.DefaultKafkaConsumerFactory; +//import org.springframework.kafka.listener.ConcurrentMessageListenerContainer; +//import org.springframework.kafka.listener.ContainerProperties; +// +//import java.util.HashMap; +//import java.util.Map; +// +// +///** +// * @author cz +// */ +//@SpringBootConfiguration +//public class KafkaConsumerConfig { +// +// @Value("${spring.kafka.consumer.bootstrap-servers}") +// private String 
bootstrapServers; +// @Value("${spring.kafka.consumer.group-id}") +// private String groupId; +// @Value("${spring.kafka.consumer.enable-auto-commit}") +// private boolean enableAutoCommit; +// @Value("${spring.kafka.properties.session.timeout.ms}") +// private String sessionTimeout; +// @Value("${spring.kafka.properties.max.poll.interval.ms}") +// private String maxPollIntervalTime; +// @Value("${spring.kafka.consumer.max-poll-records}") +// private String maxPollRecords; +// @Value("${spring.kafka.consumer.auto-offset-reset}") +// private String autoOffsetReset; +// @Value("${spring.kafka.listener.concurrency}") +// private Integer concurrency; +// @Value("${spring.kafka.listener.missing-topics-fatal}") +// private boolean missingTopicsFatal; +// +// private final long pollTimeout = 600000; +// +// @Bean +// public Map consumerConfigs() { +// Map propsMap = new HashMap<>(16); +// // 服务器地址,不多说配置直接用 +// propsMap.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers); +// // groupId不多说,直接用 +// propsMap.put(ConsumerConfig.GROUP_ID_CONFIG, groupId); +// //是否自动提交偏移量,默认值是true,为了避免出现重复数据和数据丢失,可以把它设置为false,然后手动提交偏移量 +// propsMap.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, enableAutoCommit); +// //自动提交的时间间隔,自动提交开启时生效 +// propsMap.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "2000"); +// //该属性指定了消费者在读取一个没有偏移量的分区或者偏移量无效的情况下该作何处理: +// //我们使用latest:当各分区下有已提交的offset时,从提交的offset开始消费;无提交的offset时,消费新产生的该分区下的数据 +// propsMap.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, autoOffsetReset); +// //两次poll之间的最大间隔,默认值为5分钟。如果超过这个间隔会触发reBalance +// propsMap.put(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, maxPollIntervalTime); +// //这个参数定义了poll方法最多可以拉取多少条消息,默认值为500。 +// propsMap.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, maxPollRecords); +// //当broker多久没有收到consumer的心跳请求后就触发reBalance,默认值是10s +// propsMap.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, sessionTimeout); +// //序列化(我们这边使用StringDeserializer,与生产者保持一致) +// propsMap.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); +// propsMap.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); +// // 下面四个参数是用户名密码的参数,没有用户名密码可以去掉以下配置 +//// propsMap.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, SecurityProtocol.SASL_PLAINTEXT.name); +//// propsMap.put(SaslConfigs.SASL_MECHANISM, "PLAIN"); +//// propsMap.put("java.security.auth.login.config", "10000"); +// // 这里username设置用户名, password设置密码我写死到代码里了,可以更改为nacos配置 +//// propsMap.put(SaslConfigs.SASL_JAAS_CONFIG, String.format("org.apache.kafka.common.security.plain.PlainLoginModule required username=\"admin\" password=\"admin1234\";")); +// return propsMap; +// } +// +// // 消费者工厂,将配置信息加载进去 +// @Bean("consumerFactory") +// public DefaultKafkaConsumerFactory consumerFactory() { +// return new DefaultKafkaConsumerFactory(consumerConfigs()); +// } +// +// @Bean("listenerContainerFactory") +// public KafkaListenerContainerFactory> kafkaListenerContainerFactory() { +// ConcurrentKafkaListenerContainerFactory factory = new ConcurrentKafkaListenerContainerFactory<>(); +// factory.setConsumerFactory(consumerFactory()); +// //在侦听器容器中运行的线程数,一般设置为 机器数*分区数 +// factory.setConcurrency(concurrency); +// //消费监听接口监听的主题不存在时,默认会报错,所以设置为false忽略错误 +// factory.getContainerProperties().setMissingTopicsFatal(missingTopicsFatal); +// //自动提交关闭,需要设置手动消息确认 +// factory.getContainerProperties().setAckMode(ContainerProperties.AckMode.MANUAL_IMMEDIATE); +// factory.getContainerProperties().setPollTimeout(pollTimeout); +// return factory; +// } +//} +//
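With the consumer configuration above now fully commented out (matching the disabled kafka block in application.yml), re-enabling Kafka later means un-commenting two classes and the YAML in lockstep. A less invasive pattern is to keep the configuration class in place and gate it on the presence of the broker property. The sketch below is hypothetical and not part of this patch; it only reuses the spring.kafka.consumer.bootstrap-servers key from the disabled YAML block.

```java
import org.springframework.beans.factory.annotation.Value;
import org.springframework.boot.SpringBootConfiguration;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;

// Hypothetical alternative to commenting the class out: the whole configuration
// is skipped unless a broker address is actually configured.
@SpringBootConfiguration
@ConditionalOnProperty(name = "spring.kafka.consumer.bootstrap-servers")
public class GuardedKafkaConsumerConfig {

    @Value("${spring.kafka.consumer.bootstrap-servers}")
    private String bootstrapServers;

    // consumerConfigs(), consumerFactory() and the listener container factory
    // would move here unchanged; Spring creates none of these beans when the
    // property is absent, so the @Value placeholders can never fail to resolve.
}
```

The same guard would fit KafkaProviderConfig, KafkaConsumerListener and KafkaSendResultHandler, turning the Kafka on/off switch into a pure configuration change instead of a mass comment/uncomment.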
diff --git a/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/combined/GenericCombinedFieldResolver.java b/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/combined/GenericCombinedFieldResolver.java index 58c419d..72b75dc 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/combined/GenericCombinedFieldResolver.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/combined/GenericCombinedFieldResolver.java @@ -5,6 +5,7 @@ import com.baomidou.mybatisplus.core.toolkit.CollectionUtils; import com.casic.missiles.enums.EngineExceptionEnum; import com.casic.missiles.exception.EngineException; +import com.casic.missiles.parser.GenericProtocolParser; import com.casic.missiles.parser.resolver.combined.impl.BizFieldParseProcessor; import com.casic.missiles.parser.resolver.combined.impl.PreBizFieldParseProcessor; import com.casic.missiles.parser.resolver.combined.impl.PreLeadCodeProcessor; @@ -13,13 +14,18 @@ import com.casic.missiles.pojo.CombinedFieldProcessorParam; import com.casic.missiles.pojo.FieldConfig; import com.casic.missiles.pojo.FieldRuleConfig; +import com.casic.missiles.util.SpringContextUtil; import io.netty.buffer.ByteBuf; import io.netty.buffer.ByteBufUtil; +import io.netty.util.internal.PlatformDependent; import lombok.extern.slf4j.Slf4j; +import org.springframework.util.ReflectionUtils; +import java.lang.reflect.Field; import java.util.ArrayList; import java.util.List; import java.util.Map; +import java.util.concurrent.atomic.AtomicLong; import java.util.stream.Collectors; /** @@ -31,7 +37,7 @@ public class GenericCombinedFieldResolver { /** - * 组合字段解析主要流程类 + * 组合字段解析主要流程类 * 通过查询,字段长度长度随机 */ @@ -42,16 +48,16 @@ List> storeObjectList =
diff --git a/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/combined/GenericCombinedFieldResolver.java b/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/combined/GenericCombinedFieldResolver.java
index 58c419d..72b75dc 100644
--- a/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/combined/GenericCombinedFieldResolver.java
+++ b/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/combined/GenericCombinedFieldResolver.java
@@ -5,6 +5,7 @@
 import com.baomidou.mybatisplus.core.toolkit.CollectionUtils;
 import com.casic.missiles.enums.EngineExceptionEnum;
 import com.casic.missiles.exception.EngineException;
+import com.casic.missiles.parser.GenericProtocolParser;
 import com.casic.missiles.parser.resolver.combined.impl.BizFieldParseProcessor;
 import com.casic.missiles.parser.resolver.combined.impl.PreBizFieldParseProcessor;
 import com.casic.missiles.parser.resolver.combined.impl.PreLeadCodeProcessor;
@@ -13,13 +14,18 @@
 import com.casic.missiles.pojo.CombinedFieldProcessorParam;
 import com.casic.missiles.pojo.FieldConfig;
 import com.casic.missiles.pojo.FieldRuleConfig;
+import com.casic.missiles.util.SpringContextUtil;
 import io.netty.buffer.ByteBuf;
 import io.netty.buffer.ByteBufUtil;
+import io.netty.util.internal.PlatformDependent;
 import lombok.extern.slf4j.Slf4j;
+import org.springframework.util.ReflectionUtils;
 
+import java.lang.reflect.Field;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
+import java.util.concurrent.atomic.AtomicLong;
 import java.util.stream.Collectors;
 
 /**
@@ -31,7 +37,7 @@
 public class GenericCombinedFieldResolver {
 
     /**
-     * Main flow class for combined-field parsing
+     * Main flow class for combined-field parsing 
      * field lengths vary and are determined by lookup
      */
@@ -42,16 +48,16 @@
         List<Map<String, Object>> storeObjectList = combinedFieldParam.getStoreObjectList();
         while (byteBuf.readerIndex() < byteBuf.writerIndex()) {
             for (AbstractCombinedFieldProcessor abstractProcessor : abstractProcessorList) {
-                    Integer oldLength = byteBuf.readerIndex();
-                    median = abstractProcessor.invoke(combinedFieldParam);
-                    if (byteBuf.readerIndex() >= byteBuf.writerIndex()) {
-                        return;
-                    }
-                    Assert.isFalse(oldLength == byteBuf.readerIndex(), () -> {
-                        throw new EngineException(EngineExceptionEnum.COMBINED_CONFIG_MATCH_FAILED);
-                    });
-                    // pass the previous node's result into the next round, preserving its information
-                    combinedFieldParam.setPreProcessorResult(median);
+                Integer oldLength = byteBuf.readerIndex();
+                median = abstractProcessor.invoke(combinedFieldParam);
+                if (byteBuf.readerIndex() >= byteBuf.writerIndex()) {
+                    return;
+                }
+                Assert.isFalse(oldLength == byteBuf.readerIndex(), () -> {
+                    throw new EngineException(EngineExceptionEnum.COMBINED_CONFIG_MATCH_FAILED);
+                });
+                // pass the previous node's result into the next round, preserving its information
+                combinedFieldParam.setPreProcessorResult(median);
             }
         }
         System.out.println(JSON.toJSON(storeObjectList));
diff --git a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerConfig.java b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerConfig.java
index 24e4884..e494726 100644
--- a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerConfig.java
+++ b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerConfig.java
@@ -1,100 +1,100 @@
-package com.casic.missiles.autoconfig;
-
-import org.apache.kafka.clients.CommonClientConfigs;
-import org.apache.kafka.clients.consumer.ConsumerConfig;
-import org.apache.kafka.common.config.SaslConfigs;
-import org.apache.kafka.common.security.auth.SecurityProtocol;
-import org.apache.kafka.common.serialization.StringDeserializer;
-import org.springframework.beans.factory.annotation.Value;
-import org.springframework.boot.SpringBootConfiguration;
-import org.springframework.context.annotation.Bean;
-import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
-import org.springframework.kafka.config.KafkaListenerContainerFactory;
-import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
-import org.springframework.kafka.listener.ConcurrentMessageListenerContainer;
-import org.springframework.kafka.listener.ContainerProperties;
-
-import java.util.HashMap;
-import java.util.Map;
-
-
-/**
- * @author cz
- */
-@SpringBootConfiguration
-public class KafkaConsumerConfig {
-
-    @Value("${spring.kafka.consumer.bootstrap-servers}")
-    private String bootstrapServers;
-    @Value("${spring.kafka.consumer.group-id}")
-    private String groupId;
-    @Value("${spring.kafka.consumer.enable-auto-commit}")
-    private boolean enableAutoCommit;
-    @Value("${spring.kafka.properties.session.timeout.ms}")
-    private String sessionTimeout;
-    @Value("${spring.kafka.properties.max.poll.interval.ms}")
-    private String maxPollIntervalTime;
-    @Value("${spring.kafka.consumer.max-poll-records}")
-    private String maxPollRecords;
-    @Value("${spring.kafka.consumer.auto-offset-reset}")
-    private String autoOffsetReset;
-    @Value("${spring.kafka.listener.concurrency}")
-    private Integer concurrency;
-    @Value("${spring.kafka.listener.missing-topics-fatal}")
-    private boolean missingTopicsFatal;
-
-    private final long pollTimeout = 600000;
-
-    @Bean
-    public Map<String, Object> consumerConfigs() {
-        Map<String, Object> propsMap = new HashMap<>(16);
-        // broker address, taken straight from the config
-        propsMap.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
-        // consumer group id, used as-is
-        propsMap.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
-        // whether to auto-commit offsets; the default is true, but to avoid duplicates and data loss it can be set to false with offsets committed manually
-        propsMap.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, enableAutoCommit);
-        // auto-commit interval, effective only when auto-commit is enabled
-        propsMap.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "2000");
-        // what to do when the partition has no committed offset or the offset is invalid:
-        // we use latest: resume from the committed offset when one exists, otherwise consume only newly produced records
-        propsMap.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, autoOffsetReset);
-        // maximum interval between two polls, default 5 minutes; exceeding it triggers a rebalance
-        propsMap.put(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, maxPollIntervalTime);
-        // maximum number of records a single poll may return, default 500
-        propsMap.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, maxPollRecords);
-        // how long the broker waits without a consumer heartbeat before triggering a rebalance, default 10s
-        propsMap.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, sessionTimeout);
-        // deserialization (StringDeserializer here, matching the producer)
-        propsMap.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
-        propsMap.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
-        // the four properties below configure username/password auth; remove them if none is used
-//        propsMap.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, SecurityProtocol.SASL_PLAINTEXT.name);
-//        propsMap.put(SaslConfigs.SASL_MECHANISM, "PLAIN");
-//        propsMap.put("java.security.auth.login.config", "10000");
-        // username and password are hard-coded here; they could be moved into the nacos config
-//        propsMap.put(SaslConfigs.SASL_JAAS_CONFIG, String.format("org.apache.kafka.common.security.plain.PlainLoginModule required username=\"admin\" password=\"admin1234\";"));
-        return propsMap;
-    }
-
-    // consumer factory; loads the config map
-    @Bean("consumerFactory")
-    public DefaultKafkaConsumerFactory consumerFactory() {
-        return new DefaultKafkaConsumerFactory(consumerConfigs());
-    }
-
-    @Bean("listenerContainerFactory")
-    public KafkaListenerContainerFactory<ConcurrentMessageListenerContainer<String, String>> kafkaListenerContainerFactory() {
-        ConcurrentKafkaListenerContainerFactory<String, String> factory = new ConcurrentKafkaListenerContainerFactory<>();
-        factory.setConsumerFactory(consumerFactory());
-        // number of threads in the listener container, usually machines * partitions
-        factory.setConcurrency(concurrency);
-        // a missing topic is fatal by default, so set false to ignore it
-        factory.getContainerProperties().setMissingTopicsFatal(missingTopicsFatal);
-        // auto-commit is off, so manual acknowledgment must be configured
-        factory.getContainerProperties().setAckMode(ContainerProperties.AckMode.MANUAL_IMMEDIATE);
-        factory.getContainerProperties().setPollTimeout(pollTimeout);
-        return factory;
-    }
-}
-
+//package com.casic.missiles.autoconfig;
+//
+//import org.apache.kafka.clients.CommonClientConfigs;
+//import org.apache.kafka.clients.consumer.ConsumerConfig;
+//import org.apache.kafka.common.config.SaslConfigs;
+//import org.apache.kafka.common.security.auth.SecurityProtocol;
+//import org.apache.kafka.common.serialization.StringDeserializer;
+//import org.springframework.beans.factory.annotation.Value;
+//import org.springframework.boot.SpringBootConfiguration;
+//import org.springframework.context.annotation.Bean;
+//import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
+//import org.springframework.kafka.config.KafkaListenerContainerFactory;
+//import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
+//import org.springframework.kafka.listener.ConcurrentMessageListenerContainer;
+//import org.springframework.kafka.listener.ContainerProperties;
+//
+//import java.util.HashMap;
+//import java.util.Map;
+//
+//
+///**
+// * @author cz
+// */
+//@SpringBootConfiguration
+//public class KafkaConsumerConfig {
+//
+//    @Value("${spring.kafka.consumer.bootstrap-servers}")
+//    private String bootstrapServers;
+//    @Value("${spring.kafka.consumer.group-id}")
+//    private String groupId;
+//    @Value("${spring.kafka.consumer.enable-auto-commit}")
+//    private boolean enableAutoCommit;
+//    @Value("${spring.kafka.properties.session.timeout.ms}")
+//    private String sessionTimeout;
+//    @Value("${spring.kafka.properties.max.poll.interval.ms}")
+//    private String maxPollIntervalTime;
+//    @Value("${spring.kafka.consumer.max-poll-records}")
+//    private String maxPollRecords;
+//    @Value("${spring.kafka.consumer.auto-offset-reset}")
+//    private String autoOffsetReset;
+//    @Value("${spring.kafka.listener.concurrency}")
+//    private Integer concurrency;
+//    @Value("${spring.kafka.listener.missing-topics-fatal}")
+//    private boolean missingTopicsFatal;
+//
+//    private final long pollTimeout = 600000;
+//
+//    @Bean
+//    public Map<String, Object> consumerConfigs() {
+//        Map<String, Object> propsMap = new HashMap<>(16);
+//        // broker address, taken straight from the config
+//        propsMap.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
+//        // consumer group id, used as-is
+//        propsMap.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
+//        // whether to auto-commit offsets; the default is true, but to avoid duplicates and data loss it can be set to false with offsets committed manually
+//        propsMap.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, enableAutoCommit);
+//        // auto-commit interval, effective only when auto-commit is enabled
+//        propsMap.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "2000");
+//        // what to do when the partition has no committed offset or the offset is invalid:
+//        // we use latest: resume from the committed offset when one exists, otherwise consume only newly produced records
+//        propsMap.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, autoOffsetReset);
+//        // maximum interval between two polls, default 5 minutes; exceeding it triggers a rebalance
+//        propsMap.put(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, maxPollIntervalTime);
+//        // maximum number of records a single poll may return, default 500
+//        propsMap.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, maxPollRecords);
+//        // how long the broker waits without a consumer heartbeat before triggering a rebalance, default 10s
+//        propsMap.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, sessionTimeout);
+//        // deserialization (StringDeserializer here, matching the producer)
+//        propsMap.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
+//        propsMap.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
+//        // the four properties below configure username/password auth; remove them if none is used
+////        propsMap.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, SecurityProtocol.SASL_PLAINTEXT.name);
+////        propsMap.put(SaslConfigs.SASL_MECHANISM, "PLAIN");
+////        propsMap.put("java.security.auth.login.config", "10000");
+//        // username and password are hard-coded here; they could be moved into the nacos config
+////        propsMap.put(SaslConfigs.SASL_JAAS_CONFIG, String.format("org.apache.kafka.common.security.plain.PlainLoginModule required username=\"admin\" password=\"admin1234\";"));
+//        return propsMap;
+//    }
+//
+//    // consumer factory; loads the config map
+//    @Bean("consumerFactory")
+//    public DefaultKafkaConsumerFactory consumerFactory() {
+//        return new DefaultKafkaConsumerFactory(consumerConfigs());
+//    }
+//
+//    @Bean("listenerContainerFactory")
+//    public KafkaListenerContainerFactory<ConcurrentMessageListenerContainer<String, String>> kafkaListenerContainerFactory() {
+//        ConcurrentKafkaListenerContainerFactory<String, String> factory = new ConcurrentKafkaListenerContainerFactory<>();
+//        factory.setConsumerFactory(consumerFactory());
+//        // number of threads in the listener container, usually machines * partitions
+//        factory.setConcurrency(concurrency);
+//        // a missing topic is fatal by default, so set false to ignore it
+//        factory.getContainerProperties().setMissingTopicsFatal(missingTopicsFatal);
+//        // auto-commit is off, so manual acknowledgment must be configured
+//        factory.getContainerProperties().setAckMode(ContainerProperties.AckMode.MANUAL_IMMEDIATE);
+//        factory.getContainerProperties().setPollTimeout(pollTimeout);
+//        return factory;
+//    }
+//}
+//
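Should the Kafka path be reactivated later, the essential parts of the consumer setup disabled above reduce to the sketch below: a manual-ack container factory whose listeners must acknowledge offsets themselves. This is a hedged sketch assuming spring-kafka on the classpath; the group id and server string are placeholders.

import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.kafka.listener.ContainerProperties;

public class ManualAckFactorySketch {

    public static ConcurrentKafkaListenerContainerFactory<String, String> factory(String servers) {
        Map<String, Object> props = new HashMap<>();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, servers);
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "1");
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false); // commit manually
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);

        ConcurrentKafkaListenerContainerFactory<String, String> factory =
                new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(new DefaultKafkaConsumerFactory<>(props));
        // manual ack: each listener must call Acknowledgment#acknowledge itself
        factory.getContainerProperties().setAckMode(ContainerProperties.AckMode.MANUAL_IMMEDIATE);
        return factory;
    }
}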
diff --git a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerListener.java b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerListener.java
index c50a4e3..6600312 100644
--- a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerListener.java
+++ b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerListener.java
@@ -1,31 +1,31 @@
-package com.casic.missiles.autoconfig;
-
-import org.apache.kafka.clients.consumer.ConsumerRecord;
-import org.springframework.kafka.annotation.KafkaListener;
-import org.springframework.kafka.config.KafkaListenerEndpointRegistry;
-import org.springframework.kafka.support.Acknowledgment;
-import org.springframework.web.bind.annotation.RestController;
-
-import javax.annotation.Resource;
-
-@RestController()
-public class KafkaConsumerListener{
-    @Resource
-    private KafkaListenerEndpointRegistry kafkaListenerEndpointRegistry;
-    /**
-     * listen for kafka messages
-     *
-     * with autoStartup = "false" an id must be specified
-     */
-    @KafkaListener(id = "${spring.kafka.consumer.group-id}", topics = {"KAFKA_TEST_TOPICS"}, autoStartup = "true")
-    public void listenTopics(ConsumerRecord consumerRecord, Acknowledgment ack) {
-        try {
-            System.out.println("listenTopics received: " + consumerRecord.value());
-            //manual acknowledgment
-            ack.acknowledge();
-        } catch (Exception e) {
-            System.out.println("consume failed: " + e);
-        }
-    }
-
-}
+//package com.casic.missiles.autoconfig;
+//
+//import org.apache.kafka.clients.consumer.ConsumerRecord;
+//import org.springframework.kafka.annotation.KafkaListener;
+//import org.springframework.kafka.config.KafkaListenerEndpointRegistry;
+//import org.springframework.kafka.support.Acknowledgment;
+//import org.springframework.web.bind.annotation.RestController;
+//
+//import javax.annotation.Resource;
+//
+//@RestController()
+//public class KafkaConsumerListener{
+//    @Resource
+//    private KafkaListenerEndpointRegistry kafkaListenerEndpointRegistry;
+//    /**
+//     * listen for kafka messages
+//     *
+//     * with autoStartup = "false" an id must be specified
+//     */
+//    @KafkaListener(id = "${spring.kafka.consumer.group-id}", topics = {"KAFKA_TEST_TOPICS"}, autoStartup = "true")
+//    public void listenTopics(ConsumerRecord consumerRecord, Acknowledgment ack) {
+//        try {
+//            System.out.println("listenTopics received: " + consumerRecord.value());
+//            //manual acknowledgment
+//            ack.acknowledge();
+//        } catch (Exception e) {
+//            System.out.println("consume failed: " + e);
+//        }
+//    }
+//
+//}
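The matching listener, mirroring the disabled KafkaConsumerListener above, would look roughly as follows (sketch only; the topic name and group id are the placeholders used in the original, and the factory from the previous sketch is assumed):

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.kafka.support.Acknowledgment;
import org.springframework.stereotype.Component;

@Component
public class ManualAckListenerSketch {

    @KafkaListener(id = "1", topics = "KAFKA_TEST_TOPICS")
    public void onMessage(ConsumerRecord<String, String> record, Acknowledgment ack) {
        try {
            // process first, then confirm the offset explicitly
            System.out.println("received: " + record.value());
            ack.acknowledge();
        } catch (Exception e) {
            // not acknowledging leaves the record to be redelivered
            System.err.println("consume failed: " + e);
        }
    }
}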
diff --git a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerListenerError.java b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerListenerError.java
index d6f771a..dbc561c 100644
--- a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerListenerError.java
+++ b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerListenerError.java
@@ -1,29 +1,29 @@
-package com.casic.missiles.autoconfig;
-
-import edu.umd.cs.findbugs.annotations.NonNull;
-import org.apache.kafka.clients.consumer.Consumer;
-import org.springframework.kafka.listener.KafkaListenerErrorHandler;
-import org.springframework.kafka.listener.ListenerExecutionFailedException;
-import org.springframework.messaging.Message;
-import org.springframework.stereotype.Component;
-
-@Component
-public class KafkaConsumerListenerError implements KafkaListenerErrorHandler {
-
-
-    @Override
-    @NonNull
-    public Object handleError(Message message, ListenerExecutionFailedException e) {
-        return new Object();
-    }
-
-    @Override
-    public Object handleError(Message message, ListenerExecutionFailedException exception, Consumer consumer) {
-        System.out.println("message details: " + message);
-        System.out.println("exception: " + exception);
-        System.out.println("consumer details: " + consumer.groupMetadata());
-        System.out.println("subscribed topics: " + consumer.listTopics());
-        return KafkaListenerErrorHandler.super.handleError(message, exception, consumer);
-    }
-
-}
+//package com.casic.missiles.autoconfig;
+//
+//import edu.umd.cs.findbugs.annotations.NonNull;
+//import org.apache.kafka.clients.consumer.Consumer;
+//import org.springframework.kafka.listener.KafkaListenerErrorHandler;
+//import org.springframework.kafka.listener.ListenerExecutionFailedException;
+//import org.springframework.messaging.Message;
+//import org.springframework.stereotype.Component;
+//
+//@Component
+//public class KafkaConsumerListenerError implements KafkaListenerErrorHandler {
+//
+//
+//    @Override
+//    @NonNull
+//    public Object handleError(Message message, ListenerExecutionFailedException e) {
+//        return new Object();
+//    }
+//
+//    @Override
+//    public Object handleError(Message message, ListenerExecutionFailedException exception, Consumer consumer) {
+//        System.out.println("message details: " + message);
+//        System.out.println("exception: " + exception);
+//        System.out.println("consumer details: " + consumer.groupMetadata());
+//        System.out.println("subscribed topics: " + consumer.listTopics());
+//        return KafkaListenerErrorHandler.super.handleError(message, exception, consumer);
+//    }
+//
+//}
diff --git a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaProviderConfig.java b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaProviderConfig.java
index e8e36ec..1d605ed 100644
--- a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaProviderConfig.java
+++ b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaProviderConfig.java
@@ -1,85 +1,85 @@
-package com.casic.missiles.autoconfig;
-
-import org.apache.kafka.clients.CommonClientConfigs;
-import org.apache.kafka.clients.producer.ProducerConfig;
-import org.apache.kafka.common.config.SaslConfigs;
-import org.apache.kafka.common.security.auth.SecurityProtocol;
-import org.apache.kafka.common.serialization.StringSerializer;
-import org.springframework.beans.factory.annotation.Value;
-import org.springframework.boot.SpringBootConfiguration;
-import org.springframework.context.annotation.Bean;
-import org.springframework.context.annotation.Primary;
-import org.springframework.kafka.core.DefaultKafkaProducerFactory;
-import org.springframework.kafka.core.KafkaTemplate;
-import org.springframework.kafka.core.ProducerFactory;
-import org.springframework.kafka.transaction.KafkaTransactionManager;
-
-import java.util.HashMap;
-import java.util.Map;
-
-/**
- * @author cz
- */
-@SpringBootConfiguration
-public class KafkaProviderConfig {
-
-    @Value("${spring.kafka.producer.bootstrap-servers}")
-    private String bootstrapServers;
-    @Value("${spring.kafka.producer.acks}")
-    private String acks;
-    @Value("${spring.kafka.producer.retries}")
-    private String retries;
-    @Value("${spring.kafka.producer.batch-size}")
-    private String batchSize;
-    @Value("${spring.kafka.producer.buffer-memory}")
-    private String bufferMemory;
-
-    @Bean
-    public Map<String, Object> producerConfigs() {
-        Map<String, Object> props = new HashMap<>(16);
-        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
-        // ack mode; with acks=all the producer only receives a success response once every replica has received the message
-        props.put(ProducerConfig.ACKS_CONFIG, acks);
-        // number of retries after an error; must be greater than 0 when transactions are enabled
-        props.put(ProducerConfig.RETRIES_CONFIG, retries);
-        // messages for the same partition are batched together to reduce request round-trips instead of being sent one by one
-        props.put(ProducerConfig.BATCH_SIZE_CONFIG, batchSize);
-        // when traffic is low a batch may not reach 16KB for a long time, e.g. 5 min, which adds latency, so a time limit is set as well; once it expires the batch is sent
-        props.put(ProducerConfig.LINGER_MS_CONFIG, "5000");
-        // size of the producer's in-memory buffer
-        props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, bufferMemory);
-        // serializers, matching the consumer
-        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
-        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
-
-        // username/password settings; remove them if none is used
-//        props.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, SecurityProtocol.SASL_PLAINTEXT.name);
-//        props.put(SaslConfigs.SASL_MECHANISM, "PLAIN");
-//        props.put("java.security.auth.login.config", "10000");
-        // could be configured in nacos instead
-//        props.put(SaslConfigs.SASL_JAAS_CONFIG, String.format("org.apache.kafka.common.security.plain.PlainLoginModule required username=\"admin\" password=\"admin1234\";"));
-        return props;
-    }
-
-    // producer factory
-    @Bean("kafkaProduceFactory")
-    public ProducerFactory<String, String> producerFactory() {
-        DefaultKafkaProducerFactory<String, String> factory = new DefaultKafkaProducerFactory<>(producerConfigs());
-        factory.setTransactionIdPrefix("kafkaTx-");
-        return factory;
-    }
-
-    // transaction handling
-    // this transaction manager conflicts with the project's other transactions, so I usually drop the @Bean and skip the spring proxy
-    @Bean("kafkaTransactionManager")
-    @Primary
-    public KafkaTransactionManager kafkaTransactionManager(ProducerFactory producerFactory) {
-        return new KafkaTransactionManager(producerFactory);
-    }
-
-    @Bean
-    public KafkaTemplate kafkaTemplate() {
-        return new KafkaTemplate<>(producerFactory());
-    }
-
-}
+//package com.casic.missiles.autoconfig;
+//
+//import org.apache.kafka.clients.CommonClientConfigs;
+//import org.apache.kafka.clients.producer.ProducerConfig;
+//import org.apache.kafka.common.config.SaslConfigs;
+//import org.apache.kafka.common.security.auth.SecurityProtocol;
+//import org.apache.kafka.common.serialization.StringSerializer;
+//import org.springframework.beans.factory.annotation.Value;
+//import org.springframework.boot.SpringBootConfiguration;
+//import org.springframework.context.annotation.Bean;
+//import org.springframework.context.annotation.Primary;
+//import org.springframework.kafka.core.DefaultKafkaProducerFactory;
+//import org.springframework.kafka.core.KafkaTemplate;
+//import org.springframework.kafka.core.ProducerFactory;
+//import org.springframework.kafka.transaction.KafkaTransactionManager;
+//
+//import java.util.HashMap;
+//import java.util.Map;
+//
+///**
+// * @author cz
+// */
+//@SpringBootConfiguration
+//public class KafkaProviderConfig {
+//
+//    @Value("${spring.kafka.producer.bootstrap-servers}")
+//    private String bootstrapServers;
+//    @Value("${spring.kafka.producer.acks}")
+//    private String acks;
+//    @Value("${spring.kafka.producer.retries}")
+//    private String retries;
+//    @Value("${spring.kafka.producer.batch-size}")
+//    private String batchSize;
+//    @Value("${spring.kafka.producer.buffer-memory}")
+//    private String bufferMemory;
+//
+//    @Bean
+//    public Map<String, Object> producerConfigs() {
+//        Map<String, Object> props = new HashMap<>(16);
+//        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
+//        // ack mode; with acks=all the producer only receives a success response once every replica has received the message
+//        props.put(ProducerConfig.ACKS_CONFIG, acks);
+//        // number of retries after an error; must be greater than 0 when transactions are enabled
+//        props.put(ProducerConfig.RETRIES_CONFIG, retries);
+//        // messages for the same partition are batched together to reduce request round-trips instead of being sent one by one
+//        props.put(ProducerConfig.BATCH_SIZE_CONFIG, batchSize);
+//        // when traffic is low a batch may not reach 16KB for a long time, e.g. 5 min, which adds latency, so a time limit is set as well; once it expires the batch is sent
+//        props.put(ProducerConfig.LINGER_MS_CONFIG, "5000");
+//        // size of the producer's in-memory buffer
+//        props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, bufferMemory);
+//        // serializers, matching the consumer
+//        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
+//        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
+//
+//        // username/password settings; remove them if none is used
+////        props.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, SecurityProtocol.SASL_PLAINTEXT.name);
+////        props.put(SaslConfigs.SASL_MECHANISM, "PLAIN");
+////        props.put("java.security.auth.login.config", "10000");
+//        // could be configured in nacos instead
+////        props.put(SaslConfigs.SASL_JAAS_CONFIG, String.format("org.apache.kafka.common.security.plain.PlainLoginModule required username=\"admin\" password=\"admin1234\";"));
+//        return props;
+//    }
+//
+//    // producer factory
+//    @Bean("kafkaProduceFactory")
+//    public ProducerFactory<String, String> producerFactory() {
+//        DefaultKafkaProducerFactory<String, String> factory = new DefaultKafkaProducerFactory<>(producerConfigs());
+//        factory.setTransactionIdPrefix("kafkaTx-");
+//        return factory;
+//    }
+//
+//    // transaction handling
+//    // this transaction manager conflicts with the project's other transactions, so I usually drop the @Bean and skip the spring proxy
+//    @Bean("kafkaTransactionManager")
+//    @Primary
+//    public KafkaTransactionManager kafkaTransactionManager(ProducerFactory producerFactory) {
+//        return new KafkaTransactionManager(producerFactory);
+//    }
+//
+//    @Bean
+//    public KafkaTemplate kafkaTemplate() {
+//        return new KafkaTemplate<>(producerFactory());
+//    }
+//
+//}
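The disabled producer config sets a transactionIdPrefix, which forces every send through a Kafka transaction, and its in-code note warns that the KafkaTransactionManager bean clashes with the project's other transaction managers. One way to keep transactional sends without registering that bean is KafkaTemplate.executeInTransaction, which scopes the transaction to the callback instead of joining Spring's transaction management. A hedged sketch; the topic names are illustrative and a transactional producer factory (transactionIdPrefix set) is assumed.

import org.springframework.kafka.core.KafkaTemplate;

public class LocalKafkaTxSketch {

    private final KafkaTemplate<String, String> kafkaTemplate;

    public LocalKafkaTxSketch(KafkaTemplate<String, String> kafkaTemplate) {
        this.kafkaTemplate = kafkaTemplate;
    }

    public void sendAtomically(String payload) {
        // runs in a Kafka-local transaction, independent of Spring's @Transactional
        kafkaTemplate.executeInTransaction(ops -> {
            ops.send("sensor-data", payload);
            ops.send("sensor-audit", payload);
            return null; // both sends commit or abort together
        });
    }
}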
diff --git a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaSendResultHandler.java b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaSendResultHandler.java
index 1fdac64..1598ee0 100644
--- a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaSendResultHandler.java
+++ b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaSendResultHandler.java
@@ -1,22 +1,22 @@
-package com.casic.missiles.autoconfig;
-
-import org.apache.kafka.clients.producer.ProducerRecord;
-import org.apache.kafka.clients.producer.RecordMetadata;
-import org.springframework.kafka.support.ProducerListener;
-import org.springframework.stereotype.Component;
-
-import javax.annotation.Nullable;
-
-@Component
-public class KafkaSendResultHandler implements ProducerListener {
-
-    @Override
-    public void onSuccess(ProducerRecord producerRecord, RecordMetadata recordMetadata) {
-        System.out.println("message sent successfully: " + producerRecord.toString());
-    }
-
-    @Override
-    public void onError(ProducerRecord producerRecord, @Nullable RecordMetadata recordMetadata, Exception exception) {
-        System.out.println("message send failed: " + producerRecord.toString() + exception.getMessage());
-    }
-}
+//package com.casic.missiles.autoconfig;
+//
+//import org.apache.kafka.clients.producer.ProducerRecord;
+//import org.apache.kafka.clients.producer.RecordMetadata;
+//import org.springframework.kafka.support.ProducerListener;
+//import org.springframework.stereotype.Component;
+//
+//import javax.annotation.Nullable;
+//
+//@Component
+//public class KafkaSendResultHandler implements ProducerListener {
+//
+//    @Override
+//    public void onSuccess(ProducerRecord producerRecord, RecordMetadata recordMetadata) {
+//        System.out.println("message sent successfully: " + producerRecord.toString());
+//    }
+//
+//    @Override
+//    public void onError(ProducerRecord producerRecord, @Nullable RecordMetadata recordMetadata, Exception exception) {
+//        System.out.println("message send failed: " + producerRecord.toString() + exception.getMessage());
+//    }
+//}
a/sensorhub-core/src/main/java/com/casic/missiles/cache/ProtocolProcessEventListener.java b/sensorhub-core/src/main/java/com/casic/missiles/cache/ProtocolProcessEventListener.java
index b0bf96d..5144189 100644
--- a/sensorhub-core/src/main/java/com/casic/missiles/cache/ProtocolProcessEventListener.java
+++ b/sensorhub-core/src/main/java/com/casic/missiles/cache/ProtocolProcessEventListener.java
@@ -157,7 +157,7 @@
                 deviceDataList.add(deviceData);
             }
             //batch save
-            deviceDataService.saveBatch(deviceDataList);
+//            deviceDataService.saveBatch(deviceDataList);
         }
     }
     //evict the cache
diff --git a/sensorhub-core/src/main/java/com/casic/missiles/controller/AepCommandSend.java b/sensorhub-core/src/main/java/com/casic/missiles/controller/AepCommandSend.java
index 2d0aeae..5e3ba38 100644
--- a/sensorhub-core/src/main/java/com/casic/missiles/controller/AepCommandSend.java
+++ b/sensorhub-core/src/main/java/com/casic/missiles/controller/AepCommandSend.java
@@ -64,7 +64,7 @@
             request.setParamMasterKey("0a77886fae4f4ff68d926adeb3a3ef5b"); // single value
             request.setBody(JSONObject.toJSONString(busConfigParam).getBytes());
             CreateCommandLwm2mProfileResponse msgResponse = client.CreateCommandLwm2mProfile(request);
-            log.info("-----" + msgResponse.getMessage());
+            log.info("send status-----" + msgResponse.getMessage());
         } catch (Exception ex) {
         } finally {
             client.shutdown();
diff --git a/sensorhub-core/src/main/java/com/casic/missiles/controller/KafkaController.java b/sensorhub-core/src/main/java/com/casic/missiles/controller/KafkaController.java
deleted file mode 100644
index d679bf1..0000000
--- a/sensorhub-core/src/main/java/com/casic/missiles/controller/KafkaController.java
+++ /dev/null
@@ -1,59 +0,0 @@
-package com.casic.missiles.controller;
-
-import com.casic.missiles.autoconfig.KafkaSendResultHandler;
-import com.casic.missiles.parser.sender.impl.KafkaSubscribe;
-import org.apache.kafka.clients.producer.ProducerRecord;
-import org.springframework.beans.factory.annotation.Autowired;
-import org.springframework.kafka.core.KafkaTemplate;
-import org.springframework.kafka.support.KafkaHeaders;
-import org.springframework.messaging.MessageHeaders;
-import org.springframework.messaging.support.GenericMessage;
-import org.springframework.transaction.annotation.Transactional;
-import org.springframework.web.bind.annotation.RequestMapping;
-import org.springframework.web.bind.annotation.RestController;
-
-import java.util.HashMap;
-import java.util.Map;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.TimeoutException;
-
-@RestController
-@RequestMapping("/provider")
-// this annotation marks the class transactional for Spring Boot; Kafka transactions are enabled in the Kafka config, and without it an error is raised
-public class KafkaController {
-
-    @Autowired
-    private KafkaSubscribe kafkaSubscribe;
-
-    @RequestMapping("/send")
-    @Transactional
-    public String sendMultiple() throws Exception {
-        kafkaSubscribe.publishDataSubscribe(null, null);
-        return "okk";
-    }
-
-
-//    /**
-//     * Kafka offers several ways to build messages
-//     *
-//     * @throws ExecutionException
-//     * @throws InterruptedException
-//     * @throws TimeoutException
-//     */
-//    public void sendDemo() throws ExecutionException, InterruptedException, TimeoutException {
-//        // the trailing get() makes the send synchronous; the optional timeout throws a timeout exception when exceeded, although the send may still succeed
-//        kafkaTemplate.send("topic1", "to topic1").get();
-//        // send a message via ProducerRecord
-//        ProducerRecord producerRecord = new ProducerRecord<>("topic.quick.demo", "use ProducerRecord to send message");
-//        kafkaTemplate.send(producerRecord);
-//        // send a message via Message
-//        Map map = new HashMap<>();
-//        map.put(KafkaHeaders.TOPIC, "topic.quick.demo");
-//        map.put(KafkaHeaders.PARTITION_ID, 0);
-//        map.put(KafkaHeaders.MESSAGE_KEY, 0);
-//        GenericMessage message = new GenericMessage<>("use Message to send message", new MessageHeaders(map));
-//        kafkaTemplate.send(message);
-//    }
-
-}
-
diff --git a/sensorhub-core/src/main/java/com/casic/missiles/controller/ReceiveController.java b/sensorhub-core/src/main/java/com/casic/missiles/controller/ReceiveController.java
index e5fa505..dfed36a 100644
--- a/sensorhub-core/src/main/java/com/casic/missiles/controller/ReceiveController.java
+++ b/sensorhub-core/src/main/java/com/casic/missiles/controller/ReceiveController.java
@@ -18,13 +18,13 @@
     private final ThreadPoolExecutor threadPoolExecutor = new ThreadPoolExecutor(5, 10, 60, TimeUnit.SECONDS, new LinkedBlockingDeque<>(10));
 
     @RequestMapping("/data")
-    public Object testNbResponse(@RequestBody Map h2sDataMap) {
+    public Object testNbResponse(@RequestBody Map dataMap) {
         ResponseData responseData = new ResponseData();
-        System.out.println(JSON.toJSON(h2sDataMap));
+        System.out.println(JSON.toJSON(dataMap));
         threadPoolExecutor.execute(
                 () -> {
                     AepCommandSend aepCommandSend = new AepCommandSend();
-                    aepCommandSend.sendConfig(h2sDataMap);
+                    aepCommandSend.sendConfig(dataMap);
                 }
         );
         responseData.setCode(200);
diff --git a/sensorhub-core/src/main/java/com/casic/missiles/netty/SensorhubServer.java b/sensorhub-core/src/main/java/com/casic/missiles/netty/SensorhubServer.java
index c4a534c..a946173 100644
--- a/sensorhub-core/src/main/java/com/casic/missiles/netty/SensorhubServer.java
+++ b/sensorhub-core/src/main/java/com/casic/missiles/netty/SensorhubServer.java
@@ -3,6 +3,7 @@
 import com.casic.missiles.autoconfig.SensorhubProperties;
 import com.casic.missiles.netty.handler.SensorhubServerChannelInitialHandler;
 import io.netty.bootstrap.ServerBootstrap;
+import io.netty.buffer.PooledByteBufAllocator;
 import io.netty.channel.ChannelFuture;
 import io.netty.channel.ChannelOption;
 import io.netty.channel.EventLoopGroup;
@@ -71,6 +72,8 @@
             b.group(bossGroup, workerGroup) // set the EventLoopGroup
                     .channel(NioServerSocketChannel.class) // specify the new Channel type
                     .childHandler(new SensorhubServerChannelInitialHandler()) // specify the ChannelHandler
+                    .option(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT)
+                    .childOption(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT)
                     .option(ChannelOption.SO_BACKLOG, 1024) // ServerChannel options
                     .childOption(ChannelOption.SO_KEEPALIVE, true); // with SO_KEEPALIVE=true the server can probe whether the client connection is still alive, and close it to free resources once the client is gone
             //add probing
diff --git a/sensorhub-core/src/main/java/com/casic/missiles/parser/GenericProtocolParser.java b/sensorhub-core/src/main/java/com/casic/missiles/parser/GenericProtocolParser.java
index bb41939..ec9a04f 100644
--- a/sensorhub-core/src/main/java/com/casic/missiles/parser/GenericProtocolParser.java
+++ b/sensorhub-core/src/main/java/com/casic/missiles/parser/GenericProtocolParser.java
@@ -122,7 +122,7 @@
             }
             ProtocolProcessEventListener.setTask(devcode, bizDataMap, 3);
             //store the data
-            datagramEventProvider.storeData(bizDataMap);
+//            datagramEventProvider.storeData(bizDataMap);
         } catch (RuntimeException rex) {
             log.error("parsing failed, exception: {}", rex);
             //data dispatch, asynchronous, with exception interception
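The SensorhubServer hunk pins both channel allocators to PooledByteBufAllocator, which fits the direct-memory instrumentation added elsewhere in this patch: with pooled direct buffers, a leak shows up as monotonically growing usage. When chasing such a leak, Netty's built-in detector can be dialed up as well. This is a debugging sketch under that assumption, not part of the patch itself.

import io.netty.bootstrap.ServerBootstrap;
import io.netty.buffer.PooledByteBufAllocator;
import io.netty.channel.ChannelOption;
import io.netty.util.ResourceLeakDetector;

public class LeakHuntingBootstrapSketch {

    public static void configure(ServerBootstrap b) {
        // report every leaked buffer with full access records (expensive; debug builds only)
        ResourceLeakDetector.setLevel(ResourceLeakDetector.Level.PARANOID);

        b.option(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT)
         .childOption(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT);
    }
}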
diff --git a/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/combined/impl/BizFieldParseProcessor.java b/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/combined/impl/BizFieldParseProcessor.java
index 4cd61a7..f65e4eb 100644
--- a/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/combined/impl/BizFieldParseProcessor.java
+++ b/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/combined/impl/BizFieldParseProcessor.java
@@ -62,9 +62,7 @@
      * Compute the offset position after processing
      */
     private Integer calculateAfterProcessPosition(AbstractFieldConfig newProtocolFieldConfig, CombinedFieldProcessorParam combinedFieldParam) {
-//        Integer originPositionIndex = newProtocolFieldConfig.getOriginPositionByte() - combinedFieldParam.getByteBuf().readerIndex();
         Integer mergeBitToByte = 0;
-//        mergeBitToByte += originPositionIndex;
         if (newProtocolFieldConfig.getOffsetUnit().equals("bit")) {
             mergeBitToByte += (newProtocolFieldConfig.getOriginPositionBit() + newProtocolFieldConfig.getOffsetLength()) / 8;
         } else {
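The surviving arithmetic in calculateAfterProcessPosition converts a bit-level origin plus a bit-length offset into whole bytes: a field starting at bit 4 with a 12-bit offset ends at bit 16, i.e. (4 + 12) / 8 = 2 bytes past the current position, and integer division deliberately floors any partial byte. A one-line check with hypothetical values:

public class BitToByteSketch {
    public static void main(String[] args) {
        int originPositionBit = 4;  // start position inside the current byte, in bits
        int offsetLengthBits = 12;  // field length in bits
        int bytes = (originPositionBit + offsetLengthBits) / 8;
        System.out.println(bytes);  // 2 (partial bytes are floored)
    }
}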
0:"所有字段都更新和插入" 1:"只更新和插入非NULL值" 2:"只更新和插入非NULL值且非空字符串" diff --git a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/DirectMemoryReporter.java b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/DirectMemoryReporter.java new file mode 100644 index 0000000..d54b7a9 --- /dev/null +++ b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/DirectMemoryReporter.java @@ -0,0 +1,34 @@ +package com.casic.missiles.autoconfig; + +import io.netty.util.concurrent.GlobalEventExecutor; +import io.netty.util.internal.PlatformDependent; +import lombok.extern.slf4j.Slf4j; +import org.springframework.stereotype.Component; +import org.springframework.util.ReflectionUtils; + +import java.lang.reflect.Field; +import java.util.concurrent.TimeUnit; + +@Slf4j +public class DirectMemoryReporter { + + private Integer _1k = 1024; + private Long directMomery; + + public DirectMemoryReporter() { + Field field = ReflectionUtils.findField(PlatformDependent.class,"DIRECT_MEMORY_COUNTER"); + field.setAccessible(true); + try { + directMomery = PlatformDependent.usedDirectMemory(); + } catch (Exception e) { + } + + GlobalEventExecutor.INSTANCE.scheduleAtFixedRate(this::doReport, 0, 1, TimeUnit.SECONDS); + } + + + + public void doReport() { + log.debug("netty_directMomery_log ,{}k" , directMomery / _1k); + } +} \ No newline at end of file diff --git a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerConfig.java b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerConfig.java index 24e4884..e494726 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerConfig.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerConfig.java @@ -1,100 +1,100 @@ -package com.casic.missiles.autoconfig; - -import org.apache.kafka.clients.CommonClientConfigs; -import org.apache.kafka.clients.consumer.ConsumerConfig; -import org.apache.kafka.common.config.SaslConfigs; -import org.apache.kafka.common.security.auth.SecurityProtocol; -import org.apache.kafka.common.serialization.StringDeserializer; -import org.springframework.beans.factory.annotation.Value; -import org.springframework.boot.SpringBootConfiguration; -import org.springframework.context.annotation.Bean; -import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory; -import org.springframework.kafka.config.KafkaListenerContainerFactory; -import org.springframework.kafka.core.DefaultKafkaConsumerFactory; -import org.springframework.kafka.listener.ConcurrentMessageListenerContainer; -import org.springframework.kafka.listener.ContainerProperties; - -import java.util.HashMap; -import java.util.Map; - - -/** - * @author cz - */ -@SpringBootConfiguration -public class KafkaConsumerConfig { - - @Value("${spring.kafka.consumer.bootstrap-servers}") - private String bootstrapServers; - @Value("${spring.kafka.consumer.group-id}") - private String groupId; - @Value("${spring.kafka.consumer.enable-auto-commit}") - private boolean enableAutoCommit; - @Value("${spring.kafka.properties.session.timeout.ms}") - private String sessionTimeout; - @Value("${spring.kafka.properties.max.poll.interval.ms}") - private String maxPollIntervalTime; - @Value("${spring.kafka.consumer.max-poll-records}") - private String maxPollRecords; - @Value("${spring.kafka.consumer.auto-offset-reset}") - private String autoOffsetReset; - @Value("${spring.kafka.listener.concurrency}") - private Integer concurrency; - @Value("${spring.kafka.listener.missing-topics-fatal}") - private boolean 
missingTopicsFatal; - - private final long pollTimeout = 600000; - - @Bean - public Map consumerConfigs() { - Map propsMap = new HashMap<>(16); - // 服务器地址,不多说配置直接用 - propsMap.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers); - // groupId不多说,直接用 - propsMap.put(ConsumerConfig.GROUP_ID_CONFIG, groupId); - //是否自动提交偏移量,默认值是true,为了避免出现重复数据和数据丢失,可以把它设置为false,然后手动提交偏移量 - propsMap.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, enableAutoCommit); - //自动提交的时间间隔,自动提交开启时生效 - propsMap.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "2000"); - //该属性指定了消费者在读取一个没有偏移量的分区或者偏移量无效的情况下该作何处理: - //我们使用latest:当各分区下有已提交的offset时,从提交的offset开始消费;无提交的offset时,消费新产生的该分区下的数据 - propsMap.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, autoOffsetReset); - //两次poll之间的最大间隔,默认值为5分钟。如果超过这个间隔会触发reBalance - propsMap.put(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, maxPollIntervalTime); - //这个参数定义了poll方法最多可以拉取多少条消息,默认值为500。 - propsMap.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, maxPollRecords); - //当broker多久没有收到consumer的心跳请求后就触发reBalance,默认值是10s - propsMap.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, sessionTimeout); - //序列化(我们这边使用StringDeserializer,与生产者保持一致) - propsMap.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); - propsMap.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); - // 下面四个参数是用户名密码的参数,没有用户名密码可以去掉以下配置 -// propsMap.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, SecurityProtocol.SASL_PLAINTEXT.name); -// propsMap.put(SaslConfigs.SASL_MECHANISM, "PLAIN"); -// propsMap.put("java.security.auth.login.config", "10000"); - // 这里username设置用户名, password设置密码我写死到代码里了,可以更改为nacos配置 -// propsMap.put(SaslConfigs.SASL_JAAS_CONFIG, String.format("org.apache.kafka.common.security.plain.PlainLoginModule required username=\"admin\" password=\"admin1234\";")); - return propsMap; - } - - // 消费者工厂,将配置信息加载进去 - @Bean("consumerFactory") - public DefaultKafkaConsumerFactory consumerFactory() { - return new DefaultKafkaConsumerFactory(consumerConfigs()); - } - - @Bean("listenerContainerFactory") - public KafkaListenerContainerFactory> kafkaListenerContainerFactory() { - ConcurrentKafkaListenerContainerFactory factory = new ConcurrentKafkaListenerContainerFactory<>(); - factory.setConsumerFactory(consumerFactory()); - //在侦听器容器中运行的线程数,一般设置为 机器数*分区数 - factory.setConcurrency(concurrency); - //消费监听接口监听的主题不存在时,默认会报错,所以设置为false忽略错误 - factory.getContainerProperties().setMissingTopicsFatal(missingTopicsFatal); - //自动提交关闭,需要设置手动消息确认 - factory.getContainerProperties().setAckMode(ContainerProperties.AckMode.MANUAL_IMMEDIATE); - factory.getContainerProperties().setPollTimeout(pollTimeout); - return factory; - } -} - +//package com.casic.missiles.autoconfig; +// +//import org.apache.kafka.clients.CommonClientConfigs; +//import org.apache.kafka.clients.consumer.ConsumerConfig; +//import org.apache.kafka.common.config.SaslConfigs; +//import org.apache.kafka.common.security.auth.SecurityProtocol; +//import org.apache.kafka.common.serialization.StringDeserializer; +//import org.springframework.beans.factory.annotation.Value; +//import org.springframework.boot.SpringBootConfiguration; +//import org.springframework.context.annotation.Bean; +//import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory; +//import org.springframework.kafka.config.KafkaListenerContainerFactory; +//import org.springframework.kafka.core.DefaultKafkaConsumerFactory; +//import org.springframework.kafka.listener.ConcurrentMessageListenerContainer; +//import 
org.springframework.kafka.listener.ContainerProperties; +// +//import java.util.HashMap; +//import java.util.Map; +// +// +///** +// * @author cz +// */ +//@SpringBootConfiguration +//public class KafkaConsumerConfig { +// +// @Value("${spring.kafka.consumer.bootstrap-servers}") +// private String bootstrapServers; +// @Value("${spring.kafka.consumer.group-id}") +// private String groupId; +// @Value("${spring.kafka.consumer.enable-auto-commit}") +// private boolean enableAutoCommit; +// @Value("${spring.kafka.properties.session.timeout.ms}") +// private String sessionTimeout; +// @Value("${spring.kafka.properties.max.poll.interval.ms}") +// private String maxPollIntervalTime; +// @Value("${spring.kafka.consumer.max-poll-records}") +// private String maxPollRecords; +// @Value("${spring.kafka.consumer.auto-offset-reset}") +// private String autoOffsetReset; +// @Value("${spring.kafka.listener.concurrency}") +// private Integer concurrency; +// @Value("${spring.kafka.listener.missing-topics-fatal}") +// private boolean missingTopicsFatal; +// +// private final long pollTimeout = 600000; +// +// @Bean +// public Map consumerConfigs() { +// Map propsMap = new HashMap<>(16); +// // 服务器地址,不多说配置直接用 +// propsMap.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers); +// // groupId不多说,直接用 +// propsMap.put(ConsumerConfig.GROUP_ID_CONFIG, groupId); +// //是否自动提交偏移量,默认值是true,为了避免出现重复数据和数据丢失,可以把它设置为false,然后手动提交偏移量 +// propsMap.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, enableAutoCommit); +// //自动提交的时间间隔,自动提交开启时生效 +// propsMap.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "2000"); +// //该属性指定了消费者在读取一个没有偏移量的分区或者偏移量无效的情况下该作何处理: +// //我们使用latest:当各分区下有已提交的offset时,从提交的offset开始消费;无提交的offset时,消费新产生的该分区下的数据 +// propsMap.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, autoOffsetReset); +// //两次poll之间的最大间隔,默认值为5分钟。如果超过这个间隔会触发reBalance +// propsMap.put(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, maxPollIntervalTime); +// //这个参数定义了poll方法最多可以拉取多少条消息,默认值为500。 +// propsMap.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, maxPollRecords); +// //当broker多久没有收到consumer的心跳请求后就触发reBalance,默认值是10s +// propsMap.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, sessionTimeout); +// //序列化(我们这边使用StringDeserializer,与生产者保持一致) +// propsMap.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); +// propsMap.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); +// // 下面四个参数是用户名密码的参数,没有用户名密码可以去掉以下配置 +//// propsMap.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, SecurityProtocol.SASL_PLAINTEXT.name); +//// propsMap.put(SaslConfigs.SASL_MECHANISM, "PLAIN"); +//// propsMap.put("java.security.auth.login.config", "10000"); +// // 这里username设置用户名, password设置密码我写死到代码里了,可以更改为nacos配置 +//// propsMap.put(SaslConfigs.SASL_JAAS_CONFIG, String.format("org.apache.kafka.common.security.plain.PlainLoginModule required username=\"admin\" password=\"admin1234\";")); +// return propsMap; +// } +// +// // 消费者工厂,将配置信息加载进去 +// @Bean("consumerFactory") +// public DefaultKafkaConsumerFactory consumerFactory() { +// return new DefaultKafkaConsumerFactory(consumerConfigs()); +// } +// +// @Bean("listenerContainerFactory") +// public KafkaListenerContainerFactory> kafkaListenerContainerFactory() { +// ConcurrentKafkaListenerContainerFactory factory = new ConcurrentKafkaListenerContainerFactory<>(); +// factory.setConsumerFactory(consumerFactory()); +// //在侦听器容器中运行的线程数,一般设置为 机器数*分区数 +// factory.setConcurrency(concurrency); +// //消费监听接口监听的主题不存在时,默认会报错,所以设置为false忽略错误 +// 
factory.getContainerProperties().setMissingTopicsFatal(missingTopicsFatal); +// //自动提交关闭,需要设置手动消息确认 +// factory.getContainerProperties().setAckMode(ContainerProperties.AckMode.MANUAL_IMMEDIATE); +// factory.getContainerProperties().setPollTimeout(pollTimeout); +// return factory; +// } +//} +// diff --git a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerListener.java b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerListener.java index c50a4e3..6600312 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerListener.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerListener.java @@ -1,31 +1,31 @@ -package com.casic.missiles.autoconfig; - -import org.apache.kafka.clients.consumer.ConsumerRecord; -import org.springframework.kafka.annotation.KafkaListener; -import org.springframework.kafka.config.KafkaListenerEndpointRegistry; -import org.springframework.kafka.support.Acknowledgment; -import org.springframework.web.bind.annotation.RestController; - -import javax.annotation.Resource; - -@RestController() -public class KafkaConsumerListener{ - @Resource - private KafkaListenerEndpointRegistry kafkaListenerEndpointRegistry; - /** - * 监听kafka消息 - * - * 使用autoStartup = "false"必须指定id - */ - @KafkaListener(id = "${spring.kafka.consumer.group-id}", topics = {"KAFKA_TEST_TOPICS"}, autoStartup = "true") - public void listenTopics(ConsumerRecord consumerRecord, Acknowledgment ack) { - try { - System.out.println("listenTopics接受消息:" + consumerRecord.value()); - //手动确认 - ack.acknowledge(); - } catch (Exception e) { - System.out.println("消费失败:" + e); - } - } - -} +//package com.casic.missiles.autoconfig; +// +//import org.apache.kafka.clients.consumer.ConsumerRecord; +//import org.springframework.kafka.annotation.KafkaListener; +//import org.springframework.kafka.config.KafkaListenerEndpointRegistry; +//import org.springframework.kafka.support.Acknowledgment; +//import org.springframework.web.bind.annotation.RestController; +// +//import javax.annotation.Resource; +// +//@RestController() +//public class KafkaConsumerListener{ +// @Resource +// private KafkaListenerEndpointRegistry kafkaListenerEndpointRegistry; +// /** +// * 监听kafka消息 +// * +// * 使用autoStartup = "false"必须指定id +// */ +// @KafkaListener(id = "${spring.kafka.consumer.group-id}", topics = {"KAFKA_TEST_TOPICS"}, autoStartup = "true") +// public void listenTopics(ConsumerRecord consumerRecord, Acknowledgment ack) { +// try { +// System.out.println("listenTopics接受消息:" + consumerRecord.value()); +// //手动确认 +// ack.acknowledge(); +// } catch (Exception e) { +// System.out.println("消费失败:" + e); +// } +// } +// +//} diff --git a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerListenerError.java b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerListenerError.java index d6f771a..dbc561c 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerListenerError.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerListenerError.java @@ -1,29 +1,29 @@ -package com.casic.missiles.autoconfig; - -import edu.umd.cs.findbugs.annotations.NonNull; -import org.apache.kafka.clients.consumer.Consumer; -import org.springframework.kafka.listener.KafkaListenerErrorHandler; -import org.springframework.kafka.listener.ListenerExecutionFailedException; -import org.springframework.messaging.Message; -import org.springframework.stereotype.Component; - -@Component 
-public class KafkaConsumerListenerError implements KafkaListenerErrorHandler { - - - @Override - @NonNull - public Object handleError(Message message, ListenerExecutionFailedException e) { - return new Object(); - } - - @Override - public Object handleError(Message message, ListenerExecutionFailedException exception, Consumer consumer) { - System.out.println("消息详情:" + message); - System.out.println("异常信息::" + exception); - System.out.println("消费者详情::" + consumer.groupMetadata()); - System.out.println("监听主题::" + consumer.listTopics()); - return KafkaListenerErrorHandler.super.handleError(message, exception, consumer); - } - -} +//package com.casic.missiles.autoconfig; +// +//import edu.umd.cs.findbugs.annotations.NonNull; +//import org.apache.kafka.clients.consumer.Consumer; +//import org.springframework.kafka.listener.KafkaListenerErrorHandler; +//import org.springframework.kafka.listener.ListenerExecutionFailedException; +//import org.springframework.messaging.Message; +//import org.springframework.stereotype.Component; +// +//@Component +//public class KafkaConsumerListenerError implements KafkaListenerErrorHandler { +// +// +// @Override +// @NonNull +// public Object handleError(Message message, ListenerExecutionFailedException e) { +// return new Object(); +// } +// +// @Override +// public Object handleError(Message message, ListenerExecutionFailedException exception, Consumer consumer) { +// System.out.println("消息详情:" + message); +// System.out.println("异常信息::" + exception); +// System.out.println("消费者详情::" + consumer.groupMetadata()); +// System.out.println("监听主题::" + consumer.listTopics()); +// return KafkaListenerErrorHandler.super.handleError(message, exception, consumer); +// } +// +//} diff --git a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaProviderConfig.java b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaProviderConfig.java index e8e36ec..1d605ed 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaProviderConfig.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaProviderConfig.java @@ -1,85 +1,85 @@ -package com.casic.missiles.autoconfig; - -import org.apache.kafka.clients.CommonClientConfigs; -import org.apache.kafka.clients.producer.ProducerConfig; -import org.apache.kafka.common.config.SaslConfigs; -import org.apache.kafka.common.security.auth.SecurityProtocol; -import org.apache.kafka.common.serialization.StringSerializer; -import org.springframework.beans.factory.annotation.Value; -import org.springframework.boot.SpringBootConfiguration; -import org.springframework.context.annotation.Bean; -import org.springframework.context.annotation.Primary; -import org.springframework.kafka.core.DefaultKafkaProducerFactory; -import org.springframework.kafka.core.KafkaTemplate; -import org.springframework.kafka.core.ProducerFactory; -import org.springframework.kafka.transaction.KafkaTransactionManager; - -import java.util.HashMap; -import java.util.Map; - -/** - * @author cz - */ -@SpringBootConfiguration -public class KafkaProviderConfig { - - @Value("${spring.kafka.producer.bootstrap-servers}") - private String bootstrapServers; - @Value("${spring.kafka.producer.acks}") - private String acks; - @Value("${spring.kafka.producer.retries}") - private String retries; - @Value("${spring.kafka.producer.batch-size}") - private String batchSize; - @Value("${spring.kafka.producer.buffer-memory}") - private String bufferMemory; - - @Bean - public Map producerConfigs() { - Map props = new HashMap<>(16); - 
props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers); - //响应模式,我们使用acks=all :只有当所有参与复制的节点全部收到消息时,生产者才会收到一个来自服务器的成功响应。 - props.put(ProducerConfig.ACKS_CONFIG, acks); - //发生错误后,消息重发的次数,开启事务必须大于0 - props.put(ProducerConfig.RETRIES_CONFIG, retries); - //当多个消息发送到相同分区时,生产者会将消息打包到一起,以减少请求交互. 而不是一条条发送 - props.put(ProducerConfig.BATCH_SIZE_CONFIG, batchSize); - //有的时刻消息比较少,过了很久,比如5min也没有凑够16KB,这样延时就很大,所以需要一个参数. 再设置一个时间,到了这个时间, - props.put(ProducerConfig.LINGER_MS_CONFIG, "5000"); - //生产者内存缓冲区的大小 - props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, bufferMemory); - //序列和消费者对应 - props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class); - props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class); - - //用户名密码配置,没有用户名密码可以去掉以下配置 -// props.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, SecurityProtocol.SASL_PLAINTEXT.name); -// props.put(SaslConfigs.SASL_MECHANISM, "PLAIN"); -// props.put("java.security.auth.login.config", "10000"); - // 可以在nacos配置文件中配置 -// props.put(SaslConfigs.SASL_JAAS_CONFIG, String.format("org.apache.kafka.common.security.plain.PlainLoginModule required username=\"admin\" password=\"admin1234\";")); - return props; - } - - // 生产者工厂 - @Bean("kafkaProduceFactory") - public ProducerFactory producerFactory() { - DefaultKafkaProducerFactory factory = new DefaultKafkaProducerFactory<>(producerConfigs()); - factory.setTransactionIdPrefix("kafkaTx-"); - return factory; - } - - // 事务处理 - // 这里的事务处理会和项目中的其他事务起冲突,所以我一般会把@Bean去掉,不用spring代理 - @Bean("kafkaTransactionManager") - @Primary - public KafkaTransactionManager kafkaTransactionManager(ProducerFactory producerFactory) { - return new KafkaTransactionManager(producerFactory); - } - - @Bean - public KafkaTemplate kafkaTemplate() { - return new KafkaTemplate<>(producerFactory()); - } - -} +//package com.casic.missiles.autoconfig; +// +//import org.apache.kafka.clients.CommonClientConfigs; +//import org.apache.kafka.clients.producer.ProducerConfig; +//import org.apache.kafka.common.config.SaslConfigs; +//import org.apache.kafka.common.security.auth.SecurityProtocol; +//import org.apache.kafka.common.serialization.StringSerializer; +//import org.springframework.beans.factory.annotation.Value; +//import org.springframework.boot.SpringBootConfiguration; +//import org.springframework.context.annotation.Bean; +//import org.springframework.context.annotation.Primary; +//import org.springframework.kafka.core.DefaultKafkaProducerFactory; +//import org.springframework.kafka.core.KafkaTemplate; +//import org.springframework.kafka.core.ProducerFactory; +//import org.springframework.kafka.transaction.KafkaTransactionManager; +// +//import java.util.HashMap; +//import java.util.Map; +// +///** +// * @author cz +// */ +//@SpringBootConfiguration +//public class KafkaProviderConfig { +// +// @Value("${spring.kafka.producer.bootstrap-servers}") +// private String bootstrapServers; +// @Value("${spring.kafka.producer.acks}") +// private String acks; +// @Value("${spring.kafka.producer.retries}") +// private String retries; +// @Value("${spring.kafka.producer.batch-size}") +// private String batchSize; +// @Value("${spring.kafka.producer.buffer-memory}") +// private String bufferMemory; +// +// @Bean +// public Map producerConfigs() { +// Map props = new HashMap<>(16); +// props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers); +// //响应模式,我们使用acks=all :只有当所有参与复制的节点全部收到消息时,生产者才会收到一个来自服务器的成功响应。 +// props.put(ProducerConfig.ACKS_CONFIG, acks); +// //发生错误后,消息重发的次数,开启事务必须大于0 +// 
props.put(ProducerConfig.RETRIES_CONFIG, retries); +// //当多个消息发送到相同分区时,生产者会将消息打包到一起,以减少请求交互. 而不是一条条发送 +// props.put(ProducerConfig.BATCH_SIZE_CONFIG, batchSize); +// //有的时刻消息比较少,过了很久,比如5min也没有凑够16KB,这样延时就很大,所以需要一个参数. 再设置一个时间,到了这个时间, +// props.put(ProducerConfig.LINGER_MS_CONFIG, "5000"); +// //生产者内存缓冲区的大小 +// props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, bufferMemory); +// //序列和消费者对应 +// props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class); +// props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class); +// +// //用户名密码配置,没有用户名密码可以去掉以下配置 +//// props.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, SecurityProtocol.SASL_PLAINTEXT.name); +//// props.put(SaslConfigs.SASL_MECHANISM, "PLAIN"); +//// props.put("java.security.auth.login.config", "10000"); +// // 可以在nacos配置文件中配置 +//// props.put(SaslConfigs.SASL_JAAS_CONFIG, String.format("org.apache.kafka.common.security.plain.PlainLoginModule required username=\"admin\" password=\"admin1234\";")); +// return props; +// } +// +// // 生产者工厂 +// @Bean("kafkaProduceFactory") +// public ProducerFactory producerFactory() { +// DefaultKafkaProducerFactory factory = new DefaultKafkaProducerFactory<>(producerConfigs()); +// factory.setTransactionIdPrefix("kafkaTx-"); +// return factory; +// } +// +// // 事务处理 +// // 这里的事务处理会和项目中的其他事务起冲突,所以我一般会把@Bean去掉,不用spring代理 +// @Bean("kafkaTransactionManager") +// @Primary +// public KafkaTransactionManager kafkaTransactionManager(ProducerFactory producerFactory) { +// return new KafkaTransactionManager(producerFactory); +// } +// +// @Bean +// public KafkaTemplate kafkaTemplate() { +// return new KafkaTemplate<>(producerFactory()); +// } +// +//} diff --git a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaSendResultHandler.java b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaSendResultHandler.java index 1fdac64..1598ee0 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaSendResultHandler.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaSendResultHandler.java @@ -1,22 +1,22 @@ -package com.casic.missiles.autoconfig; - -import org.apache.kafka.clients.producer.ProducerRecord; -import org.apache.kafka.clients.producer.RecordMetadata; -import org.springframework.kafka.support.ProducerListener; -import org.springframework.stereotype.Component; - -import javax.annotation.Nullable; - -@Component -public class KafkaSendResultHandler implements ProducerListener { - - @Override - public void onSuccess(ProducerRecord producerRecord, RecordMetadata recordMetadata) { - System.out.println("消息发送成功:" + producerRecord.toString()); - } - - @Override - public void onError(ProducerRecord producerRecord, @Nullable RecordMetadata recordMetadata, Exception exception) { - System.out.println("消息发送失败:" + producerRecord.toString() + exception.getMessage()); - } -} +//package com.casic.missiles.autoconfig; +// +//import org.apache.kafka.clients.producer.ProducerRecord; +//import org.apache.kafka.clients.producer.RecordMetadata; +//import org.springframework.kafka.support.ProducerListener; +//import org.springframework.stereotype.Component; +// +//import javax.annotation.Nullable; +// +//@Component +//public class KafkaSendResultHandler implements ProducerListener { +// +// @Override +// public void onSuccess(ProducerRecord producerRecord, RecordMetadata recordMetadata) { +// System.out.println("消息发送成功:" + producerRecord.toString()); +// } +// +// @Override +// public void onError(ProducerRecord producerRecord, 
@Nullable RecordMetadata recordMetadata, Exception exception) { +// System.out.println("消息发送失败:" + producerRecord.toString() + exception.getMessage()); +// } +//} diff --git a/sensorhub-core/src/main/java/com/casic/missiles/cache/ProtocolProcessEventListener.java b/sensorhub-core/src/main/java/com/casic/missiles/cache/ProtocolProcessEventListener.java index b0bf96d..5144189 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/cache/ProtocolProcessEventListener.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/cache/ProtocolProcessEventListener.java @@ -157,7 +157,7 @@ deviceDataList.add(deviceData); } //批量保存 - deviceDataService.saveBatch(deviceDataList); +// deviceDataService.saveBatch(deviceDataList); } } //移除缓存 diff --git a/sensorhub-core/src/main/java/com/casic/missiles/controller/AepCommandSend.java b/sensorhub-core/src/main/java/com/casic/missiles/controller/AepCommandSend.java index 2d0aeae..5e3ba38 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/controller/AepCommandSend.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/controller/AepCommandSend.java @@ -64,7 +64,7 @@ request.setParamMasterKey("0a77886fae4f4ff68d926adeb3a3ef5b"); // single value request.setBody(JSONObject.toJSONString(busConfigParam).getBytes()); CreateCommandLwm2mProfileResponse msgResponse = client.CreateCommandLwm2mProfile(request); - log.info("-----" + msgResponse.getMessage()); + log.info("send status-----" + msgResponse.getMessage()); } catch (Exception ex) { } finally { client.shutdown(); diff --git a/sensorhub-core/src/main/java/com/casic/missiles/controller/KafkaController.java b/sensorhub-core/src/main/java/com/casic/missiles/controller/KafkaController.java deleted file mode 100644 index d679bf1..0000000 --- a/sensorhub-core/src/main/java/com/casic/missiles/controller/KafkaController.java +++ /dev/null @@ -1,59 +0,0 @@ -package com.casic.missiles.controller; - -import com.casic.missiles.autoconfig.KafkaSendResultHandler; -import com.casic.missiles.parser.sender.impl.KafkaSubscribe; -import org.apache.kafka.clients.producer.ProducerRecord; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.kafka.core.KafkaTemplate; -import org.springframework.kafka.support.KafkaHeaders; -import org.springframework.messaging.MessageHeaders; -import org.springframework.messaging.support.GenericMessage; -import org.springframework.transaction.annotation.Transactional; -import org.springframework.web.bind.annotation.RequestMapping; -import org.springframework.web.bind.annotation.RestController; - -import java.util.HashMap; -import java.util.Map; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.TimeoutException; - -@RestController -@RequestMapping("/provider") -//这个注解代表这个类开启Springboot事务,因为我们在Kafka的配置文件开启了Kafka事务,不然会报错 -public class KafkaController { - - @Autowired - private KafkaSubscribe kafkaSubscribe; - - @RequestMapping("/send") - @Transactional - public String sendMultiple() throws Exception { - kafkaSubscribe.publishDataSubscribe(null, null); - return "okk"; - } - - -// /** -// * Kafka提供了多种构建消息的方式 -// * -// * @throws ExecutionException -// * @throws InterruptedException -// * @throws TimeoutException -// */ -// public void sendDemo() throws ExecutionException, InterruptedException, TimeoutException { -// //后面的get代表同步发送,括号内时间可选,代表超过这个时间会抛出超时异常,但是仍会发送成功 -// kafkaTemplate.send("topic1", "发给topic1").get(); -// //使用ProducerRecord发送消息 -// ProducerRecord producerRecord = new ProducerRecord<>("topic.quick.demo", "use 
ProducerRecord to send message"); -// kafkaTemplate.send(producerRecord); -// //使用Message发送消息 -// Map map = new HashMap<>(); -// map.put(KafkaHeaders.TOPIC, "topic.quick.demo"); -// map.put(KafkaHeaders.PARTITION_ID, 0); -// map.put(KafkaHeaders.MESSAGE_KEY, 0); -// GenericMessage message = new GenericMessage<>("use Message to send message", new MessageHeaders(map)); -// kafkaTemplate.send(message); -// } - -} - diff --git a/sensorhub-core/src/main/java/com/casic/missiles/controller/ReceiveController.java b/sensorhub-core/src/main/java/com/casic/missiles/controller/ReceiveController.java index e5fa505..dfed36a 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/controller/ReceiveController.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/controller/ReceiveController.java @@ -18,13 +18,13 @@ private final ThreadPoolExecutor threadPoolExecutor = new ThreadPoolExecutor(5, 10, 60, TimeUnit.SECONDS, new LinkedBlockingDeque<>(10)); @RequestMapping("/data") - public Object testNbResponse(@RequestBody Map h2sDataMap) { + public Object testNbResponse(@RequestBody Map dataMap) { ResponseData responseData = new ResponseData(); - System.out.println(JSON.toJSON(h2sDataMap)); + System.out.println(JSON.toJSON(dataMap)); threadPoolExecutor.execute( () -> { AepCommandSend aepCommandSend = new AepCommandSend(); - aepCommandSend.sendConfig(h2sDataMap); + aepCommandSend.sendConfig(dataMap); } ); responseData.setCode(200); diff --git a/sensorhub-core/src/main/java/com/casic/missiles/netty/SensorhubServer.java b/sensorhub-core/src/main/java/com/casic/missiles/netty/SensorhubServer.java index c4a534c..a946173 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/netty/SensorhubServer.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/netty/SensorhubServer.java @@ -3,6 +3,7 @@ import com.casic.missiles.autoconfig.SensorhubProperties; import com.casic.missiles.netty.handler.SensorhubServerChannelInitialHandler; import io.netty.bootstrap.ServerBootstrap; +import io.netty.buffer.PooledByteBufAllocator; import io.netty.channel.ChannelFuture; import io.netty.channel.ChannelOption; import io.netty.channel.EventLoopGroup; @@ -71,6 +72,8 @@ b.group(bossGroup, workerGroup) // 设置EventLoopGroup .channel(NioServerSocketChannel.class) // 指明新的Channel的类型 .childHandler(new SensorhubServerChannelInitialHandler()) // 指定ChannelHandler + .option(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT) + .childOption(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT) .option(ChannelOption.SO_BACKLOG, 1024) // 设置的ServerChannel的一些选项 .childOption(ChannelOption.SO_KEEPALIVE, true); // 当SO_KEEPALIVE=true的时候,服务端可以探测客户端的连接是否还存活着,如果客户端关闭了,那么服务端的连接可以关闭掉,释放资源 //增加检测 diff --git a/sensorhub-core/src/main/java/com/casic/missiles/parser/GenericProtocolParser.java b/sensorhub-core/src/main/java/com/casic/missiles/parser/GenericProtocolParser.java index bb41939..ec9a04f 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/parser/GenericProtocolParser.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/parser/GenericProtocolParser.java @@ -122,7 +122,7 @@ } ProtocolProcessEventListener.setTask(devcode, bizDataMap, 3); //存储数据 - datagramEventProvider.storeData(bizDataMap); +// datagramEventProvider.storeData(bizDataMap); } catch (RuntimeException rex) { log.error("解析出现异常,异常信息为{}", rex); //数据发送,异步,异常拦截 diff --git a/sensorhub-core/src/main/java/com/casic/missiles/parser/ProtocolParserSupport.java b/sensorhub-core/src/main/java/com/casic/missiles/parser/ProtocolParserSupport.java index 
e4a79ae..c886d25 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/parser/ProtocolParserSupport.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/parser/ProtocolParserSupport.java @@ -29,6 +29,13 @@ public class ProtocolParserSupport { + /** + * 获取协议的固定数据信息 + * + * @param protocolFactory + * @param wholeDatagramByte + * @return + */ protected Map getParseFixedDataMap(AbstractProtocolConfigFactory protocolFactory, ByteBuf wholeDatagramByte) { ProtocolFieldConfigProvider protocolFieldConfigProvider = protocolFactory.getProtocolFieldConfigProvider(); RuleConfigProvider ruleConfigProvider = protocolFactory.getRuleConfigProvider(); @@ -41,7 +48,7 @@ //打印源数据,设备编号 ProcessEventTask processEventTask = ProcessEventTask.builder() .devcode((String) parseFixedDataMap.get("devcode")) - .decryptSoureData(ByteBufUtil.hexDump(wholeDatagramByte)) + .decryptSoureData(ByteBufUtil.hexDump(wholeDatagramByte).substring(wholeDatagramByte.readerIndex())) .build(); ProtocolProcessEventListener.asynAddTask(processEventTask); return parseFixedDataMap; diff --git a/sensorhub-core/src/main/java/com/casic/missiles/parser/SensorhubDecoder.java b/sensorhub-core/src/main/java/com/casic/missiles/parser/SensorhubDecoder.java index c8c54f8..fccfa7c 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/parser/SensorhubDecoder.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/parser/SensorhubDecoder.java @@ -1,14 +1,17 @@ package com.casic.missiles.parser; +import com.casic.missiles.autoconfig.DirectMemoryReporter; import com.casic.missiles.parser.predecodec.AbstractPretreatment; import com.casic.missiles.pojo.ParseResult; import com.casic.missiles.provider.ProtocolConfigProvider; import com.casic.missiles.util.ClazzUtil; import io.netty.buffer.ByteBuf; +import io.netty.buffer.ByteBufAllocator; import io.netty.buffer.ByteBufUtil; import io.netty.channel.ChannelHandlerContext; import io.netty.handler.codec.ByteToMessageDecoder; import lombok.extern.slf4j.Slf4j; +import org.apache.commons.codec.binary.Hex; import java.io.BufferedInputStream; import java.io.File; @@ -33,29 +36,43 @@ * 3、将从通用的协议解析器得到的结果进行保存到list,传递给回复的handler,进行相关的回复命令操作 */ @Override - public void decode(ChannelHandlerContext ctx, ByteBuf buffer, List list){ + public void decode(ChannelHandlerContext ctx, ByteBuf buffer, List list) { //添加前置处理器 List abstractPreProcessingList = ClazzUtil.getSubClassList(AbstractPretreatment.class, true); ByteBuf standardByteBuf = buffer; - String oldBuff=ByteBufUtil.hexDump(standardByteBuf); + String oldBuff = ByteBufUtil.hexDump(standardByteBuf); for (AbstractPretreatment abstractPretreatment : abstractPreProcessingList) { standardByteBuf = abstractPretreatment.decode(standardByteBuf); } - boolean pretreatmentStatus=!oldBuff.equals(ByteBufUtil.hexDump(standardByteBuf)); + boolean pretreatmentStatus = !oldBuff.equals(ByteBufUtil.hexDump(standardByteBuf)); log.info(ByteBufUtil.hexDump(standardByteBuf)); //这里可以增加一些前置处理,例如判断拆包合包等网络流的操作 ProtocolParser protocolParser = new GenericProtocolParser(); - System.out.println(protocolParser); ProtocolConfigProvider protocolConfigProvider = new ProtocolConfigProvider(); ParseResult parseResult = null; +// DirectMemoryReporter memoryReporter=new DirectMemoryReporter(); //无论什么情况都交给,这里组装的内容,在回复的时候有效使用 + Integer pre = 0; while (parseResult == null && standardByteBuf.readerIndex() != standardByteBuf.writerIndex()) { + pre = standardByteBuf.readerIndex(); parseResult = protocolParser.doParseProtocol(standardByteBuf, protocolConfigProvider); + //避免死循环 + if (pre == 
standardByteBuf.readerIndex()) { + break; + } } +// destroy(standardByteBuf); if (parseResult != null) { parseResult.setRequestCode(pretreatmentStatus); list.add(parseResult); } } +// public void destroy(ByteBuf byteBuf) { +// if (byteBuf != null && byteBuf.refCnt() > 0) { +// byteBuf.release(); +// byteBuf = null; +// } +// } + } diff --git a/sensorhub-core/src/main/java/com/casic/missiles/parser/processor/reply/DevcieConfigConfirm.java b/sensorhub-core/src/main/java/com/casic/missiles/parser/processor/reply/DevcieConfigConfirm.java index 1fd58f2..5ea30a9 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/parser/processor/reply/DevcieConfigConfirm.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/parser/processor/reply/DevcieConfigConfirm.java @@ -41,7 +41,7 @@ } } //下发配置成功 -// redisCommon.removeKeyByDevcode(devcode); + redisCommon.removeKeyByDevcode(devcode); configDataMap.put("config", "1"); } return result; diff --git a/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/combined/CombinedFieldSupport.java b/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/combined/CombinedFieldSupport.java index b49c027..9226dc3 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/combined/CombinedFieldSupport.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/combined/CombinedFieldSupport.java @@ -20,13 +20,12 @@ */ public class CombinedFieldSupport { - //深拷贝不影响存储的对象 + //通过深拷贝获取字段配置 protected FieldConfig getFieldConfigById(Long fieldId, Map fieldConfigsMap) { if (ObjectUtils.isNotEmpty(fieldId)) { FieldConfig fieldConfig = fieldConfigsMap.get(fieldId); FieldConfig newDeepCopyFieldConfig = new FieldConfig(); //通过深拷贝传入 -// System.out.println("fieldId:"+fieldId+" json:"+ JSON.toJSON(fieldConfig)); BeanUtils.copyProperties(fieldConfig, newDeepCopyFieldConfig); return newDeepCopyFieldConfig; } else { diff --git a/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/combined/GenericCombinedFieldResolver.java b/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/combined/GenericCombinedFieldResolver.java index 58c419d..72b75dc 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/combined/GenericCombinedFieldResolver.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/combined/GenericCombinedFieldResolver.java @@ -5,6 +5,7 @@ import com.baomidou.mybatisplus.core.toolkit.CollectionUtils; import com.casic.missiles.enums.EngineExceptionEnum; import com.casic.missiles.exception.EngineException; +import com.casic.missiles.parser.GenericProtocolParser; import com.casic.missiles.parser.resolver.combined.impl.BizFieldParseProcessor; import com.casic.missiles.parser.resolver.combined.impl.PreBizFieldParseProcessor; import com.casic.missiles.parser.resolver.combined.impl.PreLeadCodeProcessor; @@ -13,13 +14,18 @@ import com.casic.missiles.pojo.CombinedFieldProcessorParam; import com.casic.missiles.pojo.FieldConfig; import com.casic.missiles.pojo.FieldRuleConfig; +import com.casic.missiles.util.SpringContextUtil; import io.netty.buffer.ByteBuf; import io.netty.buffer.ByteBufUtil; +import io.netty.util.internal.PlatformDependent; import lombok.extern.slf4j.Slf4j; +import org.springframework.util.ReflectionUtils; +import java.lang.reflect.Field; import java.util.ArrayList; import java.util.List; import java.util.Map; +import java.util.concurrent.atomic.AtomicLong; import java.util.stream.Collectors; /** @@ -31,7 +37,7 @@ public class GenericCombinedFieldResolver 
{ /** - * 组合字段解析主要流程类 + * 组合字段解析主要流程类 * 通过查询,字段长度长度随机 */ @@ -42,16 +48,16 @@ List> storeObjectList = combinedFieldParam.getStoreObjectList(); while (byteBuf.readerIndex() < byteBuf.writerIndex()) { for (AbstractCombinedFieldProcessor abstractProcessor : abstractProcessorList) { - Integer oldLength = byteBuf.readerIndex(); - median = abstractProcessor.invoke(combinedFieldParam); - if (byteBuf.readerIndex() >= byteBuf.writerIndex()) { - return; - } - Assert.isFalse(oldLength == byteBuf.readerIndex(), () -> { - throw new EngineException(EngineExceptionEnum.COMBINED_CONFIG_MATCH_FAILED); - }); - // 将前一个节点作为参数传入下一次,保留前一个节点的信息 - combinedFieldParam.setPreProcessorResult(median); + Integer oldLength = byteBuf.readerIndex(); + median = abstractProcessor.invoke(combinedFieldParam); + if (byteBuf.readerIndex() >= byteBuf.writerIndex()) { + return; + } + Assert.isFalse(oldLength == byteBuf.readerIndex(), () -> { + throw new EngineException(EngineExceptionEnum.COMBINED_CONFIG_MATCH_FAILED); + }); + // 将前一个节点作为参数传入下一次,保留前一个节点的信息 + combinedFieldParam.setPreProcessorResult(median); } } System.out.println(JSON.toJSON(storeObjectList)); diff --git a/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/combined/impl/BizFieldParseProcessor.java b/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/combined/impl/BizFieldParseProcessor.java index 4cd61a7..f65e4eb 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/combined/impl/BizFieldParseProcessor.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/combined/impl/BizFieldParseProcessor.java @@ -62,9 +62,7 @@ * 计算处理后的偏移位置 */ private Integer calculateAfterProcessPosition(AbstractFieldConfig newProtocolFieldConfig, CombinedFieldProcessorParam combinedFieldParam) { -// Integer originPositionIndex = newProtocolFieldConfig.getOriginPositionByte() - combinedFieldParam.getByteBuf().readerIndex(); Integer mergeBitToByte = 0; -// mergeBitToByte += originPositionIndex; if (newProtocolFieldConfig.getOffsetUnit().equals("bit")) { mergeBitToByte += (newProtocolFieldConfig.getOriginPositionBit() + newProtocolFieldConfig.getOffsetLength()) / 8; } else { diff --git a/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/fields/BitFieldParser.java b/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/fields/BitFieldParser.java index a855b68..2f77154 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/fields/BitFieldParser.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/fields/BitFieldParser.java @@ -59,7 +59,9 @@ byte[] binaryBytes = getBytesFromBinaryStr(binaryStr); ByteBuf byteBuf = ByteBufAllocator.DEFAULT.buffer(); byteBuf.writeBytes(binaryBytes); - return doResolveFieldByteRule(byteBuf, ruleIds, fieldRuleConfigMap, networkOrder); + Object resolveValue = doResolveFieldByteRule(byteBuf, ruleIds, fieldRuleConfigMap, networkOrder); + byteBuf.release(); + return resolveValue; }
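A few notes on the changes above.

The disabled consumer configuration pairs enable-auto-commit=false with AckMode.MANUAL_IMMEDIATE, so offsets are committed only when the listener acknowledges each record itself. A minimal sketch of that contract in case the Kafka path is re-enabled; the topic, group id and factory name are illustrative values taken from the commented-out classes:

    import org.apache.kafka.clients.consumer.ConsumerRecord;
    import org.springframework.kafka.annotation.KafkaListener;
    import org.springframework.kafka.support.Acknowledgment;
    import org.springframework.stereotype.Component;

    @Component
    public class ManualAckListenerSketch {

        @KafkaListener(topics = "KAFKA_TEST_TOPICS", groupId = "1",
                containerFactory = "listenerContainerFactory")
        public void onMessage(ConsumerRecord<String, String> record, Acknowledgment ack) {
            // process record.value() here ...
            ack.acknowledge(); // MANUAL_IMMEDIATE: this offset is committed right away
            // if acknowledge() is never reached, the offset stays uncommitted and the
            // record is read again after a rebalance or restart
        }
    }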
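KafkaConsumerListenerError's single-argument handleError returns new Object(), which swallows the exception, so a failure never reaches the container's error handling (and with manual ack the offset is still never acknowledged). If failures should surface instead, a handler can log and rethrow; this is a sketch, with the bean name illustrative and referenced from @KafkaListener(errorHandler = "loggingListenerErrorHandler"):

    import lombok.extern.slf4j.Slf4j;
    import org.springframework.kafka.listener.KafkaListenerErrorHandler;
    import org.springframework.kafka.listener.ListenerExecutionFailedException;
    import org.springframework.messaging.Message;
    import org.springframework.stereotype.Component;

    @Slf4j
    @Component("loggingListenerErrorHandler")
    public class LoggingListenerErrorHandler implements KafkaListenerErrorHandler {

        @Override
        public Object handleError(Message<?> message, ListenerExecutionFailedException exception) {
            log.error("listener failed, payload={}", message.getPayload(), exception);
            throw exception; // rethrow so the container's error handling and retries still run
        }
    }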
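The disabled KafkaProviderConfig sets transactionIdPrefix("kafkaTx-") and its comments note that transactions require retries above zero and acks=all; the deleted KafkaController drove this through @Transactional. A sketch of the alternative the config comment hints at, scoping the transaction to the template instead of Spring's transaction manager, which sidesteps the conflict it describes between the KafkaTransactionManager bean and the project's other transactions (topic names and payloads are placeholders):

    import org.springframework.kafka.core.KafkaTemplate;

    public class TransactionalSendSketch {

        // kafkaTemplate must be built from a ProducerFactory with a
        // transactionIdPrefix, as in the commented-out KafkaProviderConfig
        public void send(KafkaTemplate<String, Object> kafkaTemplate) {
            kafkaTemplate.executeInTransaction(ops -> {
                ops.send("demo-topic", "payload-1"); // both records commit
                ops.send("demo-topic", "payload-2"); // or abort together
                return null;
            });
        }
    }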
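KafkaSendResultHandler implements ProducerListener, but the commented-out KafkaProviderConfig builds its template with new KafkaTemplate<>(producerFactory()) and never registers the handler, so onSuccess/onError would not fire even with both beans active. If these classes are restored, one way to wire it up; this is a sketch, and the generics and method of assembly are assumptions:

    import org.springframework.context.annotation.Bean;
    import org.springframework.kafka.core.KafkaTemplate;
    import org.springframework.kafka.core.ProducerFactory;

    public class KafkaTemplateWiringSketch {

        @Bean
        public KafkaTemplate<String, Object> kafkaTemplate(ProducerFactory<String, Object> producerFactory,
                                                           KafkaSendResultHandler sendResultHandler) {
            KafkaTemplate<String, Object> template = new KafkaTemplate<>(producerFactory);
            template.setProducerListener(sendResultHandler); // success/failure callbacks now fire
            return template;
        }
    }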
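The SensorhubDecoder change records readerIndex before each parse attempt and breaks out when nothing was consumed, so a frame the GenericProtocolParser cannot handle no longer spins the decode loop forever. The same guard extracted as a generic helper; this is a sketch, whereas the real loop passes a ProtocolConfigProvider to doParseProtocol:

    import io.netty.buffer.ByteBuf;
    import java.util.function.Function;

    public final class ParseLoopSketch {

        /** Retries parseOnce until it yields a result, the buffer drains, or no bytes are consumed. */
        public static <T> T parseWithProgressGuard(ByteBuf buf, Function<ByteBuf, T> parseOnce) {
            T result = null;
            while (result == null && buf.isReadable()) {
                int before = buf.readerIndex();
                result = parseOnce.apply(buf);
                if (result == null && buf.readerIndex() == before) {
                    break; // parser consumed nothing: bail out instead of busy-looping
                }
            }
            return result;
        }
    }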
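BitFieldParser now releases the scratch buffer it allocates, which matters once SensorhubServer switches both channel options to PooledByteBufAllocator.DEFAULT: an unreleased pooled buffer is exactly the kind of direct-memory growth the new DirectMemoryReporter is meant to expose. Releasing in finally would also cover the exception path; a generic sketch of the pattern, with the helper name mine:

    import io.netty.buffer.ByteBuf;
    import io.netty.buffer.ByteBufAllocator;
    import java.util.function.Function;

    public final class ScratchBufferSketch {

        /** Runs work against a pooled scratch buffer and always returns the buffer to the pool. */
        public static <T> T withScratchBuffer(byte[] payload, Function<ByteBuf, T> work) {
            ByteBuf buf = ByteBufAllocator.DEFAULT.buffer(payload.length);
            try {
                buf.writeBytes(payload);
                return work.apply(buf);
            } finally {
                buf.release(); // runs on success and on exception, so nothing leaks
            }
        }
    }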
com.casic.missiles.parser.GenericProtocolParser; import com.casic.missiles.parser.resolver.combined.impl.BizFieldParseProcessor; import com.casic.missiles.parser.resolver.combined.impl.PreBizFieldParseProcessor; import com.casic.missiles.parser.resolver.combined.impl.PreLeadCodeProcessor; @@ -13,13 +14,18 @@ import com.casic.missiles.pojo.CombinedFieldProcessorParam; import com.casic.missiles.pojo.FieldConfig; import com.casic.missiles.pojo.FieldRuleConfig; +import com.casic.missiles.util.SpringContextUtil; import io.netty.buffer.ByteBuf; import io.netty.buffer.ByteBufUtil; +import io.netty.util.internal.PlatformDependent; import lombok.extern.slf4j.Slf4j; +import org.springframework.util.ReflectionUtils; +import java.lang.reflect.Field; import java.util.ArrayList; import java.util.List; import java.util.Map; +import java.util.concurrent.atomic.AtomicLong; import java.util.stream.Collectors; /** @@ -31,7 +37,7 @@ public class GenericCombinedFieldResolver { /** - * 组合字段解析主要流程类 + * 组合字段解析主要流程类 * 通过查询,字段长度长度随机 */ @@ -42,16 +48,16 @@ List> storeObjectList = combinedFieldParam.getStoreObjectList(); while (byteBuf.readerIndex() < byteBuf.writerIndex()) { for (AbstractCombinedFieldProcessor abstractProcessor : abstractProcessorList) { - Integer oldLength = byteBuf.readerIndex(); - median = abstractProcessor.invoke(combinedFieldParam); - if (byteBuf.readerIndex() >= byteBuf.writerIndex()) { - return; - } - Assert.isFalse(oldLength == byteBuf.readerIndex(), () -> { - throw new EngineException(EngineExceptionEnum.COMBINED_CONFIG_MATCH_FAILED); - }); - // 将前一个节点作为参数传入下一次,保留前一个节点的信息 - combinedFieldParam.setPreProcessorResult(median); + Integer oldLength = byteBuf.readerIndex(); + median = abstractProcessor.invoke(combinedFieldParam); + if (byteBuf.readerIndex() >= byteBuf.writerIndex()) { + return; + } + Assert.isFalse(oldLength == byteBuf.readerIndex(), () -> { + throw new EngineException(EngineExceptionEnum.COMBINED_CONFIG_MATCH_FAILED); + }); + // 将前一个节点作为参数传入下一次,保留前一个节点的信息 + combinedFieldParam.setPreProcessorResult(median); } } System.out.println(JSON.toJSON(storeObjectList)); diff --git a/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/combined/impl/BizFieldParseProcessor.java b/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/combined/impl/BizFieldParseProcessor.java index 4cd61a7..f65e4eb 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/combined/impl/BizFieldParseProcessor.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/combined/impl/BizFieldParseProcessor.java @@ -62,9 +62,7 @@ * 计算处理后的偏移位置 */ private Integer calculateAfterProcessPosition(AbstractFieldConfig newProtocolFieldConfig, CombinedFieldProcessorParam combinedFieldParam) { -// Integer originPositionIndex = newProtocolFieldConfig.getOriginPositionByte() - combinedFieldParam.getByteBuf().readerIndex(); Integer mergeBitToByte = 0; -// mergeBitToByte += originPositionIndex; if (newProtocolFieldConfig.getOffsetUnit().equals("bit")) { mergeBitToByte += (newProtocolFieldConfig.getOriginPositionBit() + newProtocolFieldConfig.getOffsetLength()) / 8; } else { diff --git a/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/fields/BitFieldParser.java b/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/fields/BitFieldParser.java index a855b68..2f77154 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/fields/BitFieldParser.java +++ 
b/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/fields/BitFieldParser.java @@ -59,7 +59,9 @@ byte[] binaryBytes = getBytesFromBinaryStr(binaryStr); ByteBuf byteBuf = ByteBufAllocator.DEFAULT.buffer(); byteBuf.writeBytes(binaryBytes); - return doResolveFieldByteRule(byteBuf, ruleIds, fieldRuleConfigMap, networkOrder); + Object resolveValue = doResolveFieldByteRule(byteBuf, ruleIds, fieldRuleConfigMap, networkOrder); + byteBuf.release(); + return resolveValue; } diff --git a/sensorhub-core/src/main/java/com/casic/missiles/parser/safe/impl/Sm4.java b/sensorhub-core/src/main/java/com/casic/missiles/parser/safe/impl/Sm4.java index 18a03e5..b001944 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/parser/safe/impl/Sm4.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/parser/safe/impl/Sm4.java @@ -4,7 +4,6 @@ import io.netty.buffer.ByteBuf; import io.netty.buffer.ByteBufAllocator; import io.netty.buffer.ByteBufUtil; -import io.netty.util.ReferenceCountUtil; import lombok.extern.slf4j.Slf4j; import org.bouncycastle.crypto.engines.SM4Engine; import org.bouncycastle.crypto.params.KeyParameter;
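The BitFieldParser hunk above is the pattern this commit keeps applying: scratch buffers taken from ByteBufAllocator.DEFAULT must be released, or they show up as the direct-memory growth that DirectMemoryReporter and the new PooledByteBufAllocator options are meant to expose. As committed, release() still runs only on the normal return path; if rule resolution throws, the buffer leaks. A minimal leak-safe sketch, assuming nothing about the project beyond what the hunk shows (resolveRules is a hypothetical stand-in for doResolveFieldByteRule):

import io.netty.buffer.ByteBuf;
import io.netty.buffer.ByteBufAllocator;

class LeakSafeBitFieldSketch {

    // Hypothetical stand-in for doResolveFieldByteRule(byteBuf, ruleIds, ...)
    private Object resolveRules(ByteBuf buf) {
        return buf.readableBytes();
    }

    Object resolve(byte[] binaryBytes) {
        ByteBuf byteBuf = ByteBufAllocator.DEFAULT.buffer();
        try {
            byteBuf.writeBytes(binaryBytes);
            return resolveRules(byteBuf);
        } finally {
            // Runs on the exception path too, so the buffer always returns to the pool.
            byteBuf.release();
        }
    }
}

try/finally keeps the pool accounting correct on every exit path, which is the same reasoning behind the release() added in the hunk.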
diff --git a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerConfig.java b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerConfig.java index 24e4884..e494726 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerConfig.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerConfig.java @@ -1,100 +1,100 @@ -package com.casic.missiles.autoconfig; - -import org.apache.kafka.clients.CommonClientConfigs; -import org.apache.kafka.clients.consumer.ConsumerConfig; -import org.apache.kafka.common.config.SaslConfigs; -import org.apache.kafka.common.security.auth.SecurityProtocol; -import org.apache.kafka.common.serialization.StringDeserializer; -import org.springframework.beans.factory.annotation.Value; -import org.springframework.boot.SpringBootConfiguration; -import org.springframework.context.annotation.Bean; -import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory; -import org.springframework.kafka.config.KafkaListenerContainerFactory; -import org.springframework.kafka.core.DefaultKafkaConsumerFactory; -import org.springframework.kafka.listener.ConcurrentMessageListenerContainer; -import org.springframework.kafka.listener.ContainerProperties; - -import java.util.HashMap; -import java.util.Map; - - -/** - * @author cz - */ -@SpringBootConfiguration -public class KafkaConsumerConfig { - - @Value("${spring.kafka.consumer.bootstrap-servers}") - private String bootstrapServers; - @Value("${spring.kafka.consumer.group-id}") - private String groupId; - @Value("${spring.kafka.consumer.enable-auto-commit}") - private boolean enableAutoCommit; - @Value("${spring.kafka.properties.session.timeout.ms}") - private String sessionTimeout; - @Value("${spring.kafka.properties.max.poll.interval.ms}") - private String maxPollIntervalTime; - @Value("${spring.kafka.consumer.max-poll-records}") - private String maxPollRecords; - @Value("${spring.kafka.consumer.auto-offset-reset}") - private String autoOffsetReset; - @Value("${spring.kafka.listener.concurrency}") - private Integer concurrency; - @Value("${spring.kafka.listener.missing-topics-fatal}") - private boolean missingTopicsFatal; - - private final long pollTimeout = 600000; - - @Bean - public Map consumerConfigs() { - Map propsMap = new HashMap<>(16); - // 服务器地址,不多说配置直接用 - propsMap.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers); - // groupId不多说,直接用 - propsMap.put(ConsumerConfig.GROUP_ID_CONFIG, groupId); - //是否自动提交偏移量,默认值是true,为了避免出现重复数据和数据丢失,可以把它设置为false,然后手动提交偏移量 - propsMap.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, enableAutoCommit); - //自动提交的时间间隔,自动提交开启时生效 - propsMap.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "2000"); - //该属性指定了消费者在读取一个没有偏移量的分区或者偏移量无效的情况下该作何处理: - //我们使用latest:当各分区下有已提交的offset时,从提交的offset开始消费;无提交的offset时,消费新产生的该分区下的数据 - propsMap.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, autoOffsetReset); - //两次poll之间的最大间隔,默认值为5分钟。如果超过这个间隔会触发reBalance - propsMap.put(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, maxPollIntervalTime); - //这个参数定义了poll方法最多可以拉取多少条消息,默认值为500。 -
propsMap.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, maxPollRecords); - //当broker多久没有收到consumer的心跳请求后就触发reBalance,默认值是10s - propsMap.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, sessionTimeout); - //序列化(我们这边使用StringDeserializer,与生产者保持一致) - propsMap.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); - propsMap.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); - // 下面四个参数是用户名密码的参数,没有用户名密码可以去掉以下配置 -// propsMap.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, SecurityProtocol.SASL_PLAINTEXT.name); -// propsMap.put(SaslConfigs.SASL_MECHANISM, "PLAIN"); -// propsMap.put("java.security.auth.login.config", "10000"); - // 这里username设置用户名, password设置密码我写死到代码里了,可以更改为nacos配置 -// propsMap.put(SaslConfigs.SASL_JAAS_CONFIG, String.format("org.apache.kafka.common.security.plain.PlainLoginModule required username=\"admin\" password=\"admin1234\";")); - return propsMap; - } - - // 消费者工厂,将配置信息加载进去 - @Bean("consumerFactory") - public DefaultKafkaConsumerFactory consumerFactory() { - return new DefaultKafkaConsumerFactory(consumerConfigs()); - } - - @Bean("listenerContainerFactory") - public KafkaListenerContainerFactory> kafkaListenerContainerFactory() { - ConcurrentKafkaListenerContainerFactory factory = new ConcurrentKafkaListenerContainerFactory<>(); - factory.setConsumerFactory(consumerFactory()); - //在侦听器容器中运行的线程数,一般设置为 机器数*分区数 - factory.setConcurrency(concurrency); - //消费监听接口监听的主题不存在时,默认会报错,所以设置为false忽略错误 - factory.getContainerProperties().setMissingTopicsFatal(missingTopicsFatal); - //自动提交关闭,需要设置手动消息确认 - factory.getContainerProperties().setAckMode(ContainerProperties.AckMode.MANUAL_IMMEDIATE); - factory.getContainerProperties().setPollTimeout(pollTimeout); - return factory; - } -} - +//package com.casic.missiles.autoconfig; +// +//import org.apache.kafka.clients.CommonClientConfigs; +//import org.apache.kafka.clients.consumer.ConsumerConfig; +//import org.apache.kafka.common.config.SaslConfigs; +//import org.apache.kafka.common.security.auth.SecurityProtocol; +//import org.apache.kafka.common.serialization.StringDeserializer; +//import org.springframework.beans.factory.annotation.Value; +//import org.springframework.boot.SpringBootConfiguration; +//import org.springframework.context.annotation.Bean; +//import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory; +//import org.springframework.kafka.config.KafkaListenerContainerFactory; +//import org.springframework.kafka.core.DefaultKafkaConsumerFactory; +//import org.springframework.kafka.listener.ConcurrentMessageListenerContainer; +//import org.springframework.kafka.listener.ContainerProperties; +// +//import java.util.HashMap; +//import java.util.Map; +// +// +///** +// * @author cz +// */ +//@SpringBootConfiguration +//public class KafkaConsumerConfig { +// +// @Value("${spring.kafka.consumer.bootstrap-servers}") +// private String bootstrapServers; +// @Value("${spring.kafka.consumer.group-id}") +// private String groupId; +// @Value("${spring.kafka.consumer.enable-auto-commit}") +// private boolean enableAutoCommit; +// @Value("${spring.kafka.properties.session.timeout.ms}") +// private String sessionTimeout; +// @Value("${spring.kafka.properties.max.poll.interval.ms}") +// private String maxPollIntervalTime; +// @Value("${spring.kafka.consumer.max-poll-records}") +// private String maxPollRecords; +// @Value("${spring.kafka.consumer.auto-offset-reset}") +// private String autoOffsetReset; +// @Value("${spring.kafka.listener.concurrency}") +// private Integer 
concurrency; +// @Value("${spring.kafka.listener.missing-topics-fatal}") +// private boolean missingTopicsFatal; +// +// private final long pollTimeout = 600000; +// +// @Bean +// public Map consumerConfigs() { +// Map propsMap = new HashMap<>(16); +// // 服务器地址,不多说配置直接用 +// propsMap.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers); +// // groupId不多说,直接用 +// propsMap.put(ConsumerConfig.GROUP_ID_CONFIG, groupId); +// //是否自动提交偏移量,默认值是true,为了避免出现重复数据和数据丢失,可以把它设置为false,然后手动提交偏移量 +// propsMap.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, enableAutoCommit); +// //自动提交的时间间隔,自动提交开启时生效 +// propsMap.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "2000"); +// //该属性指定了消费者在读取一个没有偏移量的分区或者偏移量无效的情况下该作何处理: +// //我们使用latest:当各分区下有已提交的offset时,从提交的offset开始消费;无提交的offset时,消费新产生的该分区下的数据 +// propsMap.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, autoOffsetReset); +// //两次poll之间的最大间隔,默认值为5分钟。如果超过这个间隔会触发reBalance +// propsMap.put(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, maxPollIntervalTime); +// //这个参数定义了poll方法最多可以拉取多少条消息,默认值为500。 +// propsMap.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, maxPollRecords); +// //当broker多久没有收到consumer的心跳请求后就触发reBalance,默认值是10s +// propsMap.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, sessionTimeout); +// //序列化(我们这边使用StringDeserializer,与生产者保持一致) +// propsMap.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); +// propsMap.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); +// // 下面四个参数是用户名密码的参数,没有用户名密码可以去掉以下配置 +//// propsMap.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, SecurityProtocol.SASL_PLAINTEXT.name); +//// propsMap.put(SaslConfigs.SASL_MECHANISM, "PLAIN"); +//// propsMap.put("java.security.auth.login.config", "10000"); +// // 这里username设置用户名, password设置密码我写死到代码里了,可以更改为nacos配置 +//// propsMap.put(SaslConfigs.SASL_JAAS_CONFIG, String.format("org.apache.kafka.common.security.plain.PlainLoginModule required username=\"admin\" password=\"admin1234\";")); +// return propsMap; +// } +// +// // 消费者工厂,将配置信息加载进去 +// @Bean("consumerFactory") +// public DefaultKafkaConsumerFactory consumerFactory() { +// return new DefaultKafkaConsumerFactory(consumerConfigs()); +// } +// +// @Bean("listenerContainerFactory") +// public KafkaListenerContainerFactory> kafkaListenerContainerFactory() { +// ConcurrentKafkaListenerContainerFactory factory = new ConcurrentKafkaListenerContainerFactory<>(); +// factory.setConsumerFactory(consumerFactory()); +// //在侦听器容器中运行的线程数,一般设置为 机器数*分区数 +// factory.setConcurrency(concurrency); +// //消费监听接口监听的主题不存在时,默认会报错,所以设置为false忽略错误 +// factory.getContainerProperties().setMissingTopicsFatal(missingTopicsFatal); +// //自动提交关闭,需要设置手动消息确认 +// factory.getContainerProperties().setAckMode(ContainerProperties.AckMode.MANUAL_IMMEDIATE); +// factory.getContainerProperties().setPollTimeout(pollTimeout); +// return factory; +// } +//} +// diff --git a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerListener.java b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerListener.java index c50a4e3..6600312 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerListener.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerListener.java @@ -1,31 +1,31 @@ -package com.casic.missiles.autoconfig; - -import org.apache.kafka.clients.consumer.ConsumerRecord; -import org.springframework.kafka.annotation.KafkaListener; -import org.springframework.kafka.config.KafkaListenerEndpointRegistry; -import 
org.springframework.kafka.support.Acknowledgment; -import org.springframework.web.bind.annotation.RestController; - -import javax.annotation.Resource; - -@RestController() -public class KafkaConsumerListener{ - @Resource - private KafkaListenerEndpointRegistry kafkaListenerEndpointRegistry; - /** - * 监听kafka消息 - * - * 使用autoStartup = "false"必须指定id - */ - @KafkaListener(id = "${spring.kafka.consumer.group-id}", topics = {"KAFKA_TEST_TOPICS"}, autoStartup = "true") - public void listenTopics(ConsumerRecord consumerRecord, Acknowledgment ack) { - try { - System.out.println("listenTopics接受消息:" + consumerRecord.value()); - //手动确认 - ack.acknowledge(); - } catch (Exception e) { - System.out.println("消费失败:" + e); - } - } - -} +//package com.casic.missiles.autoconfig; +// +//import org.apache.kafka.clients.consumer.ConsumerRecord; +//import org.springframework.kafka.annotation.KafkaListener; +//import org.springframework.kafka.config.KafkaListenerEndpointRegistry; +//import org.springframework.kafka.support.Acknowledgment; +//import org.springframework.web.bind.annotation.RestController; +// +//import javax.annotation.Resource; +// +//@RestController() +//public class KafkaConsumerListener{ +// @Resource +// private KafkaListenerEndpointRegistry kafkaListenerEndpointRegistry; +// /** +// * 监听kafka消息 +// * +// * 使用autoStartup = "false"必须指定id +// */ +// @KafkaListener(id = "${spring.kafka.consumer.group-id}", topics = {"KAFKA_TEST_TOPICS"}, autoStartup = "true") +// public void listenTopics(ConsumerRecord consumerRecord, Acknowledgment ack) { +// try { +// System.out.println("listenTopics接受消息:" + consumerRecord.value()); +// //手动确认 +// ack.acknowledge(); +// } catch (Exception e) { +// System.out.println("消费失败:" + e); +// } +// } +// +//} diff --git a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerListenerError.java b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerListenerError.java index d6f771a..dbc561c 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerListenerError.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerListenerError.java @@ -1,29 +1,29 @@ -package com.casic.missiles.autoconfig; - -import edu.umd.cs.findbugs.annotations.NonNull; -import org.apache.kafka.clients.consumer.Consumer; -import org.springframework.kafka.listener.KafkaListenerErrorHandler; -import org.springframework.kafka.listener.ListenerExecutionFailedException; -import org.springframework.messaging.Message; -import org.springframework.stereotype.Component; - -@Component -public class KafkaConsumerListenerError implements KafkaListenerErrorHandler { - - - @Override - @NonNull - public Object handleError(Message message, ListenerExecutionFailedException e) { - return new Object(); - } - - @Override - public Object handleError(Message message, ListenerExecutionFailedException exception, Consumer consumer) { - System.out.println("消息详情:" + message); - System.out.println("异常信息::" + exception); - System.out.println("消费者详情::" + consumer.groupMetadata()); - System.out.println("监听主题::" + consumer.listTopics()); - return KafkaListenerErrorHandler.super.handleError(message, exception, consumer); - } - -} +//package com.casic.missiles.autoconfig; +// +//import edu.umd.cs.findbugs.annotations.NonNull; +//import org.apache.kafka.clients.consumer.Consumer; +//import org.springframework.kafka.listener.KafkaListenerErrorHandler; +//import org.springframework.kafka.listener.ListenerExecutionFailedException; +//import 
org.springframework.messaging.Message; +//import org.springframework.stereotype.Component; +// +//@Component +//public class KafkaConsumerListenerError implements KafkaListenerErrorHandler { +// +// +// @Override +// @NonNull +// public Object handleError(Message message, ListenerExecutionFailedException e) { +// return new Object(); +// } +// +// @Override +// public Object handleError(Message message, ListenerExecutionFailedException exception, Consumer consumer) { +// System.out.println("消息详情:" + message); +// System.out.println("异常信息::" + exception); +// System.out.println("消费者详情::" + consumer.groupMetadata()); +// System.out.println("监听主题::" + consumer.listTopics()); +// return KafkaListenerErrorHandler.super.handleError(message, exception, consumer); +// } +// +//} diff --git a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaProviderConfig.java b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaProviderConfig.java index e8e36ec..1d605ed 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaProviderConfig.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaProviderConfig.java @@ -1,85 +1,85 @@ -package com.casic.missiles.autoconfig; - -import org.apache.kafka.clients.CommonClientConfigs; -import org.apache.kafka.clients.producer.ProducerConfig; -import org.apache.kafka.common.config.SaslConfigs; -import org.apache.kafka.common.security.auth.SecurityProtocol; -import org.apache.kafka.common.serialization.StringSerializer; -import org.springframework.beans.factory.annotation.Value; -import org.springframework.boot.SpringBootConfiguration; -import org.springframework.context.annotation.Bean; -import org.springframework.context.annotation.Primary; -import org.springframework.kafka.core.DefaultKafkaProducerFactory; -import org.springframework.kafka.core.KafkaTemplate; -import org.springframework.kafka.core.ProducerFactory; -import org.springframework.kafka.transaction.KafkaTransactionManager; - -import java.util.HashMap; -import java.util.Map; - -/** - * @author cz - */ -@SpringBootConfiguration -public class KafkaProviderConfig { - - @Value("${spring.kafka.producer.bootstrap-servers}") - private String bootstrapServers; - @Value("${spring.kafka.producer.acks}") - private String acks; - @Value("${spring.kafka.producer.retries}") - private String retries; - @Value("${spring.kafka.producer.batch-size}") - private String batchSize; - @Value("${spring.kafka.producer.buffer-memory}") - private String bufferMemory; - - @Bean - public Map producerConfigs() { - Map props = new HashMap<>(16); - props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers); - //响应模式,我们使用acks=all :只有当所有参与复制的节点全部收到消息时,生产者才会收到一个来自服务器的成功响应。 - props.put(ProducerConfig.ACKS_CONFIG, acks); - //发生错误后,消息重发的次数,开启事务必须大于0 - props.put(ProducerConfig.RETRIES_CONFIG, retries); - //当多个消息发送到相同分区时,生产者会将消息打包到一起,以减少请求交互. 而不是一条条发送 - props.put(ProducerConfig.BATCH_SIZE_CONFIG, batchSize); - //有的时刻消息比较少,过了很久,比如5min也没有凑够16KB,这样延时就很大,所以需要一个参数. 
再设置一个时间,到了这个时间, - props.put(ProducerConfig.LINGER_MS_CONFIG, "5000"); - //生产者内存缓冲区的大小 - props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, bufferMemory); - //序列和消费者对应 - props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class); - props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class); - - //用户名密码配置,没有用户名密码可以去掉以下配置 -// props.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, SecurityProtocol.SASL_PLAINTEXT.name); -// props.put(SaslConfigs.SASL_MECHANISM, "PLAIN"); -// props.put("java.security.auth.login.config", "10000"); - // 可以在nacos配置文件中配置 -// props.put(SaslConfigs.SASL_JAAS_CONFIG, String.format("org.apache.kafka.common.security.plain.PlainLoginModule required username=\"admin\" password=\"admin1234\";")); - return props; - } - - // 生产者工厂 - @Bean("kafkaProduceFactory") - public ProducerFactory producerFactory() { - DefaultKafkaProducerFactory factory = new DefaultKafkaProducerFactory<>(producerConfigs()); - factory.setTransactionIdPrefix("kafkaTx-"); - return factory; - } - - // 事务处理 - // 这里的事务处理会和项目中的其他事务起冲突,所以我一般会把@Bean去掉,不用spring代理 - @Bean("kafkaTransactionManager") - @Primary - public KafkaTransactionManager kafkaTransactionManager(ProducerFactory producerFactory) { - return new KafkaTransactionManager(producerFactory); - } - - @Bean - public KafkaTemplate kafkaTemplate() { - return new KafkaTemplate<>(producerFactory()); - } - -} +//package com.casic.missiles.autoconfig; +// +//import org.apache.kafka.clients.CommonClientConfigs; +//import org.apache.kafka.clients.producer.ProducerConfig; +//import org.apache.kafka.common.config.SaslConfigs; +//import org.apache.kafka.common.security.auth.SecurityProtocol; +//import org.apache.kafka.common.serialization.StringSerializer; +//import org.springframework.beans.factory.annotation.Value; +//import org.springframework.boot.SpringBootConfiguration; +//import org.springframework.context.annotation.Bean; +//import org.springframework.context.annotation.Primary; +//import org.springframework.kafka.core.DefaultKafkaProducerFactory; +//import org.springframework.kafka.core.KafkaTemplate; +//import org.springframework.kafka.core.ProducerFactory; +//import org.springframework.kafka.transaction.KafkaTransactionManager; +// +//import java.util.HashMap; +//import java.util.Map; +// +///** +// * @author cz +// */ +//@SpringBootConfiguration +//public class KafkaProviderConfig { +// +// @Value("${spring.kafka.producer.bootstrap-servers}") +// private String bootstrapServers; +// @Value("${spring.kafka.producer.acks}") +// private String acks; +// @Value("${spring.kafka.producer.retries}") +// private String retries; +// @Value("${spring.kafka.producer.batch-size}") +// private String batchSize; +// @Value("${spring.kafka.producer.buffer-memory}") +// private String bufferMemory; +// +// @Bean +// public Map producerConfigs() { +// Map props = new HashMap<>(16); +// props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers); +// //响应模式,我们使用acks=all :只有当所有参与复制的节点全部收到消息时,生产者才会收到一个来自服务器的成功响应。 +// props.put(ProducerConfig.ACKS_CONFIG, acks); +// //发生错误后,消息重发的次数,开启事务必须大于0 +// props.put(ProducerConfig.RETRIES_CONFIG, retries); +// //当多个消息发送到相同分区时,生产者会将消息打包到一起,以减少请求交互. 而不是一条条发送 +// props.put(ProducerConfig.BATCH_SIZE_CONFIG, batchSize); +// //有的时刻消息比较少,过了很久,比如5min也没有凑够16KB,这样延时就很大,所以需要一个参数. 
再设置一个时间,到了这个时间, +// props.put(ProducerConfig.LINGER_MS_CONFIG, "5000"); +// //生产者内存缓冲区的大小 +// props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, bufferMemory); +// //序列和消费者对应 +// props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class); +// props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class); +// +// //用户名密码配置,没有用户名密码可以去掉以下配置 +//// props.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, SecurityProtocol.SASL_PLAINTEXT.name); +//// props.put(SaslConfigs.SASL_MECHANISM, "PLAIN"); +//// props.put("java.security.auth.login.config", "10000"); +// // 可以在nacos配置文件中配置 +//// props.put(SaslConfigs.SASL_JAAS_CONFIG, String.format("org.apache.kafka.common.security.plain.PlainLoginModule required username=\"admin\" password=\"admin1234\";")); +// return props; +// } +// +// // 生产者工厂 +// @Bean("kafkaProduceFactory") +// public ProducerFactory producerFactory() { +// DefaultKafkaProducerFactory factory = new DefaultKafkaProducerFactory<>(producerConfigs()); +// factory.setTransactionIdPrefix("kafkaTx-"); +// return factory; +// } +// +// // 事务处理 +// // 这里的事务处理会和项目中的其他事务起冲突,所以我一般会把@Bean去掉,不用spring代理 +// @Bean("kafkaTransactionManager") +// @Primary +// public KafkaTransactionManager kafkaTransactionManager(ProducerFactory producerFactory) { +// return new KafkaTransactionManager(producerFactory); +// } +// +// @Bean +// public KafkaTemplate kafkaTemplate() { +// return new KafkaTemplate<>(producerFactory()); +// } +// +//} diff --git a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaSendResultHandler.java b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaSendResultHandler.java index 1fdac64..1598ee0 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaSendResultHandler.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaSendResultHandler.java @@ -1,22 +1,22 @@ -package com.casic.missiles.autoconfig; - -import org.apache.kafka.clients.producer.ProducerRecord; -import org.apache.kafka.clients.producer.RecordMetadata; -import org.springframework.kafka.support.ProducerListener; -import org.springframework.stereotype.Component; - -import javax.annotation.Nullable; - -@Component -public class KafkaSendResultHandler implements ProducerListener { - - @Override - public void onSuccess(ProducerRecord producerRecord, RecordMetadata recordMetadata) { - System.out.println("消息发送成功:" + producerRecord.toString()); - } - - @Override - public void onError(ProducerRecord producerRecord, @Nullable RecordMetadata recordMetadata, Exception exception) { - System.out.println("消息发送失败:" + producerRecord.toString() + exception.getMessage()); - } -} +//package com.casic.missiles.autoconfig; +// +//import org.apache.kafka.clients.producer.ProducerRecord; +//import org.apache.kafka.clients.producer.RecordMetadata; +//import org.springframework.kafka.support.ProducerListener; +//import org.springframework.stereotype.Component; +// +//import javax.annotation.Nullable; +// +//@Component +//public class KafkaSendResultHandler implements ProducerListener { +// +// @Override +// public void onSuccess(ProducerRecord producerRecord, RecordMetadata recordMetadata) { +// System.out.println("消息发送成功:" + producerRecord.toString()); +// } +// +// @Override +// public void onError(ProducerRecord producerRecord, @Nullable RecordMetadata recordMetadata, Exception exception) { +// System.out.println("消息发送失败:" + producerRecord.toString() + exception.getMessage()); +// } +//} diff --git 
a/sensorhub-core/src/main/java/com/casic/missiles/parser/sender/impl/KafkaSubscribe.java b/sensorhub-core/src/main/java/com/casic/missiles/parser/sender/impl/KafkaSubscribe.java index aed848d..b95d1d8 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/parser/sender/impl/KafkaSubscribe.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/parser/sender/impl/KafkaSubscribe.java @@ -1,91 +1,92 @@ -package com.casic.missiles.parser.sender.impl; - -import cn.hutool.core.date.DateUtil; -import com.alibaba.fastjson.JSON; -import com.casic.missiles.autoconfig.KafkaSendResultHandler; -import com.casic.missiles.parser.sender.DataSubscribeProvider; -import com.casic.missiles.pojo.SubscribeDetailConfig; -import org.springframework.kafka.core.KafkaTemplate; -import org.springframework.stereotype.Component; -import org.springframework.transaction.annotation.Transactional; -import org.springframework.util.CollectionUtils; - -import java.util.Date; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -/** - * @author cz - * @date 2023-11-10 - */ -@Component("kafka") -public class KafkaSubscribe implements DataSubscribeProvider { - - private KafkaTemplate kafkaTemplate; - - public KafkaSubscribe(KafkaTemplate kafkaTemplate, KafkaSendResultHandler kafkaSendResultHandler) { - this.kafkaTemplate = kafkaTemplate; - //回调方法、异常处理 - this.kafkaTemplate.setProducerListener(kafkaSendResultHandler); - } - - @Override - @Transactional - public void publishDataSubscribe(List> bizDataMapList, SubscribeDetailConfig subscribeDetailConfig) { - if (CollectionUtils.isEmpty(bizDataMapList)) { - return; - } - Map contentMap = new HashMap(), mBody = new HashMap(); - Map bizDataMap = bizDataMapList.get(0); - switch ((Integer) bizDataMap.get("deviceType")) { - case 32: - contentMap.put("devType", "GasDetector"); - setEventType(bizDataMap, contentMap, mBody, "GasConfigSuccess", "GasConfigFail"); - break; - case 31: - contentMap.put("devType", "Pressure"); - setEventType(bizDataMap, contentMap, mBody, "PressureConfigSuccess", "PressureConfigFail"); - } - - if (bizDataMap.containsKey("dataValue")) { - contentMap.put("mType", "Data"); - if (bizDataMap.containsKey("cell")) { - mBody.put("cell", bizDataMap.get("cell")); - } - mBody.put("datas", bizDataMapList); - sendKafkaMsg(bizDataMap, contentMap, mBody); - } - if (bizDataMap.containsKey("imei")) { - contentMap.put("mType", "StartupRequest"); - mBody.put("iccid", bizDataMap.get("iccid")); - mBody.put("imei", bizDataMap.get("imei")); - sendKafkaMsg(bizDataMap, contentMap, mBody); - } - - } - - private void setEventType(Map bizDataMap, Map contentMap, Map mBody, String bTypeSuccess,String bTypeFail) { - if (bizDataMap.containsKey("config")) { - contentMap.put("mType", "SetResponse"); - contentMap.put("ts", DateUtil.format(new Date(), "yyyyMMddHHmmss")); - if ("1".equals(bizDataMap.get("config"))) { - mBody.put("bType", bTypeSuccess); - }else { - mBody.put("bType", bTypeFail); - } - sendKafkaMsg(bizDataMap, contentMap, mBody); - } - - } - - - private void sendKafkaMsg(Map bizDataMap, Map contentMap, Map mBody) { - if (bizDataMap.containsKey("devcode")) { - contentMap.put("devCode", bizDataMap.get("devcode")); - } - contentMap.put("mBody", mBody); - kafkaTemplate.send("pressure", JSON.toJSONString(contentMap)); - } - -} +//package com.casic.missiles.parser.sender.impl; +// +//import cn.hutool.core.date.DateUtil; +//import com.alibaba.fastjson.JSON; +//import com.casic.missiles.autoconfig.KafkaSendResultHandler; +//import 
com.casic.missiles.parser.sender.DataSubscribeProvider; +//import com.casic.missiles.pojo.SubscribeDetailConfig; +//import org.springframework.kafka.core.KafkaTemplate; +//import org.springframework.stereotype.Component; +//import org.springframework.transaction.annotation.Transactional; +//import org.springframework.util.CollectionUtils; +// +//import java.util.Date; +//import java.util.HashMap; +//import java.util.List; +//import java.util.Map; +// +///** +// * @author cz +// * @date 2023-11-10 +// */ +//@Component("kafka") +//public class KafkaSubscribe implements DataSubscribeProvider { +// +// private KafkaTemplate kafkaTemplate; +// +// public KafkaSubscribe(KafkaTemplate kafkaTemplate, KafkaSendResultHandler kafkaSendResultHandler) { +// this.kafkaTemplate = kafkaTemplate; +// //回调方法、异常处理 +// this.kafkaTemplate.setProducerListener(kafkaSendResultHandler); +// } +// +// @Override +// @Transactional +// public void publishDataSubscribe(List> bizDataMapList, SubscribeDetailConfig subscribeDetailConfig) { +// if (CollectionUtils.isEmpty(bizDataMapList)) { +// return; +// } +// Map contentMap = new HashMap(), mBody = new HashMap(); +// Map bizDataMap = bizDataMapList.get(0); +// switch ((Integer) bizDataMap.get("deviceType")) { +// case 32: +// contentMap.put("devType", "GasDetector"); +// setEventType(bizDataMap, contentMap, mBody, "GasConfigSuccess", "GasConfigFail"); +// break; +// case 31: +// contentMap.put("devType", "Pressure"); +// setEventType(bizDataMap, contentMap, mBody, "PressureConfigSuccess", "PressureConfigFail"); +// } +// +// if (bizDataMap.containsKey("dataValue")) { +// contentMap.put("mType", "Data"); +// if (bizDataMap.containsKey("cell")) { +// mBody.put("cell", bizDataMap.get("cell")); +// } +// mBody.put("datas", bizDataMapList); +// sendKafkaMsg(bizDataMap, contentMap, mBody); +// } +// if (bizDataMap.containsKey("imei")) { +// contentMap.put("mType", "StartupRequest"); +// mBody.put("iccid", bizDataMap.get("iccid")); +// mBody.put("imei", bizDataMap.get("imei")); +// sendKafkaMsg(bizDataMap, contentMap, mBody); +// } +// +// } +// +// //设置下发配置回复 +// private void setEventType(Map bizDataMap, Map contentMap, Map mBody, String bTypeSuccess,String bTypeFail) { +// if (bizDataMap.containsKey("config")) { +// contentMap.put("mType", "SetResponse"); +// contentMap.put("ts", DateUtil.format(new Date(), "yyyyMMddHHmmss")); +// if ("1".equals(bizDataMap.get("config"))) { +// mBody.put("bType", bTypeSuccess); +// }else { +// mBody.put("bType", bTypeFail); +// } +// sendKafkaMsg(bizDataMap, contentMap, mBody); +// } +// +// } +// +// //设置kafka回复 +// private void sendKafkaMsg(Map bizDataMap, Map contentMap, Map mBody) { +// if (bizDataMap.containsKey("devcode")) { +// contentMap.put("devCode", bizDataMap.get("devcode")); +// } +// contentMap.put("mBody", mBody); +// kafkaTemplate.send("pressure", JSON.toJSONString(contentMap)); +// } +// +//} diff --git a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerConfig.java b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerConfig.java index 24e4884..e494726 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerConfig.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerConfig.java @@ -1,100 +1,100 @@ -package com.casic.missiles.autoconfig; - -import org.apache.kafka.clients.CommonClientConfigs; -import org.apache.kafka.clients.consumer.ConsumerConfig; -import org.apache.kafka.common.config.SaslConfigs; -import org.apache.kafka.common.security.auth.SecurityProtocol; -import org.apache.kafka.common.serialization.StringDeserializer; -import org.springframework.beans.factory.annotation.Value; -import org.springframework.boot.SpringBootConfiguration; -import org.springframework.context.annotation.Bean; -import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory; -import org.springframework.kafka.config.KafkaListenerContainerFactory; -import org.springframework.kafka.core.DefaultKafkaConsumerFactory; -import org.springframework.kafka.listener.ConcurrentMessageListenerContainer; -import org.springframework.kafka.listener.ContainerProperties; - -import java.util.HashMap; -import java.util.Map; - - -/** - * @author cz
- */ -@SpringBootConfiguration -public class KafkaConsumerConfig { - - @Value("${spring.kafka.consumer.bootstrap-servers}") - private String bootstrapServers; - @Value("${spring.kafka.consumer.group-id}") - private String groupId; - @Value("${spring.kafka.consumer.enable-auto-commit}") - private boolean enableAutoCommit; - @Value("${spring.kafka.properties.session.timeout.ms}") - private String sessionTimeout; - @Value("${spring.kafka.properties.max.poll.interval.ms}") - private String maxPollIntervalTime; - @Value("${spring.kafka.consumer.max-poll-records}") - private String maxPollRecords; - @Value("${spring.kafka.consumer.auto-offset-reset}") - private String autoOffsetReset; - @Value("${spring.kafka.listener.concurrency}") - private Integer concurrency; - @Value("${spring.kafka.listener.missing-topics-fatal}") - private boolean missingTopicsFatal; - - private final long pollTimeout = 600000; - - @Bean - public Map consumerConfigs() { - Map propsMap = new HashMap<>(16); - // 服务器地址,不多说配置直接用 - propsMap.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers); - // groupId不多说,直接用 - propsMap.put(ConsumerConfig.GROUP_ID_CONFIG, groupId); - //是否自动提交偏移量,默认值是true,为了避免出现重复数据和数据丢失,可以把它设置为false,然后手动提交偏移量 - propsMap.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, enableAutoCommit); - //自动提交的时间间隔,自动提交开启时生效 - propsMap.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "2000"); - //该属性指定了消费者在读取一个没有偏移量的分区或者偏移量无效的情况下该作何处理: - //我们使用latest:当各分区下有已提交的offset时,从提交的offset开始消费;无提交的offset时,消费新产生的该分区下的数据 - propsMap.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, autoOffsetReset); - //两次poll之间的最大间隔,默认值为5分钟。如果超过这个间隔会触发reBalance - propsMap.put(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, maxPollIntervalTime); - //这个参数定义了poll方法最多可以拉取多少条消息,默认值为500。 - propsMap.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, maxPollRecords); - //当broker多久没有收到consumer的心跳请求后就触发reBalance,默认值是10s - propsMap.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, sessionTimeout); - //序列化(我们这边使用StringDeserializer,与生产者保持一致) - propsMap.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); - propsMap.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); - // 下面四个参数是用户名密码的参数,没有用户名密码可以去掉以下配置 -// propsMap.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, SecurityProtocol.SASL_PLAINTEXT.name); -// propsMap.put(SaslConfigs.SASL_MECHANISM, "PLAIN"); -// propsMap.put("java.security.auth.login.config", "10000"); - // 这里username设置用户名, password设置密码我写死到代码里了,可以更改为nacos配置 -// propsMap.put(SaslConfigs.SASL_JAAS_CONFIG, String.format("org.apache.kafka.common.security.plain.PlainLoginModule required username=\"admin\" password=\"admin1234\";")); - return propsMap; - } - - // 消费者工厂,将配置信息加载进去 - @Bean("consumerFactory") - public DefaultKafkaConsumerFactory consumerFactory() { - return new DefaultKafkaConsumerFactory(consumerConfigs()); - } - - @Bean("listenerContainerFactory") - public KafkaListenerContainerFactory> kafkaListenerContainerFactory() { - ConcurrentKafkaListenerContainerFactory factory = new ConcurrentKafkaListenerContainerFactory<>(); - factory.setConsumerFactory(consumerFactory()); - //在侦听器容器中运行的线程数,一般设置为 机器数*分区数 - factory.setConcurrency(concurrency); - //消费监听接口监听的主题不存在时,默认会报错,所以设置为false忽略错误 - factory.getContainerProperties().setMissingTopicsFatal(missingTopicsFatal); - //自动提交关闭,需要设置手动消息确认 - factory.getContainerProperties().setAckMode(ContainerProperties.AckMode.MANUAL_IMMEDIATE); - factory.getContainerProperties().setPollTimeout(pollTimeout); - return factory; - } -} - +//package com.casic.missiles.autoconfig; +// 
+//import org.apache.kafka.clients.CommonClientConfigs; +//import org.apache.kafka.clients.consumer.ConsumerConfig; +//import org.apache.kafka.common.config.SaslConfigs; +//import org.apache.kafka.common.security.auth.SecurityProtocol; +//import org.apache.kafka.common.serialization.StringDeserializer; +//import org.springframework.beans.factory.annotation.Value; +//import org.springframework.boot.SpringBootConfiguration; +//import org.springframework.context.annotation.Bean; +//import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory; +//import org.springframework.kafka.config.KafkaListenerContainerFactory; +//import org.springframework.kafka.core.DefaultKafkaConsumerFactory; +//import org.springframework.kafka.listener.ConcurrentMessageListenerContainer; +//import org.springframework.kafka.listener.ContainerProperties; +// +//import java.util.HashMap; +//import java.util.Map; +// +// +///** +// * @author cz +// */ +//@SpringBootConfiguration +//public class KafkaConsumerConfig { +// +// @Value("${spring.kafka.consumer.bootstrap-servers}") +// private String bootstrapServers; +// @Value("${spring.kafka.consumer.group-id}") +// private String groupId; +// @Value("${spring.kafka.consumer.enable-auto-commit}") +// private boolean enableAutoCommit; +// @Value("${spring.kafka.properties.session.timeout.ms}") +// private String sessionTimeout; +// @Value("${spring.kafka.properties.max.poll.interval.ms}") +// private String maxPollIntervalTime; +// @Value("${spring.kafka.consumer.max-poll-records}") +// private String maxPollRecords; +// @Value("${spring.kafka.consumer.auto-offset-reset}") +// private String autoOffsetReset; +// @Value("${spring.kafka.listener.concurrency}") +// private Integer concurrency; +// @Value("${spring.kafka.listener.missing-topics-fatal}") +// private boolean missingTopicsFatal; +// +// private final long pollTimeout = 600000; +// +// @Bean +// public Map consumerConfigs() { +// Map propsMap = new HashMap<>(16); +// // 服务器地址,不多说配置直接用 +// propsMap.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers); +// // groupId不多说,直接用 +// propsMap.put(ConsumerConfig.GROUP_ID_CONFIG, groupId); +// //是否自动提交偏移量,默认值是true,为了避免出现重复数据和数据丢失,可以把它设置为false,然后手动提交偏移量 +// propsMap.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, enableAutoCommit); +// //自动提交的时间间隔,自动提交开启时生效 +// propsMap.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "2000"); +// //该属性指定了消费者在读取一个没有偏移量的分区或者偏移量无效的情况下该作何处理: +// //我们使用latest:当各分区下有已提交的offset时,从提交的offset开始消费;无提交的offset时,消费新产生的该分区下的数据 +// propsMap.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, autoOffsetReset); +// //两次poll之间的最大间隔,默认值为5分钟。如果超过这个间隔会触发reBalance +// propsMap.put(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, maxPollIntervalTime); +// //这个参数定义了poll方法最多可以拉取多少条消息,默认值为500。 +// propsMap.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, maxPollRecords); +// //当broker多久没有收到consumer的心跳请求后就触发reBalance,默认值是10s +// propsMap.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, sessionTimeout); +// //序列化(我们这边使用StringDeserializer,与生产者保持一致) +// propsMap.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); +// propsMap.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); +// // 下面四个参数是用户名密码的参数,没有用户名密码可以去掉以下配置 +//// propsMap.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, SecurityProtocol.SASL_PLAINTEXT.name); +//// propsMap.put(SaslConfigs.SASL_MECHANISM, "PLAIN"); +//// propsMap.put("java.security.auth.login.config", "10000"); +// // 这里username设置用户名, password设置密码我写死到代码里了,可以更改为nacos配置 +//// 
propsMap.put(SaslConfigs.SASL_JAAS_CONFIG, String.format("org.apache.kafka.common.security.plain.PlainLoginModule required username=\"admin\" password=\"admin1234\";")); +// return propsMap; +// } +// +// // Consumer factory: load the configuration into it +// @Bean("consumerFactory") +// public DefaultKafkaConsumerFactory consumerFactory() { +// return new DefaultKafkaConsumerFactory(consumerConfigs()); +// } +// +// @Bean("listenerContainerFactory") +// public KafkaListenerContainerFactory> kafkaListenerContainerFactory() { +// ConcurrentKafkaListenerContainerFactory factory = new ConcurrentKafkaListenerContainerFactory<>(); +// factory.setConsumerFactory(consumerFactory()); +// //number of threads running in the listener container, usually machine count * partition count +// factory.setConcurrency(concurrency); +// //a missing listened-to topic is fatal by default, so set false to ignore the error +// factory.getContainerProperties().setMissingTopicsFatal(missingTopicsFatal); +// //auto-commit is off, so manual acknowledgment has to be configured +// factory.getContainerProperties().setAckMode(ContainerProperties.AckMode.MANUAL_IMMEDIATE); +// factory.getContainerProperties().setPollTimeout(pollTimeout); +// return factory; +// } +//} +// diff --git a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerListener.java b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerListener.java index c50a4e3..6600312 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerListener.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerListener.java @@ -1,31 +1,31 @@ -package com.casic.missiles.autoconfig; - -import org.apache.kafka.clients.consumer.ConsumerRecord; -import org.springframework.kafka.annotation.KafkaListener; -import org.springframework.kafka.config.KafkaListenerEndpointRegistry; -import org.springframework.kafka.support.Acknowledgment; -import org.springframework.web.bind.annotation.RestController; - -import javax.annotation.Resource; - -@RestController() -public class KafkaConsumerListener{ - @Resource - private KafkaListenerEndpointRegistry kafkaListenerEndpointRegistry; - /** - * Listen for kafka messages - * - * when autoStartup = "false" is used, an id must be specified - */ - @KafkaListener(id = "${spring.kafka.consumer.group-id}", topics = {"KAFKA_TEST_TOPICS"}, autoStartup = "true") - public void listenTopics(ConsumerRecord consumerRecord, Acknowledgment ack) { - try { - System.out.println("listenTopics received message: " + consumerRecord.value()); - //manual acknowledgment - ack.acknowledge(); - } catch (Exception e) { - System.out.println("consume failed: " + e); - } - } - -} +//package com.casic.missiles.autoconfig; +// +//import org.apache.kafka.clients.consumer.ConsumerRecord; +//import org.springframework.kafka.annotation.KafkaListener; +//import org.springframework.kafka.config.KafkaListenerEndpointRegistry; +//import org.springframework.kafka.support.Acknowledgment; +//import org.springframework.web.bind.annotation.RestController; +// +//import javax.annotation.Resource; +// +//@RestController() +//public class KafkaConsumerListener{ +// @Resource +// private KafkaListenerEndpointRegistry kafkaListenerEndpointRegistry; +// /** +// * Listen for kafka messages +// * +// * when autoStartup = "false" is used, an id must be specified +// */ +// @KafkaListener(id = "${spring.kafka.consumer.group-id}", topics = {"KAFKA_TEST_TOPICS"}, autoStartup = "true") +// public void listenTopics(ConsumerRecord consumerRecord, Acknowledgment ack) { +// try { +// System.out.println("listenTopics received message: " + consumerRecord.value()); +// //manual acknowledgment +// ack.acknowledge(); +// } catch (Exception e) { +// System.out.println("consume failed: " + e); +// } +// } +// +//}
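Because the container factory above runs with AckMode.MANUAL_IMMEDIATE, any listener wired to it has to acknowledge each record itself; otherwise offsets are never committed. A minimal usage sketch against that factory (topic, group id, and class name are placeholders, not values from the project):

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.kafka.support.Acknowledgment;
import org.springframework.stereotype.Component;

@Component
public class ManualAckListenerSketch {

    @KafkaListener(topics = "SENSOR_TOPIC", groupId = "sensorhub",
            containerFactory = "listenerContainerFactory")
    public void onMessage(ConsumerRecord<String, String> record, Acknowledgment ack) {
        try {
            process(record.value());
            ack.acknowledge(); // commit only after the record was processed successfully
        } catch (Exception e) {
            // not acknowledged: the offset stays uncommitted and the record is redelivered
        }
    }

    private void process(String payload) { /* business handling goes here */ }
}

diff --git 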
a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerListenerError.java b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerListenerError.java index d6f771a..dbc561c 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerListenerError.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerListenerError.java @@ -1,29 +1,29 @@ -package com.casic.missiles.autoconfig; - -import edu.umd.cs.findbugs.annotations.NonNull; -import org.apache.kafka.clients.consumer.Consumer; -import org.springframework.kafka.listener.KafkaListenerErrorHandler; -import org.springframework.kafka.listener.ListenerExecutionFailedException; -import org.springframework.messaging.Message; -import org.springframework.stereotype.Component; - -@Component -public class KafkaConsumerListenerError implements KafkaListenerErrorHandler { - - - @Override - @NonNull - public Object handleError(Message message, ListenerExecutionFailedException e) { - return new Object(); - } - - @Override - public Object handleError(Message message, ListenerExecutionFailedException exception, Consumer consumer) { - System.out.println("消息详情:" + message); - System.out.println("异常信息::" + exception); - System.out.println("消费者详情::" + consumer.groupMetadata()); - System.out.println("监听主题::" + consumer.listTopics()); - return KafkaListenerErrorHandler.super.handleError(message, exception, consumer); - } - -} +//package com.casic.missiles.autoconfig; +// +//import edu.umd.cs.findbugs.annotations.NonNull; +//import org.apache.kafka.clients.consumer.Consumer; +//import org.springframework.kafka.listener.KafkaListenerErrorHandler; +//import org.springframework.kafka.listener.ListenerExecutionFailedException; +//import org.springframework.messaging.Message; +//import org.springframework.stereotype.Component; +// +//@Component +//public class KafkaConsumerListenerError implements KafkaListenerErrorHandler { +// +// +// @Override +// @NonNull +// public Object handleError(Message message, ListenerExecutionFailedException e) { +// return new Object(); +// } +// +// @Override +// public Object handleError(Message message, ListenerExecutionFailedException exception, Consumer consumer) { +// System.out.println("消息详情:" + message); +// System.out.println("异常信息::" + exception); +// System.out.println("消费者详情::" + consumer.groupMetadata()); +// System.out.println("监听主题::" + consumer.listTopics()); +// return KafkaListenerErrorHandler.super.handleError(message, exception, consumer); +// } +// +//} diff --git a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaProviderConfig.java b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaProviderConfig.java index e8e36ec..1d605ed 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaProviderConfig.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaProviderConfig.java @@ -1,85 +1,85 @@ -package com.casic.missiles.autoconfig; - -import org.apache.kafka.clients.CommonClientConfigs; -import org.apache.kafka.clients.producer.ProducerConfig; -import org.apache.kafka.common.config.SaslConfigs; -import org.apache.kafka.common.security.auth.SecurityProtocol; -import org.apache.kafka.common.serialization.StringSerializer; -import org.springframework.beans.factory.annotation.Value; -import org.springframework.boot.SpringBootConfiguration; -import org.springframework.context.annotation.Bean; -import org.springframework.context.annotation.Primary; -import 
org.springframework.kafka.core.DefaultKafkaProducerFactory; -import org.springframework.kafka.core.KafkaTemplate; -import org.springframework.kafka.core.ProducerFactory; -import org.springframework.kafka.transaction.KafkaTransactionManager; - -import java.util.HashMap; -import java.util.Map; - -/** - * @author cz - */ -@SpringBootConfiguration -public class KafkaProviderConfig { - - @Value("${spring.kafka.producer.bootstrap-servers}") - private String bootstrapServers; - @Value("${spring.kafka.producer.acks}") - private String acks; - @Value("${spring.kafka.producer.retries}") - private String retries; - @Value("${spring.kafka.producer.batch-size}") - private String batchSize; - @Value("${spring.kafka.producer.buffer-memory}") - private String bufferMemory; - - @Bean - public Map producerConfigs() { - Map props = new HashMap<>(16); - props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers); - //响应模式,我们使用acks=all :只有当所有参与复制的节点全部收到消息时,生产者才会收到一个来自服务器的成功响应。 - props.put(ProducerConfig.ACKS_CONFIG, acks); - //发生错误后,消息重发的次数,开启事务必须大于0 - props.put(ProducerConfig.RETRIES_CONFIG, retries); - //当多个消息发送到相同分区时,生产者会将消息打包到一起,以减少请求交互. 而不是一条条发送 - props.put(ProducerConfig.BATCH_SIZE_CONFIG, batchSize); - //有的时刻消息比较少,过了很久,比如5min也没有凑够16KB,这样延时就很大,所以需要一个参数. 再设置一个时间,到了这个时间, - props.put(ProducerConfig.LINGER_MS_CONFIG, "5000"); - //生产者内存缓冲区的大小 - props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, bufferMemory); - //序列和消费者对应 - props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class); - props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class); - - //用户名密码配置,没有用户名密码可以去掉以下配置 -// props.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, SecurityProtocol.SASL_PLAINTEXT.name); -// props.put(SaslConfigs.SASL_MECHANISM, "PLAIN"); -// props.put("java.security.auth.login.config", "10000"); - // 可以在nacos配置文件中配置 -// props.put(SaslConfigs.SASL_JAAS_CONFIG, String.format("org.apache.kafka.common.security.plain.PlainLoginModule required username=\"admin\" password=\"admin1234\";")); - return props; - } - - // 生产者工厂 - @Bean("kafkaProduceFactory") - public ProducerFactory producerFactory() { - DefaultKafkaProducerFactory factory = new DefaultKafkaProducerFactory<>(producerConfigs()); - factory.setTransactionIdPrefix("kafkaTx-"); - return factory; - } - - // 事务处理 - // 这里的事务处理会和项目中的其他事务起冲突,所以我一般会把@Bean去掉,不用spring代理 - @Bean("kafkaTransactionManager") - @Primary - public KafkaTransactionManager kafkaTransactionManager(ProducerFactory producerFactory) { - return new KafkaTransactionManager(producerFactory); - } - - @Bean - public KafkaTemplate kafkaTemplate() { - return new KafkaTemplate<>(producerFactory()); - } - -} +//package com.casic.missiles.autoconfig; +// +//import org.apache.kafka.clients.CommonClientConfigs; +//import org.apache.kafka.clients.producer.ProducerConfig; +//import org.apache.kafka.common.config.SaslConfigs; +//import org.apache.kafka.common.security.auth.SecurityProtocol; +//import org.apache.kafka.common.serialization.StringSerializer; +//import org.springframework.beans.factory.annotation.Value; +//import org.springframework.boot.SpringBootConfiguration; +//import org.springframework.context.annotation.Bean; +//import org.springframework.context.annotation.Primary; +//import org.springframework.kafka.core.DefaultKafkaProducerFactory; +//import org.springframework.kafka.core.KafkaTemplate; +//import org.springframework.kafka.core.ProducerFactory; +//import org.springframework.kafka.transaction.KafkaTransactionManager; +// +//import java.util.HashMap; +//import 
java.util.Map; +// +///** +// * @author cz +// */ +//@SpringBootConfiguration +//public class KafkaProviderConfig { +// +// @Value("${spring.kafka.producer.bootstrap-servers}") +// private String bootstrapServers; +// @Value("${spring.kafka.producer.acks}") +// private String acks; +// @Value("${spring.kafka.producer.retries}") +// private String retries; +// @Value("${spring.kafka.producer.batch-size}") +// private String batchSize; +// @Value("${spring.kafka.producer.buffer-memory}") +// private String bufferMemory; +// +// @Bean +// public Map producerConfigs() { +// Map props = new HashMap<>(16); +// props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers); +// //响应模式,我们使用acks=all :只有当所有参与复制的节点全部收到消息时,生产者才会收到一个来自服务器的成功响应。 +// props.put(ProducerConfig.ACKS_CONFIG, acks); +// //发生错误后,消息重发的次数,开启事务必须大于0 +// props.put(ProducerConfig.RETRIES_CONFIG, retries); +// //当多个消息发送到相同分区时,生产者会将消息打包到一起,以减少请求交互. 而不是一条条发送 +// props.put(ProducerConfig.BATCH_SIZE_CONFIG, batchSize); +// //有的时刻消息比较少,过了很久,比如5min也没有凑够16KB,这样延时就很大,所以需要一个参数. 再设置一个时间,到了这个时间, +// props.put(ProducerConfig.LINGER_MS_CONFIG, "5000"); +// //生产者内存缓冲区的大小 +// props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, bufferMemory); +// //序列和消费者对应 +// props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class); +// props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class); +// +// //用户名密码配置,没有用户名密码可以去掉以下配置 +//// props.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, SecurityProtocol.SASL_PLAINTEXT.name); +//// props.put(SaslConfigs.SASL_MECHANISM, "PLAIN"); +//// props.put("java.security.auth.login.config", "10000"); +// // 可以在nacos配置文件中配置 +//// props.put(SaslConfigs.SASL_JAAS_CONFIG, String.format("org.apache.kafka.common.security.plain.PlainLoginModule required username=\"admin\" password=\"admin1234\";")); +// return props; +// } +// +// // 生产者工厂 +// @Bean("kafkaProduceFactory") +// public ProducerFactory producerFactory() { +// DefaultKafkaProducerFactory factory = new DefaultKafkaProducerFactory<>(producerConfigs()); +// factory.setTransactionIdPrefix("kafkaTx-"); +// return factory; +// } +// +// // 事务处理 +// // 这里的事务处理会和项目中的其他事务起冲突,所以我一般会把@Bean去掉,不用spring代理 +// @Bean("kafkaTransactionManager") +// @Primary +// public KafkaTransactionManager kafkaTransactionManager(ProducerFactory producerFactory) { +// return new KafkaTransactionManager(producerFactory); +// } +// +// @Bean +// public KafkaTemplate kafkaTemplate() { +// return new KafkaTemplate<>(producerFactory()); +// } +// +//} diff --git a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaSendResultHandler.java b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaSendResultHandler.java index 1fdac64..1598ee0 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaSendResultHandler.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaSendResultHandler.java @@ -1,22 +1,22 @@ -package com.casic.missiles.autoconfig; - -import org.apache.kafka.clients.producer.ProducerRecord; -import org.apache.kafka.clients.producer.RecordMetadata; -import org.springframework.kafka.support.ProducerListener; -import org.springframework.stereotype.Component; - -import javax.annotation.Nullable; - -@Component -public class KafkaSendResultHandler implements ProducerListener { - - @Override - public void onSuccess(ProducerRecord producerRecord, RecordMetadata recordMetadata) { - System.out.println("消息发送成功:" + producerRecord.toString()); - } - - @Override - public void onError(ProducerRecord 
producerRecord, @Nullable RecordMetadata recordMetadata, Exception exception) { - System.out.println("消息发送失败:" + producerRecord.toString() + exception.getMessage()); - } -} +//package com.casic.missiles.autoconfig; +// +//import org.apache.kafka.clients.producer.ProducerRecord; +//import org.apache.kafka.clients.producer.RecordMetadata; +//import org.springframework.kafka.support.ProducerListener; +//import org.springframework.stereotype.Component; +// +//import javax.annotation.Nullable; +// +//@Component +//public class KafkaSendResultHandler implements ProducerListener { +// +// @Override +// public void onSuccess(ProducerRecord producerRecord, RecordMetadata recordMetadata) { +// System.out.println("消息发送成功:" + producerRecord.toString()); +// } +// +// @Override +// public void onError(ProducerRecord producerRecord, @Nullable RecordMetadata recordMetadata, Exception exception) { +// System.out.println("消息发送失败:" + producerRecord.toString() + exception.getMessage()); +// } +//} diff --git a/sensorhub-core/src/main/java/com/casic/missiles/cache/ProtocolProcessEventListener.java b/sensorhub-core/src/main/java/com/casic/missiles/cache/ProtocolProcessEventListener.java index b0bf96d..5144189 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/cache/ProtocolProcessEventListener.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/cache/ProtocolProcessEventListener.java @@ -157,7 +157,7 @@ deviceDataList.add(deviceData); } //批量保存 - deviceDataService.saveBatch(deviceDataList); +// deviceDataService.saveBatch(deviceDataList); } } //移除缓存 diff --git a/sensorhub-core/src/main/java/com/casic/missiles/controller/AepCommandSend.java b/sensorhub-core/src/main/java/com/casic/missiles/controller/AepCommandSend.java index 2d0aeae..5e3ba38 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/controller/AepCommandSend.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/controller/AepCommandSend.java @@ -64,7 +64,7 @@ request.setParamMasterKey("0a77886fae4f4ff68d926adeb3a3ef5b"); // single value request.setBody(JSONObject.toJSONString(busConfigParam).getBytes()); CreateCommandLwm2mProfileResponse msgResponse = client.CreateCommandLwm2mProfile(request); - log.info("-----" + msgResponse.getMessage()); + log.info("send status-----" + msgResponse.getMessage()); } catch (Exception ex) { } finally { client.shutdown(); diff --git a/sensorhub-core/src/main/java/com/casic/missiles/controller/KafkaController.java b/sensorhub-core/src/main/java/com/casic/missiles/controller/KafkaController.java deleted file mode 100644 index d679bf1..0000000 --- a/sensorhub-core/src/main/java/com/casic/missiles/controller/KafkaController.java +++ /dev/null @@ -1,59 +0,0 @@ -package com.casic.missiles.controller; - -import com.casic.missiles.autoconfig.KafkaSendResultHandler; -import com.casic.missiles.parser.sender.impl.KafkaSubscribe; -import org.apache.kafka.clients.producer.ProducerRecord; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.kafka.core.KafkaTemplate; -import org.springframework.kafka.support.KafkaHeaders; -import org.springframework.messaging.MessageHeaders; -import org.springframework.messaging.support.GenericMessage; -import org.springframework.transaction.annotation.Transactional; -import org.springframework.web.bind.annotation.RequestMapping; -import org.springframework.web.bind.annotation.RestController; - -import java.util.HashMap; -import java.util.Map; -import java.util.concurrent.ExecutionException; -import 
java.util.concurrent.TimeoutException; - -@RestController -@RequestMapping("/provider") -//这个注解代表这个类开启Springboot事务,因为我们在Kafka的配置文件开启了Kafka事务,不然会报错 -public class KafkaController { - - @Autowired - private KafkaSubscribe kafkaSubscribe; - - @RequestMapping("/send") - @Transactional - public String sendMultiple() throws Exception { - kafkaSubscribe.publishDataSubscribe(null, null); - return "okk"; - } - - -// /** -// * Kafka提供了多种构建消息的方式 -// * -// * @throws ExecutionException -// * @throws InterruptedException -// * @throws TimeoutException -// */ -// public void sendDemo() throws ExecutionException, InterruptedException, TimeoutException { -// //后面的get代表同步发送,括号内时间可选,代表超过这个时间会抛出超时异常,但是仍会发送成功 -// kafkaTemplate.send("topic1", "发给topic1").get(); -// //使用ProducerRecord发送消息 -// ProducerRecord producerRecord = new ProducerRecord<>("topic.quick.demo", "use ProducerRecord to send message"); -// kafkaTemplate.send(producerRecord); -// //使用Message发送消息 -// Map map = new HashMap<>(); -// map.put(KafkaHeaders.TOPIC, "topic.quick.demo"); -// map.put(KafkaHeaders.PARTITION_ID, 0); -// map.put(KafkaHeaders.MESSAGE_KEY, 0); -// GenericMessage message = new GenericMessage<>("use Message to send message", new MessageHeaders(map)); -// kafkaTemplate.send(message); -// } - -} - diff --git a/sensorhub-core/src/main/java/com/casic/missiles/controller/ReceiveController.java b/sensorhub-core/src/main/java/com/casic/missiles/controller/ReceiveController.java index e5fa505..dfed36a 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/controller/ReceiveController.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/controller/ReceiveController.java @@ -18,13 +18,13 @@ private final ThreadPoolExecutor threadPoolExecutor = new ThreadPoolExecutor(5, 10, 60, TimeUnit.SECONDS, new LinkedBlockingDeque<>(10)); @RequestMapping("/data") - public Object testNbResponse(@RequestBody Map h2sDataMap) { + public Object testNbResponse(@RequestBody Map dataMap) { ResponseData responseData = new ResponseData(); - System.out.println(JSON.toJSON(h2sDataMap)); + System.out.println(JSON.toJSON(dataMap)); threadPoolExecutor.execute( () -> { AepCommandSend aepCommandSend = new AepCommandSend(); - aepCommandSend.sendConfig(h2sDataMap); + aepCommandSend.sendConfig(dataMap); } ); responseData.setCode(200); diff --git a/sensorhub-core/src/main/java/com/casic/missiles/netty/SensorhubServer.java b/sensorhub-core/src/main/java/com/casic/missiles/netty/SensorhubServer.java index c4a534c..a946173 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/netty/SensorhubServer.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/netty/SensorhubServer.java @@ -3,6 +3,7 @@ import com.casic.missiles.autoconfig.SensorhubProperties; import com.casic.missiles.netty.handler.SensorhubServerChannelInitialHandler; import io.netty.bootstrap.ServerBootstrap; +import io.netty.buffer.PooledByteBufAllocator; import io.netty.channel.ChannelFuture; import io.netty.channel.ChannelOption; import io.netty.channel.EventLoopGroup; @@ -71,6 +72,8 @@ b.group(bossGroup, workerGroup) // 设置EventLoopGroup .channel(NioServerSocketChannel.class) // 指明新的Channel的类型 .childHandler(new SensorhubServerChannelInitialHandler()) // 指定ChannelHandler + .option(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT) + .childOption(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT) .option(ChannelOption.SO_BACKLOG, 1024) // 设置的ServerChannel的一些选项 .childOption(ChannelOption.SO_KEEPALIVE, true); // 当SO_KEEPALIVE=true的时候,服务端可以探测客户端的连接是否还存活着,如果客户端关闭了,那么服务端的连接可以关闭掉,释放资源 
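The SensorhubServer hunk above switches both the acceptor channel and the per-connection child channels to Netty's pooled allocator, so read buffers come from reusable arenas instead of fresh allocations on every read. A condensed sketch of just that bootstrap wiring (event-loop groups and the channel initializer are assumed to exist elsewhere):

import io.netty.bootstrap.ServerBootstrap;
import io.netty.buffer.PooledByteBufAllocator;
import io.netty.channel.ChannelOption;
import io.netty.channel.EventLoopGroup;
import io.netty.channel.socket.nio.NioServerSocketChannel;

final class PooledBootstrapSketch {

    static ServerBootstrap wire(EventLoopGroup boss, EventLoopGroup worker) {
        return new ServerBootstrap()
                .group(boss, worker)
                .channel(NioServerSocketChannel.class)
                // pooled buffers for the acceptor and for every accepted connection
                .option(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT)
                .childOption(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT)
                .option(ChannelOption.SO_BACKLOG, 1024)
                .childOption(ChannelOption.SO_KEEPALIVE, true);
    }
}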
//add detection diff --git a/sensorhub-core/src/main/java/com/casic/missiles/parser/GenericProtocolParser.java b/sensorhub-core/src/main/java/com/casic/missiles/parser/GenericProtocolParser.java index bb41939..ec9a04f 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/parser/GenericProtocolParser.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/parser/GenericProtocolParser.java @@ -122,7 +122,7 @@ } ProtocolProcessEventListener.setTask(devcode, bizDataMap, 3); //store the data - datagramEventProvider.storeData(bizDataMap); +// datagramEventProvider.storeData(bizDataMap); } catch (RuntimeException rex) { log.error("parsing failed with exception: {}", rex); //data sending, asynchronous, with exception interception diff --git a/sensorhub-core/src/main/java/com/casic/missiles/parser/ProtocolParserSupport.java b/sensorhub-core/src/main/java/com/casic/missiles/parser/ProtocolParserSupport.java index e4a79ae..c886d25 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/parser/ProtocolParserSupport.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/parser/ProtocolParserSupport.java @@ -29,6 +29,13 @@ public class ProtocolParserSupport { + /** + * Get the protocol's fixed data fields + * + * @param protocolFactory + * @param wholeDatagramByte + * @return + */ protected Map getParseFixedDataMap(AbstractProtocolConfigFactory protocolFactory, ByteBuf wholeDatagramByte) { ProtocolFieldConfigProvider protocolFieldConfigProvider = protocolFactory.getProtocolFieldConfigProvider(); RuleConfigProvider ruleConfigProvider = protocolFactory.getRuleConfigProvider(); @@ -41,7 +48,7 @@ //log the source data and the device code ProcessEventTask processEventTask = ProcessEventTask.builder() .devcode((String) parseFixedDataMap.get("devcode")) - .decryptSoureData(ByteBufUtil.hexDump(wholeDatagramByte)) + .decryptSoureData(ByteBufUtil.hexDump(wholeDatagramByte).substring(wholeDatagramByte.readerIndex())) .build(); ProtocolProcessEventListener.asynAddTask(processEventTask); return parseFixedDataMap; diff --git a/sensorhub-core/src/main/java/com/casic/missiles/parser/SensorhubDecoder.java b/sensorhub-core/src/main/java/com/casic/missiles/parser/SensorhubDecoder.java index c8c54f8..fccfa7c 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/parser/SensorhubDecoder.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/parser/SensorhubDecoder.java @@ -1,14 +1,17 @@ package com.casic.missiles.parser; +import com.casic.missiles.autoconfig.DirectMemoryReporter; import com.casic.missiles.parser.predecodec.AbstractPretreatment; import com.casic.missiles.pojo.ParseResult; import com.casic.missiles.provider.ProtocolConfigProvider; import com.casic.missiles.util.ClazzUtil; import io.netty.buffer.ByteBuf; +import io.netty.buffer.ByteBufAllocator; import io.netty.buffer.ByteBufUtil; import io.netty.channel.ChannelHandlerContext; import io.netty.handler.codec.ByteToMessageDecoder; import lombok.extern.slf4j.Slf4j; +import org.apache.commons.codec.binary.Hex; import java.io.BufferedInputStream; import java.io.File; @@ -33,29 +36,43 @@ * 3. Save the results from the generic protocol parser into the list and pass them to the reply handler for the corresponding reply commands */ @Override - public void decode(ChannelHandlerContext ctx, ByteBuf buffer, List list){ + public void decode(ChannelHandlerContext ctx, ByteBuf buffer, List list) { //add the pre-processors List abstractPreProcessingList = ClazzUtil.getSubClassList(AbstractPretreatment.class, true); ByteBuf standardByteBuf = buffer; - String oldBuff=ByteBufUtil.hexDump(standardByteBuf); + String oldBuff = ByteBufUtil.hexDump(standardByteBuf); for (AbstractPretreatment abstractPretreatment : abstractPreProcessingList) { standardByteBuf = 
abstractPretreatment.decode(standardByteBuf); } - boolean pretreatmentStatus=!oldBuff.equals(ByteBufUtil.hexDump(standardByteBuf)); + boolean pretreatmentStatus = !oldBuff.equals(ByteBufUtil.hexDump(standardByteBuf)); log.info(ByteBufUtil.hexDump(standardByteBuf)); //additional pre-processing could be added here, e.g. handling split or merged network packets ProtocolParser protocolParser = new GenericProtocolParser(); - System.out.println(protocolParser); ProtocolConfigProvider protocolConfigProvider = new ProtocolConfigProvider(); ParseResult parseResult = null; +// DirectMemoryReporter memoryReporter=new DirectMemoryReporter(); //hand over in every case; the content assembled here is used when replying + Integer pre = 0; while (parseResult == null && standardByteBuf.readerIndex() != standardByteBuf.writerIndex()) { + pre = standardByteBuf.readerIndex(); parseResult = protocolParser.doParseProtocol(standardByteBuf, protocolConfigProvider); + //avoid an infinite loop + if (pre == standardByteBuf.readerIndex()) { + break; + } } +// destroy(standardByteBuf); if (parseResult != null) { parseResult.setRequestCode(pretreatmentStatus); list.add(parseResult); } } +// public void destroy(ByteBuf byteBuf) { +// if (byteBuf != null && byteBuf.refCnt() > 0) { +// byteBuf.release(); +// byteBuf = null; +// } +// } + } diff --git a/sensorhub-core/src/main/java/com/casic/missiles/parser/processor/reply/DevcieConfigConfirm.java b/sensorhub-core/src/main/java/com/casic/missiles/parser/processor/reply/DevcieConfigConfirm.java index 1fd58f2..5ea30a9 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/parser/processor/reply/DevcieConfigConfirm.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/parser/processor/reply/DevcieConfigConfirm.java @@ -41,7 +41,7 @@ } } //config delivered successfully -// redisCommon.removeKeyByDevcode(devcode); + redisCommon.removeKeyByDevcode(devcode); configDataMap.put("config", "1"); } return result; diff --git a/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/combined/CombinedFieldSupport.java b/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/combined/CombinedFieldSupport.java index b49c027..9226dc3 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/combined/CombinedFieldSupport.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/combined/CombinedFieldSupport.java @@ -20,13 +20,12 @@ */ public class CombinedFieldSupport { - //deep copy so the stored object is not affected + //obtain the field config via deep copy protected FieldConfig getFieldConfigById(Long fieldId, Map fieldConfigsMap) { if (ObjectUtils.isNotEmpty(fieldId)) { FieldConfig fieldConfig = fieldConfigsMap.get(fieldId); FieldConfig newDeepCopyFieldConfig = new FieldConfig(); //pass it in via deep copy -// System.out.println("fieldId:"+fieldId+" json:"+ JSON.toJSON(fieldConfig)); BeanUtils.copyProperties(fieldConfig, newDeepCopyFieldConfig); return newDeepCopyFieldConfig; } else {
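The decode loop added to SensorhubDecoder above records the reader index before each parse attempt and breaks out when no bytes were consumed, so a frame the parser cannot recognize no longer spins the while loop forever. The same progress-guard idiom in isolation (a hypothetical decoder, not the project's class):

import io.netty.buffer.ByteBuf;
import io.netty.channel.ChannelHandlerContext;
import io.netty.handler.codec.ByteToMessageDecoder;

import java.util.List;

public class ProgressGuardDecoderSketch extends ByteToMessageDecoder {

    @Override
    protected void decode(ChannelHandlerContext ctx, ByteBuf in, List<Object> out) {
        while (in.isReadable()) {
            int before = in.readerIndex();
            Object frame = tryParseOneFrame(in);
            if (frame != null) {
                out.add(frame);
            }
            if (in.readerIndex() == before) {
                break; // no progress was made: stop instead of looping forever
            }
        }
    }

    private Object tryParseOneFrame(ByteBuf in) {
        if (in.readableBytes() < 4) {
            return null; // incomplete frame, nothing consumed: the guard above exits
        }
        return in.readInt(); // stand-in for one successfully parsed protocol frame
    }
}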
diff --git a/sensorhub-core/src/main/java/com/casic/missiles/provider/ProcessorInstanceProvider.java b/sensorhub-core/src/main/java/com/casic/missiles/provider/ProcessorInstanceProvider.java index 3d89d0d..833a4b1 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/provider/ProcessorInstanceProvider.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/provider/ProcessorInstanceProvider.java @@ -6,7 +6,7 @@ import com.casic.missiles.cache.ProtocolProcessEventListener; import com.casic.missiles.parser.safe.SafeStrategy; import com.casic.missiles.parser.sender.DataSubscribeProvider; -import com.casic.missiles.parser.sender.impl.KafkaSubscribe; +//import com.casic.missiles.parser.sender.impl.KafkaSubscribe; import com.casic.missiles.pojo.*; import com.casic.missiles.registry.DatagramEventRegistry; import com.casic.missiles.registry.SubscribeRegistry; @@ -148,8 +148,8 @@ * Data subscription */ public void storeData(List> bizDataMap) { - DataSubscribeProvider subscribeProvider = SpringContextUtil.getBean(KafkaSubscribe.class); - subscribeProvider.publishDataSubscribe(bizDataMap, null); +// DataSubscribeProvider subscribeProvider = SpringContextUtil.getBean(KafkaSubscribe.class); +// subscribeProvider.publishDataSubscribe(bizDataMap, null); } // DataSubscribeProvider dataSubscribeProvider = SpringContextUtil.getBean(processorInstance.getSubscribeBean());
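With the Kafka subscriber disabled, storeData above now simply drops its batch. If the DataSubscribeProvider seam should remain pluggable while Kafka is out, a logging stand-in along the following lines could be registered; this sketch assumes the interface shape shown in the commented-out KafkaSubscribe (the generic types, stripped by this diff's rendering, are assumed to be List<Map<String, Object>>), and the bean name "logSubscribe" is invented:

import com.casic.missiles.parser.sender.DataSubscribeProvider;
import com.casic.missiles.pojo.SubscribeDetailConfig;
import lombok.extern.slf4j.Slf4j;
import org.springframework.stereotype.Component;

import java.util.List;
import java.util.Map;

@Slf4j
@Component("logSubscribe")
public class LoggingSubscribeSketch implements DataSubscribeProvider {

    @Override
    public void publishDataSubscribe(List<Map<String, Object>> bizDataMapList,
                                     SubscribeDetailConfig subscribeDetailConfig) {
        // placeholder sink: record the batch size instead of publishing to Kafka
        log.info("subscribe disabled, dropping {} record(s)",
                bizDataMapList == null ? 0 : bizDataMapList.size());
    }
}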
diff --git 
a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerListener.java b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerListener.java index c50a4e3..6600312 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerListener.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerListener.java @@ -1,31 +1,31 @@ -package com.casic.missiles.autoconfig; - -import org.apache.kafka.clients.consumer.ConsumerRecord; -import org.springframework.kafka.annotation.KafkaListener; -import org.springframework.kafka.config.KafkaListenerEndpointRegistry; -import org.springframework.kafka.support.Acknowledgment; -import org.springframework.web.bind.annotation.RestController; - -import javax.annotation.Resource; - -@RestController() -public class KafkaConsumerListener{ - @Resource - private KafkaListenerEndpointRegistry kafkaListenerEndpointRegistry; - /** - * 监听kafka消息 - * - * 使用autoStartup = "false"必须指定id - */ - @KafkaListener(id = "${spring.kafka.consumer.group-id}", topics = {"KAFKA_TEST_TOPICS"}, autoStartup = "true") - public void listenTopics(ConsumerRecord consumerRecord, Acknowledgment ack) { - try { - System.out.println("listenTopics接受消息:" + consumerRecord.value()); - //手动确认 - ack.acknowledge(); - } catch (Exception e) { - System.out.println("消费失败:" + e); - } - } - -} +//package com.casic.missiles.autoconfig; +// +//import org.apache.kafka.clients.consumer.ConsumerRecord; +//import org.springframework.kafka.annotation.KafkaListener; +//import org.springframework.kafka.config.KafkaListenerEndpointRegistry; +//import org.springframework.kafka.support.Acknowledgment; +//import org.springframework.web.bind.annotation.RestController; +// +//import javax.annotation.Resource; +// +//@RestController() +//public class KafkaConsumerListener{ +// @Resource +// private KafkaListenerEndpointRegistry kafkaListenerEndpointRegistry; +// /** +// * 监听kafka消息 +// * +// * 使用autoStartup = "false"必须指定id +// */ +// @KafkaListener(id = "${spring.kafka.consumer.group-id}", topics = {"KAFKA_TEST_TOPICS"}, autoStartup = "true") +// public void listenTopics(ConsumerRecord consumerRecord, Acknowledgment ack) { +// try { +// System.out.println("listenTopics接受消息:" + consumerRecord.value()); +// //手动确认 +// ack.acknowledge(); +// } catch (Exception e) { +// System.out.println("消费失败:" + e); +// } +// } +// +//} diff --git a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerListenerError.java b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerListenerError.java index d6f771a..dbc561c 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerListenerError.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerListenerError.java @@ -1,29 +1,29 @@ -package com.casic.missiles.autoconfig; - -import edu.umd.cs.findbugs.annotations.NonNull; -import org.apache.kafka.clients.consumer.Consumer; -import org.springframework.kafka.listener.KafkaListenerErrorHandler; -import org.springframework.kafka.listener.ListenerExecutionFailedException; -import org.springframework.messaging.Message; -import org.springframework.stereotype.Component; - -@Component -public class KafkaConsumerListenerError implements KafkaListenerErrorHandler { - - - @Override - @NonNull - public Object handleError(Message message, ListenerExecutionFailedException e) { - return new Object(); - } - - @Override - public Object handleError(Message message, ListenerExecutionFailedException 
exception, Consumer consumer) { - System.out.println("消息详情:" + message); - System.out.println("异常信息::" + exception); - System.out.println("消费者详情::" + consumer.groupMetadata()); - System.out.println("监听主题::" + consumer.listTopics()); - return KafkaListenerErrorHandler.super.handleError(message, exception, consumer); - } - -} +//package com.casic.missiles.autoconfig; +// +//import edu.umd.cs.findbugs.annotations.NonNull; +//import org.apache.kafka.clients.consumer.Consumer; +//import org.springframework.kafka.listener.KafkaListenerErrorHandler; +//import org.springframework.kafka.listener.ListenerExecutionFailedException; +//import org.springframework.messaging.Message; +//import org.springframework.stereotype.Component; +// +//@Component +//public class KafkaConsumerListenerError implements KafkaListenerErrorHandler { +// +// +// @Override +// @NonNull +// public Object handleError(Message message, ListenerExecutionFailedException e) { +// return new Object(); +// } +// +// @Override +// public Object handleError(Message message, ListenerExecutionFailedException exception, Consumer consumer) { +// System.out.println("消息详情:" + message); +// System.out.println("异常信息::" + exception); +// System.out.println("消费者详情::" + consumer.groupMetadata()); +// System.out.println("监听主题::" + consumer.listTopics()); +// return KafkaListenerErrorHandler.super.handleError(message, exception, consumer); +// } +// +//} diff --git a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaProviderConfig.java b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaProviderConfig.java index e8e36ec..1d605ed 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaProviderConfig.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaProviderConfig.java @@ -1,85 +1,85 @@ -package com.casic.missiles.autoconfig; - -import org.apache.kafka.clients.CommonClientConfigs; -import org.apache.kafka.clients.producer.ProducerConfig; -import org.apache.kafka.common.config.SaslConfigs; -import org.apache.kafka.common.security.auth.SecurityProtocol; -import org.apache.kafka.common.serialization.StringSerializer; -import org.springframework.beans.factory.annotation.Value; -import org.springframework.boot.SpringBootConfiguration; -import org.springframework.context.annotation.Bean; -import org.springframework.context.annotation.Primary; -import org.springframework.kafka.core.DefaultKafkaProducerFactory; -import org.springframework.kafka.core.KafkaTemplate; -import org.springframework.kafka.core.ProducerFactory; -import org.springframework.kafka.transaction.KafkaTransactionManager; - -import java.util.HashMap; -import java.util.Map; - -/** - * @author cz - */ -@SpringBootConfiguration -public class KafkaProviderConfig { - - @Value("${spring.kafka.producer.bootstrap-servers}") - private String bootstrapServers; - @Value("${spring.kafka.producer.acks}") - private String acks; - @Value("${spring.kafka.producer.retries}") - private String retries; - @Value("${spring.kafka.producer.batch-size}") - private String batchSize; - @Value("${spring.kafka.producer.buffer-memory}") - private String bufferMemory; - - @Bean - public Map producerConfigs() { - Map props = new HashMap<>(16); - props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers); - //响应模式,我们使用acks=all :只有当所有参与复制的节点全部收到消息时,生产者才会收到一个来自服务器的成功响应。 - props.put(ProducerConfig.ACKS_CONFIG, acks); - //发生错误后,消息重发的次数,开启事务必须大于0 - props.put(ProducerConfig.RETRIES_CONFIG, retries); - //当多个消息发送到相同分区时,生产者会将消息打包到一起,以减少请求交互. 
而不是一条条发送 - props.put(ProducerConfig.BATCH_SIZE_CONFIG, batchSize); - //有的时刻消息比较少,过了很久,比如5min也没有凑够16KB,这样延时就很大,所以需要一个参数. 再设置一个时间,到了这个时间, - props.put(ProducerConfig.LINGER_MS_CONFIG, "5000"); - //生产者内存缓冲区的大小 - props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, bufferMemory); - //序列和消费者对应 - props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class); - props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class); - - //用户名密码配置,没有用户名密码可以去掉以下配置 -// props.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, SecurityProtocol.SASL_PLAINTEXT.name); -// props.put(SaslConfigs.SASL_MECHANISM, "PLAIN"); -// props.put("java.security.auth.login.config", "10000"); - // 可以在nacos配置文件中配置 -// props.put(SaslConfigs.SASL_JAAS_CONFIG, String.format("org.apache.kafka.common.security.plain.PlainLoginModule required username=\"admin\" password=\"admin1234\";")); - return props; - } - - // 生产者工厂 - @Bean("kafkaProduceFactory") - public ProducerFactory producerFactory() { - DefaultKafkaProducerFactory factory = new DefaultKafkaProducerFactory<>(producerConfigs()); - factory.setTransactionIdPrefix("kafkaTx-"); - return factory; - } - - // 事务处理 - // 这里的事务处理会和项目中的其他事务起冲突,所以我一般会把@Bean去掉,不用spring代理 - @Bean("kafkaTransactionManager") - @Primary - public KafkaTransactionManager kafkaTransactionManager(ProducerFactory producerFactory) { - return new KafkaTransactionManager(producerFactory); - } - - @Bean - public KafkaTemplate kafkaTemplate() { - return new KafkaTemplate<>(producerFactory()); - } - -} +//package com.casic.missiles.autoconfig; +// +//import org.apache.kafka.clients.CommonClientConfigs; +//import org.apache.kafka.clients.producer.ProducerConfig; +//import org.apache.kafka.common.config.SaslConfigs; +//import org.apache.kafka.common.security.auth.SecurityProtocol; +//import org.apache.kafka.common.serialization.StringSerializer; +//import org.springframework.beans.factory.annotation.Value; +//import org.springframework.boot.SpringBootConfiguration; +//import org.springframework.context.annotation.Bean; +//import org.springframework.context.annotation.Primary; +//import org.springframework.kafka.core.DefaultKafkaProducerFactory; +//import org.springframework.kafka.core.KafkaTemplate; +//import org.springframework.kafka.core.ProducerFactory; +//import org.springframework.kafka.transaction.KafkaTransactionManager; +// +//import java.util.HashMap; +//import java.util.Map; +// +///** +// * @author cz +// */ +//@SpringBootConfiguration +//public class KafkaProviderConfig { +// +// @Value("${spring.kafka.producer.bootstrap-servers}") +// private String bootstrapServers; +// @Value("${spring.kafka.producer.acks}") +// private String acks; +// @Value("${spring.kafka.producer.retries}") +// private String retries; +// @Value("${spring.kafka.producer.batch-size}") +// private String batchSize; +// @Value("${spring.kafka.producer.buffer-memory}") +// private String bufferMemory; +// +// @Bean +// public Map producerConfigs() { +// Map props = new HashMap<>(16); +// props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers); +// //响应模式,我们使用acks=all :只有当所有参与复制的节点全部收到消息时,生产者才会收到一个来自服务器的成功响应。 +// props.put(ProducerConfig.ACKS_CONFIG, acks); +// //发生错误后,消息重发的次数,开启事务必须大于0 +// props.put(ProducerConfig.RETRIES_CONFIG, retries); +// //当多个消息发送到相同分区时,生产者会将消息打包到一起,以减少请求交互. 而不是一条条发送 +// props.put(ProducerConfig.BATCH_SIZE_CONFIG, batchSize); +// //有的时刻消息比较少,过了很久,比如5min也没有凑够16KB,这样延时就很大,所以需要一个参数. 
再设置一个时间,到了这个时间, +// props.put(ProducerConfig.LINGER_MS_CONFIG, "5000"); +// //生产者内存缓冲区的大小 +// props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, bufferMemory); +// //序列和消费者对应 +// props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class); +// props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class); +// +// //用户名密码配置,没有用户名密码可以去掉以下配置 +//// props.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, SecurityProtocol.SASL_PLAINTEXT.name); +//// props.put(SaslConfigs.SASL_MECHANISM, "PLAIN"); +//// props.put("java.security.auth.login.config", "10000"); +// // 可以在nacos配置文件中配置 +//// props.put(SaslConfigs.SASL_JAAS_CONFIG, String.format("org.apache.kafka.common.security.plain.PlainLoginModule required username=\"admin\" password=\"admin1234\";")); +// return props; +// } +// +// // 生产者工厂 +// @Bean("kafkaProduceFactory") +// public ProducerFactory producerFactory() { +// DefaultKafkaProducerFactory factory = new DefaultKafkaProducerFactory<>(producerConfigs()); +// factory.setTransactionIdPrefix("kafkaTx-"); +// return factory; +// } +// +// // 事务处理 +// // 这里的事务处理会和项目中的其他事务起冲突,所以我一般会把@Bean去掉,不用spring代理 +// @Bean("kafkaTransactionManager") +// @Primary +// public KafkaTransactionManager kafkaTransactionManager(ProducerFactory producerFactory) { +// return new KafkaTransactionManager(producerFactory); +// } +// +// @Bean +// public KafkaTemplate kafkaTemplate() { +// return new KafkaTemplate<>(producerFactory()); +// } +// +//} diff --git a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaSendResultHandler.java b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaSendResultHandler.java index 1fdac64..1598ee0 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaSendResultHandler.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaSendResultHandler.java @@ -1,22 +1,22 @@ -package com.casic.missiles.autoconfig; - -import org.apache.kafka.clients.producer.ProducerRecord; -import org.apache.kafka.clients.producer.RecordMetadata; -import org.springframework.kafka.support.ProducerListener; -import org.springframework.stereotype.Component; - -import javax.annotation.Nullable; - -@Component -public class KafkaSendResultHandler implements ProducerListener { - - @Override - public void onSuccess(ProducerRecord producerRecord, RecordMetadata recordMetadata) { - System.out.println("消息发送成功:" + producerRecord.toString()); - } - - @Override - public void onError(ProducerRecord producerRecord, @Nullable RecordMetadata recordMetadata, Exception exception) { - System.out.println("消息发送失败:" + producerRecord.toString() + exception.getMessage()); - } -} +//package com.casic.missiles.autoconfig; +// +//import org.apache.kafka.clients.producer.ProducerRecord; +//import org.apache.kafka.clients.producer.RecordMetadata; +//import org.springframework.kafka.support.ProducerListener; +//import org.springframework.stereotype.Component; +// +//import javax.annotation.Nullable; +// +//@Component +//public class KafkaSendResultHandler implements ProducerListener { +// +// @Override +// public void onSuccess(ProducerRecord producerRecord, RecordMetadata recordMetadata) { +// System.out.println("消息发送成功:" + producerRecord.toString()); +// } +// +// @Override +// public void onError(ProducerRecord producerRecord, @Nullable RecordMetadata recordMetadata, Exception exception) { +// System.out.println("消息发送失败:" + producerRecord.toString() + exception.getMessage()); +// } +//} diff --git 
a/sensorhub-core/src/main/java/com/casic/missiles/cache/ProtocolProcessEventListener.java b/sensorhub-core/src/main/java/com/casic/missiles/cache/ProtocolProcessEventListener.java index b0bf96d..5144189 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/cache/ProtocolProcessEventListener.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/cache/ProtocolProcessEventListener.java @@ -157,7 +157,7 @@ deviceDataList.add(deviceData); } //批量保存 - deviceDataService.saveBatch(deviceDataList); +// deviceDataService.saveBatch(deviceDataList); } } //移除缓存 diff --git a/sensorhub-core/src/main/java/com/casic/missiles/controller/AepCommandSend.java b/sensorhub-core/src/main/java/com/casic/missiles/controller/AepCommandSend.java index 2d0aeae..5e3ba38 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/controller/AepCommandSend.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/controller/AepCommandSend.java @@ -64,7 +64,7 @@ request.setParamMasterKey("0a77886fae4f4ff68d926adeb3a3ef5b"); // single value request.setBody(JSONObject.toJSONString(busConfigParam).getBytes()); CreateCommandLwm2mProfileResponse msgResponse = client.CreateCommandLwm2mProfile(request); - log.info("-----" + msgResponse.getMessage()); + log.info("send status-----" + msgResponse.getMessage()); } catch (Exception ex) { } finally { client.shutdown(); diff --git a/sensorhub-core/src/main/java/com/casic/missiles/controller/KafkaController.java b/sensorhub-core/src/main/java/com/casic/missiles/controller/KafkaController.java deleted file mode 100644 index d679bf1..0000000 --- a/sensorhub-core/src/main/java/com/casic/missiles/controller/KafkaController.java +++ /dev/null @@ -1,59 +0,0 @@ -package com.casic.missiles.controller; - -import com.casic.missiles.autoconfig.KafkaSendResultHandler; -import com.casic.missiles.parser.sender.impl.KafkaSubscribe; -import org.apache.kafka.clients.producer.ProducerRecord; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.kafka.core.KafkaTemplate; -import org.springframework.kafka.support.KafkaHeaders; -import org.springframework.messaging.MessageHeaders; -import org.springframework.messaging.support.GenericMessage; -import org.springframework.transaction.annotation.Transactional; -import org.springframework.web.bind.annotation.RequestMapping; -import org.springframework.web.bind.annotation.RestController; - -import java.util.HashMap; -import java.util.Map; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.TimeoutException; - -@RestController -@RequestMapping("/provider") -//这个注解代表这个类开启Springboot事务,因为我们在Kafka的配置文件开启了Kafka事务,不然会报错 -public class KafkaController { - - @Autowired - private KafkaSubscribe kafkaSubscribe; - - @RequestMapping("/send") - @Transactional - public String sendMultiple() throws Exception { - kafkaSubscribe.publishDataSubscribe(null, null); - return "okk"; - } - - -// /** -// * Kafka提供了多种构建消息的方式 -// * -// * @throws ExecutionException -// * @throws InterruptedException -// * @throws TimeoutException -// */ -// public void sendDemo() throws ExecutionException, InterruptedException, TimeoutException { -// //后面的get代表同步发送,括号内时间可选,代表超过这个时间会抛出超时异常,但是仍会发送成功 -// kafkaTemplate.send("topic1", "发给topic1").get(); -// //使用ProducerRecord发送消息 -// ProducerRecord producerRecord = new ProducerRecord<>("topic.quick.demo", "use ProducerRecord to send message"); -// kafkaTemplate.send(producerRecord); -// //使用Message发送消息 -// Map map = new HashMap<>(); -// map.put(KafkaHeaders.TOPIC, "topic.quick.demo"); 
-// map.put(KafkaHeaders.PARTITION_ID, 0); -// map.put(KafkaHeaders.MESSAGE_KEY, 0); -// GenericMessage message = new GenericMessage<>("use Message to send message", new MessageHeaders(map)); -// kafkaTemplate.send(message); -// } - -} - diff --git a/sensorhub-core/src/main/java/com/casic/missiles/controller/ReceiveController.java b/sensorhub-core/src/main/java/com/casic/missiles/controller/ReceiveController.java index e5fa505..dfed36a 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/controller/ReceiveController.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/controller/ReceiveController.java @@ -18,13 +18,13 @@ private final ThreadPoolExecutor threadPoolExecutor = new ThreadPoolExecutor(5, 10, 60, TimeUnit.SECONDS, new LinkedBlockingDeque<>(10)); @RequestMapping("/data") - public Object testNbResponse(@RequestBody Map h2sDataMap) { + public Object testNbResponse(@RequestBody Map dataMap) { ResponseData responseData = new ResponseData(); - System.out.println(JSON.toJSON(h2sDataMap)); + System.out.println(JSON.toJSON(dataMap)); threadPoolExecutor.execute( () -> { AepCommandSend aepCommandSend = new AepCommandSend(); - aepCommandSend.sendConfig(h2sDataMap); + aepCommandSend.sendConfig(dataMap); } ); responseData.setCode(200); diff --git a/sensorhub-core/src/main/java/com/casic/missiles/netty/SensorhubServer.java b/sensorhub-core/src/main/java/com/casic/missiles/netty/SensorhubServer.java index c4a534c..a946173 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/netty/SensorhubServer.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/netty/SensorhubServer.java @@ -3,6 +3,7 @@ import com.casic.missiles.autoconfig.SensorhubProperties; import com.casic.missiles.netty.handler.SensorhubServerChannelInitialHandler; import io.netty.bootstrap.ServerBootstrap; +import io.netty.buffer.PooledByteBufAllocator; import io.netty.channel.ChannelFuture; import io.netty.channel.ChannelOption; import io.netty.channel.EventLoopGroup; @@ -71,6 +72,8 @@ b.group(bossGroup, workerGroup) // 设置EventLoopGroup .channel(NioServerSocketChannel.class) // 指明新的Channel的类型 .childHandler(new SensorhubServerChannelInitialHandler()) // 指定ChannelHandler + .option(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT) + .childOption(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT) .option(ChannelOption.SO_BACKLOG, 1024) // 设置的ServerChannel的一些选项 .childOption(ChannelOption.SO_KEEPALIVE, true); // 当SO_KEEPALIVE=true的时候,服务端可以探测客户端的连接是否还存活着,如果客户端关闭了,那么服务端的连接可以关闭掉,释放资源 //增加检测 diff --git a/sensorhub-core/src/main/java/com/casic/missiles/parser/GenericProtocolParser.java b/sensorhub-core/src/main/java/com/casic/missiles/parser/GenericProtocolParser.java index bb41939..ec9a04f 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/parser/GenericProtocolParser.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/parser/GenericProtocolParser.java @@ -122,7 +122,7 @@ } ProtocolProcessEventListener.setTask(devcode, bizDataMap, 3); //存储数据 - datagramEventProvider.storeData(bizDataMap); +// datagramEventProvider.storeData(bizDataMap); } catch (RuntimeException rex) { log.error("解析出现异常,异常信息为{}", rex); //数据发送,异步,异常拦截 diff --git a/sensorhub-core/src/main/java/com/casic/missiles/parser/ProtocolParserSupport.java b/sensorhub-core/src/main/java/com/casic/missiles/parser/ProtocolParserSupport.java index e4a79ae..c886d25 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/parser/ProtocolParserSupport.java +++ 
b/sensorhub-core/src/main/java/com/casic/missiles/parser/ProtocolParserSupport.java @@ -29,6 +29,13 @@ public class ProtocolParserSupport { + /** + * 获取协议的固定数据信息 + * + * @param protocolFactory + * @param wholeDatagramByte + * @return + */ protected Map getParseFixedDataMap(AbstractProtocolConfigFactory protocolFactory, ByteBuf wholeDatagramByte) { ProtocolFieldConfigProvider protocolFieldConfigProvider = protocolFactory.getProtocolFieldConfigProvider(); RuleConfigProvider ruleConfigProvider = protocolFactory.getRuleConfigProvider(); @@ -41,7 +48,7 @@ //打印源数据,设备编号 ProcessEventTask processEventTask = ProcessEventTask.builder() .devcode((String) parseFixedDataMap.get("devcode")) - .decryptSoureData(ByteBufUtil.hexDump(wholeDatagramByte)) + .decryptSoureData(ByteBufUtil.hexDump(wholeDatagramByte).substring(wholeDatagramByte.readerIndex())) .build(); ProtocolProcessEventListener.asynAddTask(processEventTask); return parseFixedDataMap; diff --git a/sensorhub-core/src/main/java/com/casic/missiles/parser/SensorhubDecoder.java b/sensorhub-core/src/main/java/com/casic/missiles/parser/SensorhubDecoder.java index c8c54f8..fccfa7c 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/parser/SensorhubDecoder.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/parser/SensorhubDecoder.java @@ -1,14 +1,17 @@ package com.casic.missiles.parser; +import com.casic.missiles.autoconfig.DirectMemoryReporter; import com.casic.missiles.parser.predecodec.AbstractPretreatment; import com.casic.missiles.pojo.ParseResult; import com.casic.missiles.provider.ProtocolConfigProvider; import com.casic.missiles.util.ClazzUtil; import io.netty.buffer.ByteBuf; +import io.netty.buffer.ByteBufAllocator; import io.netty.buffer.ByteBufUtil; import io.netty.channel.ChannelHandlerContext; import io.netty.handler.codec.ByteToMessageDecoder; import lombok.extern.slf4j.Slf4j; +import org.apache.commons.codec.binary.Hex; import java.io.BufferedInputStream; import java.io.File; @@ -33,29 +36,43 @@ * 3、将从通用的协议解析器得到的结果进行保存到list,传递给回复的handler,进行相关的回复命令操作 */ @Override - public void decode(ChannelHandlerContext ctx, ByteBuf buffer, List list){ + public void decode(ChannelHandlerContext ctx, ByteBuf buffer, List list) { //添加前置处理器 List abstractPreProcessingList = ClazzUtil.getSubClassList(AbstractPretreatment.class, true); ByteBuf standardByteBuf = buffer; - String oldBuff=ByteBufUtil.hexDump(standardByteBuf); + String oldBuff = ByteBufUtil.hexDump(standardByteBuf); for (AbstractPretreatment abstractPretreatment : abstractPreProcessingList) { standardByteBuf = abstractPretreatment.decode(standardByteBuf); } - boolean pretreatmentStatus=!oldBuff.equals(ByteBufUtil.hexDump(standardByteBuf)); + boolean pretreatmentStatus = !oldBuff.equals(ByteBufUtil.hexDump(standardByteBuf)); log.info(ByteBufUtil.hexDump(standardByteBuf)); //这里可以增加一些前置处理,例如判断拆包合包等网络流的操作 ProtocolParser protocolParser = new GenericProtocolParser(); - System.out.println(protocolParser); ProtocolConfigProvider protocolConfigProvider = new ProtocolConfigProvider(); ParseResult parseResult = null; +// DirectMemoryReporter memoryReporter=new DirectMemoryReporter(); //无论什么情况都交给,这里组装的内容,在回复的时候有效使用 + Integer pre = 0; while (parseResult == null && standardByteBuf.readerIndex() != standardByteBuf.writerIndex()) { + pre = standardByteBuf.readerIndex(); parseResult = protocolParser.doParseProtocol(standardByteBuf, protocolConfigProvider); + //避免死循环 + if (pre == standardByteBuf.readerIndex()) { + break; + } } +// destroy(standardByteBuf); if (parseResult != null) { 
parseResult.setRequestCode(pretreatmentStatus); list.add(parseResult); } } +// public void destroy(ByteBuf byteBuf) { +// if (byteBuf != null && byteBuf.refCnt() > 0) { +// byteBuf.release(); +// byteBuf = null; +// } +// } + } diff --git a/sensorhub-core/src/main/java/com/casic/missiles/parser/processor/reply/DevcieConfigConfirm.java b/sensorhub-core/src/main/java/com/casic/missiles/parser/processor/reply/DevcieConfigConfirm.java index 1fd58f2..5ea30a9 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/parser/processor/reply/DevcieConfigConfirm.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/parser/processor/reply/DevcieConfigConfirm.java @@ -41,7 +41,7 @@ } } //下发配置成功 -// redisCommon.removeKeyByDevcode(devcode); + redisCommon.removeKeyByDevcode(devcode); configDataMap.put("config", "1"); } return result; diff --git a/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/combined/CombinedFieldSupport.java b/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/combined/CombinedFieldSupport.java index b49c027..9226dc3 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/combined/CombinedFieldSupport.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/combined/CombinedFieldSupport.java @@ -20,13 +20,12 @@ */ public class CombinedFieldSupport { - //深拷贝不影响存储的对象 + //通过深拷贝获取字段配置 protected FieldConfig getFieldConfigById(Long fieldId, Map fieldConfigsMap) { if (ObjectUtils.isNotEmpty(fieldId)) { FieldConfig fieldConfig = fieldConfigsMap.get(fieldId); FieldConfig newDeepCopyFieldConfig = new FieldConfig(); //通过深拷贝传入 -// System.out.println("fieldId:"+fieldId+" json:"+ JSON.toJSON(fieldConfig)); BeanUtils.copyProperties(fieldConfig, newDeepCopyFieldConfig); return newDeepCopyFieldConfig; } else { diff --git a/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/combined/GenericCombinedFieldResolver.java b/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/combined/GenericCombinedFieldResolver.java index 58c419d..72b75dc 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/combined/GenericCombinedFieldResolver.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/combined/GenericCombinedFieldResolver.java @@ -5,6 +5,7 @@ import com.baomidou.mybatisplus.core.toolkit.CollectionUtils; import com.casic.missiles.enums.EngineExceptionEnum; import com.casic.missiles.exception.EngineException; +import com.casic.missiles.parser.GenericProtocolParser; import com.casic.missiles.parser.resolver.combined.impl.BizFieldParseProcessor; import com.casic.missiles.parser.resolver.combined.impl.PreBizFieldParseProcessor; import com.casic.missiles.parser.resolver.combined.impl.PreLeadCodeProcessor; @@ -13,13 +14,18 @@ import com.casic.missiles.pojo.CombinedFieldProcessorParam; import com.casic.missiles.pojo.FieldConfig; import com.casic.missiles.pojo.FieldRuleConfig; +import com.casic.missiles.util.SpringContextUtil; import io.netty.buffer.ByteBuf; import io.netty.buffer.ByteBufUtil; +import io.netty.util.internal.PlatformDependent; import lombok.extern.slf4j.Slf4j; +import org.springframework.util.ReflectionUtils; +import java.lang.reflect.Field; import java.util.ArrayList; import java.util.List; import java.util.Map; +import java.util.concurrent.atomic.AtomicLong; import java.util.stream.Collectors; /** @@ -31,7 +37,7 @@ public class GenericCombinedFieldResolver { /** - * 组合字段解析主要流程类 + * 组合字段解析主要流程类 * 通过查询,字段长度长度随机 */ @@ -42,16 +48,16 @@ List> storeObjectList = 
combinedFieldParam.getStoreObjectList(); while (byteBuf.readerIndex() < byteBuf.writerIndex()) { for (AbstractCombinedFieldProcessor abstractProcessor : abstractProcessorList) { - Integer oldLength = byteBuf.readerIndex(); - median = abstractProcessor.invoke(combinedFieldParam); - if (byteBuf.readerIndex() >= byteBuf.writerIndex()) { - return; - } - Assert.isFalse(oldLength == byteBuf.readerIndex(), () -> { - throw new EngineException(EngineExceptionEnum.COMBINED_CONFIG_MATCH_FAILED); - }); - // 将前一个节点作为参数传入下一次,保留前一个节点的信息 - combinedFieldParam.setPreProcessorResult(median); + Integer oldLength = byteBuf.readerIndex(); + median = abstractProcessor.invoke(combinedFieldParam); + if (byteBuf.readerIndex() >= byteBuf.writerIndex()) { + return; + } + Assert.isFalse(oldLength == byteBuf.readerIndex(), () -> { + throw new EngineException(EngineExceptionEnum.COMBINED_CONFIG_MATCH_FAILED); + }); + // 将前一个节点作为参数传入下一次,保留前一个节点的信息 + combinedFieldParam.setPreProcessorResult(median); } } System.out.println(JSON.toJSON(storeObjectList)); diff --git a/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/combined/impl/BizFieldParseProcessor.java b/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/combined/impl/BizFieldParseProcessor.java index 4cd61a7..f65e4eb 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/combined/impl/BizFieldParseProcessor.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/combined/impl/BizFieldParseProcessor.java @@ -62,9 +62,7 @@ * 计算处理后的偏移位置 */ private Integer calculateAfterProcessPosition(AbstractFieldConfig newProtocolFieldConfig, CombinedFieldProcessorParam combinedFieldParam) { -// Integer originPositionIndex = newProtocolFieldConfig.getOriginPositionByte() - combinedFieldParam.getByteBuf().readerIndex(); Integer mergeBitToByte = 0; -// mergeBitToByte += originPositionIndex; if (newProtocolFieldConfig.getOffsetUnit().equals("bit")) { mergeBitToByte += (newProtocolFieldConfig.getOriginPositionBit() + newProtocolFieldConfig.getOffsetLength()) / 8; } else { diff --git a/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/fields/BitFieldParser.java b/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/fields/BitFieldParser.java index a855b68..2f77154 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/fields/BitFieldParser.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/fields/BitFieldParser.java @@ -59,7 +59,9 @@ byte[] binaryBytes = getBytesFromBinaryStr(binaryStr); ByteBuf byteBuf = ByteBufAllocator.DEFAULT.buffer(); byteBuf.writeBytes(binaryBytes); - return doResolveFieldByteRule(byteBuf, ruleIds, fieldRuleConfigMap, networkOrder); + Object resolveValue = doResolveFieldByteRule(byteBuf, ruleIds, fieldRuleConfigMap, networkOrder); + byteBuf.release(); + return resolveValue; } diff --git a/sensorhub-core/src/main/java/com/casic/missiles/parser/safe/impl/Sm4.java b/sensorhub-core/src/main/java/com/casic/missiles/parser/safe/impl/Sm4.java index 18a03e5..b001944 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/parser/safe/impl/Sm4.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/parser/safe/impl/Sm4.java @@ -4,7 +4,6 @@ import io.netty.buffer.ByteBuf; import io.netty.buffer.ByteBufAllocator; import io.netty.buffer.ByteBufUtil; -import io.netty.util.ReferenceCountUtil; import lombok.extern.slf4j.Slf4j; import org.bouncycastle.crypto.engines.SM4Engine; import org.bouncycastle.crypto.params.KeyParameter; diff --git 
a/sensorhub-core/src/main/java/com/casic/missiles/parser/sender/impl/KafkaSubscribe.java b/sensorhub-core/src/main/java/com/casic/missiles/parser/sender/impl/KafkaSubscribe.java index aed848d..b95d1d8 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/parser/sender/impl/KafkaSubscribe.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/parser/sender/impl/KafkaSubscribe.java @@ -1,91 +1,92 @@ -package com.casic.missiles.parser.sender.impl; - -import cn.hutool.core.date.DateUtil; -import com.alibaba.fastjson.JSON; -import com.casic.missiles.autoconfig.KafkaSendResultHandler; -import com.casic.missiles.parser.sender.DataSubscribeProvider; -import com.casic.missiles.pojo.SubscribeDetailConfig; -import org.springframework.kafka.core.KafkaTemplate; -import org.springframework.stereotype.Component; -import org.springframework.transaction.annotation.Transactional; -import org.springframework.util.CollectionUtils; - -import java.util.Date; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -/** - * @author cz - * @date 2023-11-10 - */ -@Component("kafka") -public class KafkaSubscribe implements DataSubscribeProvider { - - private KafkaTemplate kafkaTemplate; - - public KafkaSubscribe(KafkaTemplate kafkaTemplate, KafkaSendResultHandler kafkaSendResultHandler) { - this.kafkaTemplate = kafkaTemplate; - //回调方法、异常处理 - this.kafkaTemplate.setProducerListener(kafkaSendResultHandler); - } - - @Override - @Transactional - public void publishDataSubscribe(List> bizDataMapList, SubscribeDetailConfig subscribeDetailConfig) { - if (CollectionUtils.isEmpty(bizDataMapList)) { - return; - } - Map contentMap = new HashMap(), mBody = new HashMap(); - Map bizDataMap = bizDataMapList.get(0); - switch ((Integer) bizDataMap.get("deviceType")) { - case 32: - contentMap.put("devType", "GasDetector"); - setEventType(bizDataMap, contentMap, mBody, "GasConfigSuccess", "GasConfigFail"); - break; - case 31: - contentMap.put("devType", "Pressure"); - setEventType(bizDataMap, contentMap, mBody, "PressureConfigSuccess", "PressureConfigFail"); - } - - if (bizDataMap.containsKey("dataValue")) { - contentMap.put("mType", "Data"); - if (bizDataMap.containsKey("cell")) { - mBody.put("cell", bizDataMap.get("cell")); - } - mBody.put("datas", bizDataMapList); - sendKafkaMsg(bizDataMap, contentMap, mBody); - } - if (bizDataMap.containsKey("imei")) { - contentMap.put("mType", "StartupRequest"); - mBody.put("iccid", bizDataMap.get("iccid")); - mBody.put("imei", bizDataMap.get("imei")); - sendKafkaMsg(bizDataMap, contentMap, mBody); - } - - } - - private void setEventType(Map bizDataMap, Map contentMap, Map mBody, String bTypeSuccess,String bTypeFail) { - if (bizDataMap.containsKey("config")) { - contentMap.put("mType", "SetResponse"); - contentMap.put("ts", DateUtil.format(new Date(), "yyyyMMddHHmmss")); - if ("1".equals(bizDataMap.get("config"))) { - mBody.put("bType", bTypeSuccess); - }else { - mBody.put("bType", bTypeFail); - } - sendKafkaMsg(bizDataMap, contentMap, mBody); - } - - } - - - private void sendKafkaMsg(Map bizDataMap, Map contentMap, Map mBody) { - if (bizDataMap.containsKey("devcode")) { - contentMap.put("devCode", bizDataMap.get("devcode")); - } - contentMap.put("mBody", mBody); - kafkaTemplate.send("pressure", JSON.toJSONString(contentMap)); - } - -} +//package com.casic.missiles.parser.sender.impl; +// +//import cn.hutool.core.date.DateUtil; +//import com.alibaba.fastjson.JSON; +//import com.casic.missiles.autoconfig.KafkaSendResultHandler; +//import 
com.casic.missiles.parser.sender.DataSubscribeProvider; +//import com.casic.missiles.pojo.SubscribeDetailConfig; +//import org.springframework.kafka.core.KafkaTemplate; +//import org.springframework.stereotype.Component; +//import org.springframework.transaction.annotation.Transactional; +//import org.springframework.util.CollectionUtils; +// +//import java.util.Date; +//import java.util.HashMap; +//import java.util.List; +//import java.util.Map; +// +///** +// * @author cz +// * @date 2023-11-10 +// */ +//@Component("kafka") +//public class KafkaSubscribe implements DataSubscribeProvider { +// +// private KafkaTemplate kafkaTemplate; +// +// public KafkaSubscribe(KafkaTemplate kafkaTemplate, KafkaSendResultHandler kafkaSendResultHandler) { +// this.kafkaTemplate = kafkaTemplate; +// //回调方法、异常处理 +// this.kafkaTemplate.setProducerListener(kafkaSendResultHandler); +// } +// +// @Override +// @Transactional +// public void publishDataSubscribe(List> bizDataMapList, SubscribeDetailConfig subscribeDetailConfig) { +// if (CollectionUtils.isEmpty(bizDataMapList)) { +// return; +// } +// Map contentMap = new HashMap(), mBody = new HashMap(); +// Map bizDataMap = bizDataMapList.get(0); +// switch ((Integer) bizDataMap.get("deviceType")) { +// case 32: +// contentMap.put("devType", "GasDetector"); +// setEventType(bizDataMap, contentMap, mBody, "GasConfigSuccess", "GasConfigFail"); +// break; +// case 31: +// contentMap.put("devType", "Pressure"); +// setEventType(bizDataMap, contentMap, mBody, "PressureConfigSuccess", "PressureConfigFail"); +// } +// +// if (bizDataMap.containsKey("dataValue")) { +// contentMap.put("mType", "Data"); +// if (bizDataMap.containsKey("cell")) { +// mBody.put("cell", bizDataMap.get("cell")); +// } +// mBody.put("datas", bizDataMapList); +// sendKafkaMsg(bizDataMap, contentMap, mBody); +// } +// if (bizDataMap.containsKey("imei")) { +// contentMap.put("mType", "StartupRequest"); +// mBody.put("iccid", bizDataMap.get("iccid")); +// mBody.put("imei", bizDataMap.get("imei")); +// sendKafkaMsg(bizDataMap, contentMap, mBody); +// } +// +// } +// +// //设置下发配置回复 +// private void setEventType(Map bizDataMap, Map contentMap, Map mBody, String bTypeSuccess,String bTypeFail) { +// if (bizDataMap.containsKey("config")) { +// contentMap.put("mType", "SetResponse"); +// contentMap.put("ts", DateUtil.format(new Date(), "yyyyMMddHHmmss")); +// if ("1".equals(bizDataMap.get("config"))) { +// mBody.put("bType", bTypeSuccess); +// }else { +// mBody.put("bType", bTypeFail); +// } +// sendKafkaMsg(bizDataMap, contentMap, mBody); +// } +// +// } +// +// //设置kafka回复 +// private void sendKafkaMsg(Map bizDataMap, Map contentMap, Map mBody) { +// if (bizDataMap.containsKey("devcode")) { +// contentMap.put("devCode", bizDataMap.get("devcode")); +// } +// contentMap.put("mBody", mBody); +// kafkaTemplate.send("pressure", JSON.toJSONString(contentMap)); +// } +// +//} diff --git a/sensorhub-core/src/main/java/com/casic/missiles/provider/ProcessorInstanceProvider.java b/sensorhub-core/src/main/java/com/casic/missiles/provider/ProcessorInstanceProvider.java index 3d89d0d..833a4b1 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/provider/ProcessorInstanceProvider.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/provider/ProcessorInstanceProvider.java @@ -6,7 +6,7 @@ import com.casic.missiles.cache.ProtocolProcessEventListener; import com.casic.missiles.parser.safe.SafeStrategy; import com.casic.missiles.parser.sender.DataSubscribeProvider; -import 
com.casic.missiles.parser.sender.impl.KafkaSubscribe; +//import com.casic.missiles.parser.sender.impl.KafkaSubscribe; import com.casic.missiles.pojo.*; import com.casic.missiles.registry.DatagramEventRegistry; import com.casic.missiles.registry.SubscribeRegistry; @@ -148,8 +148,8 @@ * 数据订阅 */ public void storeData(List> bizDataMap) { - DataSubscribeProvider subscribeProvider = SpringContextUtil.getBean(KafkaSubscribe.class); - subscribeProvider.publishDataSubscribe(bizDataMap, null); +// DataSubscribeProvider subscribeProvider = SpringContextUtil.getBean(KafkaSubscribe.class); +// subscribeProvider.publishDataSubscribe(bizDataMap, null); } // DataSubscribeProvider dataSubscribeProvider = SpringContextUtil.getBean(processorInstance.getSubscribeBean()); diff --git a/sensorhub-core/src/main/java/com/casic/missiles/replier/SensorhubReplier.java b/sensorhub-core/src/main/java/com/casic/missiles/replier/SensorhubReplier.java index aef5616..327144a 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/replier/SensorhubReplier.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/replier/SensorhubReplier.java @@ -8,10 +8,12 @@ import io.netty.channel.ChannelHandlerContext; import io.netty.channel.SimpleChannelInboundHandler; import io.netty.handler.codec.http.FullHttpResponse; +import lombok.extern.slf4j.Slf4j; /** * @author cz */ +@Slf4j public class SensorhubReplier extends SimpleChannelInboundHandler { /** @@ -33,7 +35,7 @@ //-1为当前下发配置确认,不用回复 if (-1 != parseResult.getReplyCommand()) { ByteBuf replyByteBuf = abstractBuildReplyCommand.excute(parseResult); - System.out.println("返回的报文内容为" + ByteBufUtil.hexDump((ByteBuf) replyByteBuf)); + log.info("返回的报文内容为" + ByteBufUtil.hexDump((ByteBuf) replyByteBuf)); ((ByteBuf) replyByteBuf).resetReaderIndex(); //进行回复 ctx.channel().writeAndFlush(replyByteBuf);
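Review note: the new DirectMemoryReporter added in this patch reads PlatformDependent.usedDirectMemory() once in its constructor and then logs that same cached value every second, so the reported number never changes; the DIRECT_MEMORY_COUNTER field it looks up via reflection is never used afterwards, and the class imports @Component without being registered as a bean. A minimal sketch of a reporter that samples on each tick, assuming the same Netty and Lombok dependencies (illustrative only, not part of the committed change):

package com.casic.missiles.autoconfig;

import io.netty.util.concurrent.GlobalEventExecutor;
import io.netty.util.internal.PlatformDependent;
import lombok.extern.slf4j.Slf4j;

import java.util.concurrent.TimeUnit;

@Slf4j
public class DirectMemoryReporter {

    private static final int KB = 1024;

    public DirectMemoryReporter() {
        // Schedule the report; sampling happens inside doReport() so every tick logs fresh usage.
        GlobalEventExecutor.INSTANCE.scheduleAtFixedRate(this::doReport, 0, 1, TimeUnit.SECONDS);
    }

    public void doReport() {
        // usedDirectMemory() returns the bytes Netty has currently reserved, or -1 if it cannot track them.
        long used = PlatformDependent.usedDirectMemory();
        if (used >= 0) {
            log.debug("netty_direct_memory_log, {}k", used / KB);
        }
    }
}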
exception, Consumer consumer) { - System.out.println("消息详情:" + message); - System.out.println("异常信息::" + exception); - System.out.println("消费者详情::" + consumer.groupMetadata()); - System.out.println("监听主题::" + consumer.listTopics()); - return KafkaListenerErrorHandler.super.handleError(message, exception, consumer); - } - -} +//package com.casic.missiles.autoconfig; +// +//import edu.umd.cs.findbugs.annotations.NonNull; +//import org.apache.kafka.clients.consumer.Consumer; +//import org.springframework.kafka.listener.KafkaListenerErrorHandler; +//import org.springframework.kafka.listener.ListenerExecutionFailedException; +//import org.springframework.messaging.Message; +//import org.springframework.stereotype.Component; +// +//@Component +//public class KafkaConsumerListenerError implements KafkaListenerErrorHandler { +// +// +// @Override +// @NonNull +// public Object handleError(Message message, ListenerExecutionFailedException e) { +// return new Object(); +// } +// +// @Override +// public Object handleError(Message message, ListenerExecutionFailedException exception, Consumer consumer) { +// System.out.println("消息详情:" + message); +// System.out.println("异常信息::" + exception); +// System.out.println("消费者详情::" + consumer.groupMetadata()); +// System.out.println("监听主题::" + consumer.listTopics()); +// return KafkaListenerErrorHandler.super.handleError(message, exception, consumer); +// } +// +//} diff --git a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaProviderConfig.java b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaProviderConfig.java index e8e36ec..1d605ed 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaProviderConfig.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaProviderConfig.java @@ -1,85 +1,85 @@ -package com.casic.missiles.autoconfig; - -import org.apache.kafka.clients.CommonClientConfigs; -import org.apache.kafka.clients.producer.ProducerConfig; -import org.apache.kafka.common.config.SaslConfigs; -import org.apache.kafka.common.security.auth.SecurityProtocol; -import org.apache.kafka.common.serialization.StringSerializer; -import org.springframework.beans.factory.annotation.Value; -import org.springframework.boot.SpringBootConfiguration; -import org.springframework.context.annotation.Bean; -import org.springframework.context.annotation.Primary; -import org.springframework.kafka.core.DefaultKafkaProducerFactory; -import org.springframework.kafka.core.KafkaTemplate; -import org.springframework.kafka.core.ProducerFactory; -import org.springframework.kafka.transaction.KafkaTransactionManager; - -import java.util.HashMap; -import java.util.Map; - -/** - * @author cz - */ -@SpringBootConfiguration -public class KafkaProviderConfig { - - @Value("${spring.kafka.producer.bootstrap-servers}") - private String bootstrapServers; - @Value("${spring.kafka.producer.acks}") - private String acks; - @Value("${spring.kafka.producer.retries}") - private String retries; - @Value("${spring.kafka.producer.batch-size}") - private String batchSize; - @Value("${spring.kafka.producer.buffer-memory}") - private String bufferMemory; - - @Bean - public Map producerConfigs() { - Map props = new HashMap<>(16); - props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers); - //响应模式,我们使用acks=all :只有当所有参与复制的节点全部收到消息时,生产者才会收到一个来自服务器的成功响应。 - props.put(ProducerConfig.ACKS_CONFIG, acks); - //发生错误后,消息重发的次数,开启事务必须大于0 - props.put(ProducerConfig.RETRIES_CONFIG, retries); - //当多个消息发送到相同分区时,生产者会将消息打包到一起,以减少请求交互. 
而不是一条条发送 - props.put(ProducerConfig.BATCH_SIZE_CONFIG, batchSize); - //有的时刻消息比较少,过了很久,比如5min也没有凑够16KB,这样延时就很大,所以需要一个参数. 再设置一个时间,到了这个时间, - props.put(ProducerConfig.LINGER_MS_CONFIG, "5000"); - //生产者内存缓冲区的大小 - props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, bufferMemory); - //序列和消费者对应 - props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class); - props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class); - - //用户名密码配置,没有用户名密码可以去掉以下配置 -// props.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, SecurityProtocol.SASL_PLAINTEXT.name); -// props.put(SaslConfigs.SASL_MECHANISM, "PLAIN"); -// props.put("java.security.auth.login.config", "10000"); - // 可以在nacos配置文件中配置 -// props.put(SaslConfigs.SASL_JAAS_CONFIG, String.format("org.apache.kafka.common.security.plain.PlainLoginModule required username=\"admin\" password=\"admin1234\";")); - return props; - } - - // 生产者工厂 - @Bean("kafkaProduceFactory") - public ProducerFactory producerFactory() { - DefaultKafkaProducerFactory factory = new DefaultKafkaProducerFactory<>(producerConfigs()); - factory.setTransactionIdPrefix("kafkaTx-"); - return factory; - } - - // 事务处理 - // 这里的事务处理会和项目中的其他事务起冲突,所以我一般会把@Bean去掉,不用spring代理 - @Bean("kafkaTransactionManager") - @Primary - public KafkaTransactionManager kafkaTransactionManager(ProducerFactory producerFactory) { - return new KafkaTransactionManager(producerFactory); - } - - @Bean - public KafkaTemplate kafkaTemplate() { - return new KafkaTemplate<>(producerFactory()); - } - -} +//package com.casic.missiles.autoconfig; +// +//import org.apache.kafka.clients.CommonClientConfigs; +//import org.apache.kafka.clients.producer.ProducerConfig; +//import org.apache.kafka.common.config.SaslConfigs; +//import org.apache.kafka.common.security.auth.SecurityProtocol; +//import org.apache.kafka.common.serialization.StringSerializer; +//import org.springframework.beans.factory.annotation.Value; +//import org.springframework.boot.SpringBootConfiguration; +//import org.springframework.context.annotation.Bean; +//import org.springframework.context.annotation.Primary; +//import org.springframework.kafka.core.DefaultKafkaProducerFactory; +//import org.springframework.kafka.core.KafkaTemplate; +//import org.springframework.kafka.core.ProducerFactory; +//import org.springframework.kafka.transaction.KafkaTransactionManager; +// +//import java.util.HashMap; +//import java.util.Map; +// +///** +// * @author cz +// */ +//@SpringBootConfiguration +//public class KafkaProviderConfig { +// +// @Value("${spring.kafka.producer.bootstrap-servers}") +// private String bootstrapServers; +// @Value("${spring.kafka.producer.acks}") +// private String acks; +// @Value("${spring.kafka.producer.retries}") +// private String retries; +// @Value("${spring.kafka.producer.batch-size}") +// private String batchSize; +// @Value("${spring.kafka.producer.buffer-memory}") +// private String bufferMemory; +// +// @Bean +// public Map producerConfigs() { +// Map props = new HashMap<>(16); +// props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers); +// //响应模式,我们使用acks=all :只有当所有参与复制的节点全部收到消息时,生产者才会收到一个来自服务器的成功响应。 +// props.put(ProducerConfig.ACKS_CONFIG, acks); +// //发生错误后,消息重发的次数,开启事务必须大于0 +// props.put(ProducerConfig.RETRIES_CONFIG, retries); +// //当多个消息发送到相同分区时,生产者会将消息打包到一起,以减少请求交互. 而不是一条条发送 +// props.put(ProducerConfig.BATCH_SIZE_CONFIG, batchSize); +// //有的时刻消息比较少,过了很久,比如5min也没有凑够16KB,这样延时就很大,所以需要一个参数. 
再设置一个时间,到了这个时间, +// props.put(ProducerConfig.LINGER_MS_CONFIG, "5000"); +// //生产者内存缓冲区的大小 +// props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, bufferMemory); +// //序列和消费者对应 +// props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class); +// props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class); +// +// //用户名密码配置,没有用户名密码可以去掉以下配置 +//// props.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, SecurityProtocol.SASL_PLAINTEXT.name); +//// props.put(SaslConfigs.SASL_MECHANISM, "PLAIN"); +//// props.put("java.security.auth.login.config", "10000"); +// // 可以在nacos配置文件中配置 +//// props.put(SaslConfigs.SASL_JAAS_CONFIG, String.format("org.apache.kafka.common.security.plain.PlainLoginModule required username=\"admin\" password=\"admin1234\";")); +// return props; +// } +// +// // 生产者工厂 +// @Bean("kafkaProduceFactory") +// public ProducerFactory producerFactory() { +// DefaultKafkaProducerFactory factory = new DefaultKafkaProducerFactory<>(producerConfigs()); +// factory.setTransactionIdPrefix("kafkaTx-"); +// return factory; +// } +// +// // 事务处理 +// // 这里的事务处理会和项目中的其他事务起冲突,所以我一般会把@Bean去掉,不用spring代理 +// @Bean("kafkaTransactionManager") +// @Primary +// public KafkaTransactionManager kafkaTransactionManager(ProducerFactory producerFactory) { +// return new KafkaTransactionManager(producerFactory); +// } +// +// @Bean +// public KafkaTemplate kafkaTemplate() { +// return new KafkaTemplate<>(producerFactory()); +// } +// +//} diff --git a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaSendResultHandler.java b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaSendResultHandler.java index 1fdac64..1598ee0 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaSendResultHandler.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaSendResultHandler.java @@ -1,22 +1,22 @@ -package com.casic.missiles.autoconfig; - -import org.apache.kafka.clients.producer.ProducerRecord; -import org.apache.kafka.clients.producer.RecordMetadata; -import org.springframework.kafka.support.ProducerListener; -import org.springframework.stereotype.Component; - -import javax.annotation.Nullable; - -@Component -public class KafkaSendResultHandler implements ProducerListener { - - @Override - public void onSuccess(ProducerRecord producerRecord, RecordMetadata recordMetadata) { - System.out.println("消息发送成功:" + producerRecord.toString()); - } - - @Override - public void onError(ProducerRecord producerRecord, @Nullable RecordMetadata recordMetadata, Exception exception) { - System.out.println("消息发送失败:" + producerRecord.toString() + exception.getMessage()); - } -} +//package com.casic.missiles.autoconfig; +// +//import org.apache.kafka.clients.producer.ProducerRecord; +//import org.apache.kafka.clients.producer.RecordMetadata; +//import org.springframework.kafka.support.ProducerListener; +//import org.springframework.stereotype.Component; +// +//import javax.annotation.Nullable; +// +//@Component +//public class KafkaSendResultHandler implements ProducerListener { +// +// @Override +// public void onSuccess(ProducerRecord producerRecord, RecordMetadata recordMetadata) { +// System.out.println("消息发送成功:" + producerRecord.toString()); +// } +// +// @Override +// public void onError(ProducerRecord producerRecord, @Nullable RecordMetadata recordMetadata, Exception exception) { +// System.out.println("消息发送失败:" + producerRecord.toString() + exception.getMessage()); +// } +//} diff --git 
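The KafkaSendResultHandler diff above shows the producer callback reporting results through System.out. Should the handler be re-enabled, the same callback can log through slf4j, matching the logging style this commit introduces in SensorhubReplier. A sketch with illustrative names, not part of the commit:

package com.casic.missiles.autoconfig;

import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.springframework.kafka.support.ProducerListener;

import javax.annotation.Nullable;

// Sketch: same ProducerListener contract as the class above, logging via slf4j.
@Slf4j
public class LoggingSendResultHandler implements ProducerListener<String, Object> {

    @Override
    public void onSuccess(ProducerRecord<String, Object> producerRecord, RecordMetadata recordMetadata) {
        // partition/offset come from the broker acknowledgement
        log.info("Kafka send ok: topic={}, partition={}, offset={}",
                recordMetadata.topic(), recordMetadata.partition(), recordMetadata.offset());
    }

    @Override
    public void onError(ProducerRecord<String, Object> producerRecord,
                        @Nullable RecordMetadata recordMetadata, Exception exception) {
        log.error("Kafka send failed: record={}", producerRecord, exception);
    }
}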
a/sensorhub-core/src/main/java/com/casic/missiles/cache/ProtocolProcessEventListener.java b/sensorhub-core/src/main/java/com/casic/missiles/cache/ProtocolProcessEventListener.java index b0bf96d..5144189 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/cache/ProtocolProcessEventListener.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/cache/ProtocolProcessEventListener.java @@ -157,7 +157,7 @@ deviceDataList.add(deviceData); } //批量保存 - deviceDataService.saveBatch(deviceDataList); +// deviceDataService.saveBatch(deviceDataList); } } //移除缓存 diff --git a/sensorhub-core/src/main/java/com/casic/missiles/controller/AepCommandSend.java b/sensorhub-core/src/main/java/com/casic/missiles/controller/AepCommandSend.java index 2d0aeae..5e3ba38 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/controller/AepCommandSend.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/controller/AepCommandSend.java @@ -64,7 +64,7 @@ request.setParamMasterKey("0a77886fae4f4ff68d926adeb3a3ef5b"); // single value request.setBody(JSONObject.toJSONString(busConfigParam).getBytes()); CreateCommandLwm2mProfileResponse msgResponse = client.CreateCommandLwm2mProfile(request); - log.info("-----" + msgResponse.getMessage()); + log.info("send status-----" + msgResponse.getMessage()); } catch (Exception ex) { } finally { client.shutdown(); diff --git a/sensorhub-core/src/main/java/com/casic/missiles/controller/KafkaController.java b/sensorhub-core/src/main/java/com/casic/missiles/controller/KafkaController.java deleted file mode 100644 index d679bf1..0000000 --- a/sensorhub-core/src/main/java/com/casic/missiles/controller/KafkaController.java +++ /dev/null @@ -1,59 +0,0 @@ -package com.casic.missiles.controller; - -import com.casic.missiles.autoconfig.KafkaSendResultHandler; -import com.casic.missiles.parser.sender.impl.KafkaSubscribe; -import org.apache.kafka.clients.producer.ProducerRecord; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.kafka.core.KafkaTemplate; -import org.springframework.kafka.support.KafkaHeaders; -import org.springframework.messaging.MessageHeaders; -import org.springframework.messaging.support.GenericMessage; -import org.springframework.transaction.annotation.Transactional; -import org.springframework.web.bind.annotation.RequestMapping; -import org.springframework.web.bind.annotation.RestController; - -import java.util.HashMap; -import java.util.Map; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.TimeoutException; - -@RestController -@RequestMapping("/provider") -//这个注解代表这个类开启Springboot事务,因为我们在Kafka的配置文件开启了Kafka事务,不然会报错 -public class KafkaController { - - @Autowired - private KafkaSubscribe kafkaSubscribe; - - @RequestMapping("/send") - @Transactional - public String sendMultiple() throws Exception { - kafkaSubscribe.publishDataSubscribe(null, null); - return "okk"; - } - - -// /** -// * Kafka提供了多种构建消息的方式 -// * -// * @throws ExecutionException -// * @throws InterruptedException -// * @throws TimeoutException -// */ -// public void sendDemo() throws ExecutionException, InterruptedException, TimeoutException { -// //后面的get代表同步发送,括号内时间可选,代表超过这个时间会抛出超时异常,但是仍会发送成功 -// kafkaTemplate.send("topic1", "发给topic1").get(); -// //使用ProducerRecord发送消息 -// ProducerRecord producerRecord = new ProducerRecord<>("topic.quick.demo", "use ProducerRecord to send message"); -// kafkaTemplate.send(producerRecord); -// //使用Message发送消息 -// Map map = new HashMap<>(); -// map.put(KafkaHeaders.TOPIC, "topic.quick.demo"); 
-// map.put(KafkaHeaders.PARTITION_ID, 0); -// map.put(KafkaHeaders.MESSAGE_KEY, 0); -// GenericMessage message = new GenericMessage<>("use Message to send message", new MessageHeaders(map)); -// kafkaTemplate.send(message); -// } - -} - diff --git a/sensorhub-core/src/main/java/com/casic/missiles/controller/ReceiveController.java b/sensorhub-core/src/main/java/com/casic/missiles/controller/ReceiveController.java index e5fa505..dfed36a 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/controller/ReceiveController.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/controller/ReceiveController.java @@ -18,13 +18,13 @@ private final ThreadPoolExecutor threadPoolExecutor = new ThreadPoolExecutor(5, 10, 60, TimeUnit.SECONDS, new LinkedBlockingDeque<>(10)); @RequestMapping("/data") - public Object testNbResponse(@RequestBody Map h2sDataMap) { + public Object testNbResponse(@RequestBody Map dataMap) { ResponseData responseData = new ResponseData(); - System.out.println(JSON.toJSON(h2sDataMap)); + System.out.println(JSON.toJSON(dataMap)); threadPoolExecutor.execute( () -> { AepCommandSend aepCommandSend = new AepCommandSend(); - aepCommandSend.sendConfig(h2sDataMap); + aepCommandSend.sendConfig(dataMap); } ); responseData.setCode(200); diff --git a/sensorhub-core/src/main/java/com/casic/missiles/netty/SensorhubServer.java b/sensorhub-core/src/main/java/com/casic/missiles/netty/SensorhubServer.java index c4a534c..a946173 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/netty/SensorhubServer.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/netty/SensorhubServer.java @@ -3,6 +3,7 @@ import com.casic.missiles.autoconfig.SensorhubProperties; import com.casic.missiles.netty.handler.SensorhubServerChannelInitialHandler; import io.netty.bootstrap.ServerBootstrap; +import io.netty.buffer.PooledByteBufAllocator; import io.netty.channel.ChannelFuture; import io.netty.channel.ChannelOption; import io.netty.channel.EventLoopGroup; @@ -71,6 +72,8 @@ b.group(bossGroup, workerGroup) // 设置EventLoopGroup .channel(NioServerSocketChannel.class) // 指明新的Channel的类型 .childHandler(new SensorhubServerChannelInitialHandler()) // 指定ChannelHandler + .option(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT) + .childOption(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT) .option(ChannelOption.SO_BACKLOG, 1024) // 设置的ServerChannel的一些选项 .childOption(ChannelOption.SO_KEEPALIVE, true); // 当SO_KEEPALIVE=true的时候,服务端可以探测客户端的连接是否还存活着,如果客户端关闭了,那么服务端的连接可以关闭掉,释放资源 //增加检测 diff --git a/sensorhub-core/src/main/java/com/casic/missiles/parser/GenericProtocolParser.java b/sensorhub-core/src/main/java/com/casic/missiles/parser/GenericProtocolParser.java index bb41939..ec9a04f 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/parser/GenericProtocolParser.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/parser/GenericProtocolParser.java @@ -122,7 +122,7 @@ } ProtocolProcessEventListener.setTask(devcode, bizDataMap, 3); //存储数据 - datagramEventProvider.storeData(bizDataMap); +// datagramEventProvider.storeData(bizDataMap); } catch (RuntimeException rex) { log.error("解析出现异常,异常信息为{}", rex); //数据发送,异步,异常拦截 diff --git a/sensorhub-core/src/main/java/com/casic/missiles/parser/ProtocolParserSupport.java b/sensorhub-core/src/main/java/com/casic/missiles/parser/ProtocolParserSupport.java index e4a79ae..c886d25 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/parser/ProtocolParserSupport.java +++ 
b/sensorhub-core/src/main/java/com/casic/missiles/parser/ProtocolParserSupport.java @@ -29,6 +29,13 @@ public class ProtocolParserSupport { + /** + * 获取协议的固定数据信息 + * + * @param protocolFactory + * @param wholeDatagramByte + * @return + */ protected Map getParseFixedDataMap(AbstractProtocolConfigFactory protocolFactory, ByteBuf wholeDatagramByte) { ProtocolFieldConfigProvider protocolFieldConfigProvider = protocolFactory.getProtocolFieldConfigProvider(); RuleConfigProvider ruleConfigProvider = protocolFactory.getRuleConfigProvider(); @@ -41,7 +48,7 @@ //打印源数据,设备编号 ProcessEventTask processEventTask = ProcessEventTask.builder() .devcode((String) parseFixedDataMap.get("devcode")) - .decryptSoureData(ByteBufUtil.hexDump(wholeDatagramByte)) + .decryptSoureData(ByteBufUtil.hexDump(wholeDatagramByte).substring(wholeDatagramByte.readerIndex())) .build(); ProtocolProcessEventListener.asynAddTask(processEventTask); return parseFixedDataMap; diff --git a/sensorhub-core/src/main/java/com/casic/missiles/parser/SensorhubDecoder.java b/sensorhub-core/src/main/java/com/casic/missiles/parser/SensorhubDecoder.java index c8c54f8..fccfa7c 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/parser/SensorhubDecoder.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/parser/SensorhubDecoder.java @@ -1,14 +1,17 @@ package com.casic.missiles.parser; +import com.casic.missiles.autoconfig.DirectMemoryReporter; import com.casic.missiles.parser.predecodec.AbstractPretreatment; import com.casic.missiles.pojo.ParseResult; import com.casic.missiles.provider.ProtocolConfigProvider; import com.casic.missiles.util.ClazzUtil; import io.netty.buffer.ByteBuf; +import io.netty.buffer.ByteBufAllocator; import io.netty.buffer.ByteBufUtil; import io.netty.channel.ChannelHandlerContext; import io.netty.handler.codec.ByteToMessageDecoder; import lombok.extern.slf4j.Slf4j; +import org.apache.commons.codec.binary.Hex; import java.io.BufferedInputStream; import java.io.File; @@ -33,29 +36,43 @@ * 3、将从通用的协议解析器得到的结果进行保存到list,传递给回复的handler,进行相关的回复命令操作 */ @Override - public void decode(ChannelHandlerContext ctx, ByteBuf buffer, List list){ + public void decode(ChannelHandlerContext ctx, ByteBuf buffer, List list) { //添加前置处理器 List abstractPreProcessingList = ClazzUtil.getSubClassList(AbstractPretreatment.class, true); ByteBuf standardByteBuf = buffer; - String oldBuff=ByteBufUtil.hexDump(standardByteBuf); + String oldBuff = ByteBufUtil.hexDump(standardByteBuf); for (AbstractPretreatment abstractPretreatment : abstractPreProcessingList) { standardByteBuf = abstractPretreatment.decode(standardByteBuf); } - boolean pretreatmentStatus=!oldBuff.equals(ByteBufUtil.hexDump(standardByteBuf)); + boolean pretreatmentStatus = !oldBuff.equals(ByteBufUtil.hexDump(standardByteBuf)); log.info(ByteBufUtil.hexDump(standardByteBuf)); //这里可以增加一些前置处理,例如判断拆包合包等网络流的操作 ProtocolParser protocolParser = new GenericProtocolParser(); - System.out.println(protocolParser); ProtocolConfigProvider protocolConfigProvider = new ProtocolConfigProvider(); ParseResult parseResult = null; +// DirectMemoryReporter memoryReporter=new DirectMemoryReporter(); //无论什么情况都交给,这里组装的内容,在回复的时候有效使用 + Integer pre = 0; while (parseResult == null && standardByteBuf.readerIndex() != standardByteBuf.writerIndex()) { + pre = standardByteBuf.readerIndex(); parseResult = protocolParser.doParseProtocol(standardByteBuf, protocolConfigProvider); + //避免死循环 + if (pre == standardByteBuf.readerIndex()) { + break; + } } +// destroy(standardByteBuf); if (parseResult != null) { 
parseResult.setRequestCode(pretreatmentStatus); list.add(parseResult); } } +// public void destroy(ByteBuf byteBuf) { +// if (byteBuf != null && byteBuf.refCnt() > 0) { +// byteBuf.release(); +// byteBuf = null; +// } +// } + } diff --git a/sensorhub-core/src/main/java/com/casic/missiles/parser/processor/reply/DevcieConfigConfirm.java b/sensorhub-core/src/main/java/com/casic/missiles/parser/processor/reply/DevcieConfigConfirm.java index 1fd58f2..5ea30a9 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/parser/processor/reply/DevcieConfigConfirm.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/parser/processor/reply/DevcieConfigConfirm.java @@ -41,7 +41,7 @@ } } //下发配置成功 -// redisCommon.removeKeyByDevcode(devcode); + redisCommon.removeKeyByDevcode(devcode); configDataMap.put("config", "1"); } return result; diff --git a/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/combined/CombinedFieldSupport.java b/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/combined/CombinedFieldSupport.java index b49c027..9226dc3 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/combined/CombinedFieldSupport.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/combined/CombinedFieldSupport.java @@ -20,13 +20,12 @@ */ public class CombinedFieldSupport { - //深拷贝不影响存储的对象 + //通过深拷贝获取字段配置 protected FieldConfig getFieldConfigById(Long fieldId, Map fieldConfigsMap) { if (ObjectUtils.isNotEmpty(fieldId)) { FieldConfig fieldConfig = fieldConfigsMap.get(fieldId); FieldConfig newDeepCopyFieldConfig = new FieldConfig(); //通过深拷贝传入 -// System.out.println("fieldId:"+fieldId+" json:"+ JSON.toJSON(fieldConfig)); BeanUtils.copyProperties(fieldConfig, newDeepCopyFieldConfig); return newDeepCopyFieldConfig; } else { diff --git a/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/combined/GenericCombinedFieldResolver.java b/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/combined/GenericCombinedFieldResolver.java index 58c419d..72b75dc 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/combined/GenericCombinedFieldResolver.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/combined/GenericCombinedFieldResolver.java @@ -5,6 +5,7 @@ import com.baomidou.mybatisplus.core.toolkit.CollectionUtils; import com.casic.missiles.enums.EngineExceptionEnum; import com.casic.missiles.exception.EngineException; +import com.casic.missiles.parser.GenericProtocolParser; import com.casic.missiles.parser.resolver.combined.impl.BizFieldParseProcessor; import com.casic.missiles.parser.resolver.combined.impl.PreBizFieldParseProcessor; import com.casic.missiles.parser.resolver.combined.impl.PreLeadCodeProcessor; @@ -13,13 +14,18 @@ import com.casic.missiles.pojo.CombinedFieldProcessorParam; import com.casic.missiles.pojo.FieldConfig; import com.casic.missiles.pojo.FieldRuleConfig; +import com.casic.missiles.util.SpringContextUtil; import io.netty.buffer.ByteBuf; import io.netty.buffer.ByteBufUtil; +import io.netty.util.internal.PlatformDependent; import lombok.extern.slf4j.Slf4j; +import org.springframework.util.ReflectionUtils; +import java.lang.reflect.Field; import java.util.ArrayList; import java.util.List; import java.util.Map; +import java.util.concurrent.atomic.AtomicLong; import java.util.stream.Collectors; /** @@ -31,7 +37,7 @@ public class GenericCombinedFieldResolver { /** - * 组合字段解析主要流程类 + * 组合字段解析主要流程类 * 通过查询,字段长度长度随机 */ @@ -42,16 +48,16 @@ List> storeObjectList = 
combinedFieldParam.getStoreObjectList(); while (byteBuf.readerIndex() < byteBuf.writerIndex()) { for (AbstractCombinedFieldProcessor abstractProcessor : abstractProcessorList) { - Integer oldLength = byteBuf.readerIndex(); - median = abstractProcessor.invoke(combinedFieldParam); - if (byteBuf.readerIndex() >= byteBuf.writerIndex()) { - return; - } - Assert.isFalse(oldLength == byteBuf.readerIndex(), () -> { - throw new EngineException(EngineExceptionEnum.COMBINED_CONFIG_MATCH_FAILED); - }); - // 将前一个节点作为参数传入下一次,保留前一个节点的信息 - combinedFieldParam.setPreProcessorResult(median); + Integer oldLength = byteBuf.readerIndex(); + median = abstractProcessor.invoke(combinedFieldParam); + if (byteBuf.readerIndex() >= byteBuf.writerIndex()) { + return; + } + Assert.isFalse(oldLength == byteBuf.readerIndex(), () -> { + throw new EngineException(EngineExceptionEnum.COMBINED_CONFIG_MATCH_FAILED); + }); + // 将前一个节点作为参数传入下一次,保留前一个节点的信息 + combinedFieldParam.setPreProcessorResult(median); } } System.out.println(JSON.toJSON(storeObjectList)); diff --git a/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/combined/impl/BizFieldParseProcessor.java b/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/combined/impl/BizFieldParseProcessor.java index 4cd61a7..f65e4eb 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/combined/impl/BizFieldParseProcessor.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/combined/impl/BizFieldParseProcessor.java @@ -62,9 +62,7 @@ * 计算处理后的偏移位置 */ private Integer calculateAfterProcessPosition(AbstractFieldConfig newProtocolFieldConfig, CombinedFieldProcessorParam combinedFieldParam) { -// Integer originPositionIndex = newProtocolFieldConfig.getOriginPositionByte() - combinedFieldParam.getByteBuf().readerIndex(); Integer mergeBitToByte = 0; -// mergeBitToByte += originPositionIndex; if (newProtocolFieldConfig.getOffsetUnit().equals("bit")) { mergeBitToByte += (newProtocolFieldConfig.getOriginPositionBit() + newProtocolFieldConfig.getOffsetLength()) / 8; } else { diff --git a/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/fields/BitFieldParser.java b/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/fields/BitFieldParser.java index a855b68..2f77154 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/fields/BitFieldParser.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/fields/BitFieldParser.java @@ -59,7 +59,9 @@ byte[] binaryBytes = getBytesFromBinaryStr(binaryStr); ByteBuf byteBuf = ByteBufAllocator.DEFAULT.buffer(); byteBuf.writeBytes(binaryBytes); - return doResolveFieldByteRule(byteBuf, ruleIds, fieldRuleConfigMap, networkOrder); + Object resolveValue = doResolveFieldByteRule(byteBuf, ruleIds, fieldRuleConfigMap, networkOrder); + byteBuf.release(); + return resolveValue; } diff --git a/sensorhub-core/src/main/java/com/casic/missiles/parser/safe/impl/Sm4.java b/sensorhub-core/src/main/java/com/casic/missiles/parser/safe/impl/Sm4.java index 18a03e5..b001944 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/parser/safe/impl/Sm4.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/parser/safe/impl/Sm4.java @@ -4,7 +4,6 @@ import io.netty.buffer.ByteBuf; import io.netty.buffer.ByteBufAllocator; import io.netty.buffer.ByteBufUtil; -import io.netty.util.ReferenceCountUtil; import lombok.extern.slf4j.Slf4j; import org.bouncycastle.crypto.engines.SM4Engine; import org.bouncycastle.crypto.params.KeyParameter; diff --git 
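The BitFieldParser hunk above fixes a leak by releasing the temporary ByteBuf once the byte rules are resolved, but the buffer would still leak if doResolveFieldByteRule threw. A try/finally wrapper closes that remaining gap; this helper is a sketch under the assumption that such a utility fits the codebase, not an existing project API:

package com.casic.missiles.parser.resolver.fields;

import io.netty.buffer.ByteBuf;
import io.netty.buffer.ByteBufAllocator;

import java.util.function.Function;

// Sketch: allocate a scratch buffer, hand it to the resolver, and release it
// on every path, including the exceptional one.
public final class ScopedByteBuf {

    private ScopedByteBuf() {
    }

    public static <T> T withBuffer(byte[] payload, Function<ByteBuf, T> resolver) {
        ByteBuf byteBuf = ByteBufAllocator.DEFAULT.buffer(payload.length);
        try {
            byteBuf.writeBytes(payload);
            return resolver.apply(byteBuf); // e.g. doResolveFieldByteRule(...)
        } finally {
            byteBuf.release(); // guaranteed, whether the resolver returns or throws
        }
    }
}

BitFieldParser could then return ScopedByteBuf.withBuffer(binaryBytes, buf -> doResolveFieldByteRule(buf, ruleIds, fieldRuleConfigMap, networkOrder)) and keep the release guarantee on every exit path.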
a/sensorhub-core/src/main/java/com/casic/missiles/parser/sender/impl/KafkaSubscribe.java b/sensorhub-core/src/main/java/com/casic/missiles/parser/sender/impl/KafkaSubscribe.java index aed848d..b95d1d8 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/parser/sender/impl/KafkaSubscribe.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/parser/sender/impl/KafkaSubscribe.java @@ -1,91 +1,92 @@ -package com.casic.missiles.parser.sender.impl; - -import cn.hutool.core.date.DateUtil; -import com.alibaba.fastjson.JSON; -import com.casic.missiles.autoconfig.KafkaSendResultHandler; -import com.casic.missiles.parser.sender.DataSubscribeProvider; -import com.casic.missiles.pojo.SubscribeDetailConfig; -import org.springframework.kafka.core.KafkaTemplate; -import org.springframework.stereotype.Component; -import org.springframework.transaction.annotation.Transactional; -import org.springframework.util.CollectionUtils; - -import java.util.Date; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -/** - * @author cz - * @date 2023-11-10 - */ -@Component("kafka") -public class KafkaSubscribe implements DataSubscribeProvider { - - private KafkaTemplate kafkaTemplate; - - public KafkaSubscribe(KafkaTemplate kafkaTemplate, KafkaSendResultHandler kafkaSendResultHandler) { - this.kafkaTemplate = kafkaTemplate; - //回调方法、异常处理 - this.kafkaTemplate.setProducerListener(kafkaSendResultHandler); - } - - @Override - @Transactional - public void publishDataSubscribe(List> bizDataMapList, SubscribeDetailConfig subscribeDetailConfig) { - if (CollectionUtils.isEmpty(bizDataMapList)) { - return; - } - Map contentMap = new HashMap(), mBody = new HashMap(); - Map bizDataMap = bizDataMapList.get(0); - switch ((Integer) bizDataMap.get("deviceType")) { - case 32: - contentMap.put("devType", "GasDetector"); - setEventType(bizDataMap, contentMap, mBody, "GasConfigSuccess", "GasConfigFail"); - break; - case 31: - contentMap.put("devType", "Pressure"); - setEventType(bizDataMap, contentMap, mBody, "PressureConfigSuccess", "PressureConfigFail"); - } - - if (bizDataMap.containsKey("dataValue")) { - contentMap.put("mType", "Data"); - if (bizDataMap.containsKey("cell")) { - mBody.put("cell", bizDataMap.get("cell")); - } - mBody.put("datas", bizDataMapList); - sendKafkaMsg(bizDataMap, contentMap, mBody); - } - if (bizDataMap.containsKey("imei")) { - contentMap.put("mType", "StartupRequest"); - mBody.put("iccid", bizDataMap.get("iccid")); - mBody.put("imei", bizDataMap.get("imei")); - sendKafkaMsg(bizDataMap, contentMap, mBody); - } - - } - - private void setEventType(Map bizDataMap, Map contentMap, Map mBody, String bTypeSuccess,String bTypeFail) { - if (bizDataMap.containsKey("config")) { - contentMap.put("mType", "SetResponse"); - contentMap.put("ts", DateUtil.format(new Date(), "yyyyMMddHHmmss")); - if ("1".equals(bizDataMap.get("config"))) { - mBody.put("bType", bTypeSuccess); - }else { - mBody.put("bType", bTypeFail); - } - sendKafkaMsg(bizDataMap, contentMap, mBody); - } - - } - - - private void sendKafkaMsg(Map bizDataMap, Map contentMap, Map mBody) { - if (bizDataMap.containsKey("devcode")) { - contentMap.put("devCode", bizDataMap.get("devcode")); - } - contentMap.put("mBody", mBody); - kafkaTemplate.send("pressure", JSON.toJSONString(contentMap)); - } - -} +//package com.casic.missiles.parser.sender.impl; +// +//import cn.hutool.core.date.DateUtil; +//import com.alibaba.fastjson.JSON; +//import com.casic.missiles.autoconfig.KafkaSendResultHandler; +//import 
com.casic.missiles.parser.sender.DataSubscribeProvider; +//import com.casic.missiles.pojo.SubscribeDetailConfig; +//import org.springframework.kafka.core.KafkaTemplate; +//import org.springframework.stereotype.Component; +//import org.springframework.transaction.annotation.Transactional; +//import org.springframework.util.CollectionUtils; +// +//import java.util.Date; +//import java.util.HashMap; +//import java.util.List; +//import java.util.Map; +// +///** +// * @author cz +// * @date 2023-11-10 +// */ +//@Component("kafka") +//public class KafkaSubscribe implements DataSubscribeProvider { +// +// private KafkaTemplate kafkaTemplate; +// +// public KafkaSubscribe(KafkaTemplate kafkaTemplate, KafkaSendResultHandler kafkaSendResultHandler) { +// this.kafkaTemplate = kafkaTemplate; +// //回调方法、异常处理 +// this.kafkaTemplate.setProducerListener(kafkaSendResultHandler); +// } +// +// @Override +// @Transactional +// public void publishDataSubscribe(List> bizDataMapList, SubscribeDetailConfig subscribeDetailConfig) { +// if (CollectionUtils.isEmpty(bizDataMapList)) { +// return; +// } +// Map contentMap = new HashMap(), mBody = new HashMap(); +// Map bizDataMap = bizDataMapList.get(0); +// switch ((Integer) bizDataMap.get("deviceType")) { +// case 32: +// contentMap.put("devType", "GasDetector"); +// setEventType(bizDataMap, contentMap, mBody, "GasConfigSuccess", "GasConfigFail"); +// break; +// case 31: +// contentMap.put("devType", "Pressure"); +// setEventType(bizDataMap, contentMap, mBody, "PressureConfigSuccess", "PressureConfigFail"); +// } +// +// if (bizDataMap.containsKey("dataValue")) { +// contentMap.put("mType", "Data"); +// if (bizDataMap.containsKey("cell")) { +// mBody.put("cell", bizDataMap.get("cell")); +// } +// mBody.put("datas", bizDataMapList); +// sendKafkaMsg(bizDataMap, contentMap, mBody); +// } +// if (bizDataMap.containsKey("imei")) { +// contentMap.put("mType", "StartupRequest"); +// mBody.put("iccid", bizDataMap.get("iccid")); +// mBody.put("imei", bizDataMap.get("imei")); +// sendKafkaMsg(bizDataMap, contentMap, mBody); +// } +// +// } +// +// //设置下发配置回复 +// private void setEventType(Map bizDataMap, Map contentMap, Map mBody, String bTypeSuccess,String bTypeFail) { +// if (bizDataMap.containsKey("config")) { +// contentMap.put("mType", "SetResponse"); +// contentMap.put("ts", DateUtil.format(new Date(), "yyyyMMddHHmmss")); +// if ("1".equals(bizDataMap.get("config"))) { +// mBody.put("bType", bTypeSuccess); +// }else { +// mBody.put("bType", bTypeFail); +// } +// sendKafkaMsg(bizDataMap, contentMap, mBody); +// } +// +// } +// +// //设置kafka回复 +// private void sendKafkaMsg(Map bizDataMap, Map contentMap, Map mBody) { +// if (bizDataMap.containsKey("devcode")) { +// contentMap.put("devCode", bizDataMap.get("devcode")); +// } +// contentMap.put("mBody", mBody); +// kafkaTemplate.send("pressure", JSON.toJSONString(contentMap)); +// } +// +//} diff --git a/sensorhub-core/src/main/java/com/casic/missiles/provider/ProcessorInstanceProvider.java b/sensorhub-core/src/main/java/com/casic/missiles/provider/ProcessorInstanceProvider.java index 3d89d0d..833a4b1 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/provider/ProcessorInstanceProvider.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/provider/ProcessorInstanceProvider.java @@ -6,7 +6,7 @@ import com.casic.missiles.cache.ProtocolProcessEventListener; import com.casic.missiles.parser.safe.SafeStrategy; import com.casic.missiles.parser.sender.DataSubscribeProvider; -import 
com.casic.missiles.parser.sender.impl.KafkaSubscribe; +//import com.casic.missiles.parser.sender.impl.KafkaSubscribe; import com.casic.missiles.pojo.*; import com.casic.missiles.registry.DatagramEventRegistry; import com.casic.missiles.registry.SubscribeRegistry; @@ -148,8 +148,8 @@ * 数据订阅 */ public void storeData(List> bizDataMap) { - DataSubscribeProvider subscribeProvider = SpringContextUtil.getBean(KafkaSubscribe.class); - subscribeProvider.publishDataSubscribe(bizDataMap, null); +// DataSubscribeProvider subscribeProvider = SpringContextUtil.getBean(KafkaSubscribe.class); +// subscribeProvider.publishDataSubscribe(bizDataMap, null); } // DataSubscribeProvider dataSubscribeProvider = SpringContextUtil.getBean(processorInstance.getSubscribeBean()); diff --git a/sensorhub-core/src/main/java/com/casic/missiles/replier/SensorhubReplier.java b/sensorhub-core/src/main/java/com/casic/missiles/replier/SensorhubReplier.java index aef5616..327144a 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/replier/SensorhubReplier.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/replier/SensorhubReplier.java @@ -8,10 +8,12 @@ import io.netty.channel.ChannelHandlerContext; import io.netty.channel.SimpleChannelInboundHandler; import io.netty.handler.codec.http.FullHttpResponse; +import lombok.extern.slf4j.Slf4j; /** * @author cz */ +@Slf4j public class SensorhubReplier extends SimpleChannelInboundHandler { /** @@ -33,7 +35,7 @@ //-1为当前下发配置确认,不用回复 if (-1 != parseResult.getReplyCommand()) { ByteBuf replyByteBuf = abstractBuildReplyCommand.excute(parseResult); - System.out.println("返回的报文内容为" + ByteBufUtil.hexDump((ByteBuf) replyByteBuf)); + log.info("返回的报文内容为" + ByteBufUtil.hexDump((ByteBuf) replyByteBuf)); ((ByteBuf) replyByteBuf).resetReaderIndex(); //进行回复 ctx.channel().writeAndFlush(replyByteBuf); diff --git a/sensorhub-support/src/main/java/com/casic/missiles/enums/FileNameEnums.java b/sensorhub-support/src/main/java/com/casic/missiles/enums/FileNameEnums.java index babf0a3..6243a7a 100644 --- a/sensorhub-support/src/main/java/com/casic/missiles/enums/FileNameEnums.java +++ b/sensorhub-support/src/main/java/com/casic/missiles/enums/FileNameEnums.java @@ -8,7 +8,7 @@ FILE_NAMES(new HashMap() { { put(COMMERCIAL_GAS, "GT_BIR1000-APP_v1.1.bin"); - put(PRESSURE, "BIRMM-P1000N-APP_v1.0.bin"); + put(PRESSURE, "BIRMM-P1000N-APP_v1.1.bin"); } });
KafkaSendResultHandler implements ProducerListener { +// +// @Override +// public void onSuccess(ProducerRecord producerRecord, RecordMetadata recordMetadata) { +// System.out.println("消息发送成功:" + producerRecord.toString()); +// } +// +// @Override +// public void onError(ProducerRecord producerRecord, @Nullable RecordMetadata recordMetadata, Exception exception) { +// System.out.println("消息发送失败:" + producerRecord.toString() + exception.getMessage()); +// } +//} diff --git a/sensorhub-core/src/main/java/com/casic/missiles/cache/ProtocolProcessEventListener.java b/sensorhub-core/src/main/java/com/casic/missiles/cache/ProtocolProcessEventListener.java index b0bf96d..5144189 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/cache/ProtocolProcessEventListener.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/cache/ProtocolProcessEventListener.java @@ -157,7 +157,7 @@ deviceDataList.add(deviceData); } //批量保存 - deviceDataService.saveBatch(deviceDataList); +// deviceDataService.saveBatch(deviceDataList); } } //移除缓存 diff --git a/sensorhub-core/src/main/java/com/casic/missiles/controller/AepCommandSend.java b/sensorhub-core/src/main/java/com/casic/missiles/controller/AepCommandSend.java index 2d0aeae..5e3ba38 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/controller/AepCommandSend.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/controller/AepCommandSend.java @@ -64,7 +64,7 @@ request.setParamMasterKey("0a77886fae4f4ff68d926adeb3a3ef5b"); // single value request.setBody(JSONObject.toJSONString(busConfigParam).getBytes()); CreateCommandLwm2mProfileResponse msgResponse = client.CreateCommandLwm2mProfile(request); - log.info("-----" + msgResponse.getMessage()); + log.info("send status-----" + msgResponse.getMessage()); } catch (Exception ex) { } finally { client.shutdown(); diff --git a/sensorhub-core/src/main/java/com/casic/missiles/controller/KafkaController.java b/sensorhub-core/src/main/java/com/casic/missiles/controller/KafkaController.java deleted file mode 100644 index d679bf1..0000000 --- a/sensorhub-core/src/main/java/com/casic/missiles/controller/KafkaController.java +++ /dev/null @@ -1,59 +0,0 @@ -package com.casic.missiles.controller; - -import com.casic.missiles.autoconfig.KafkaSendResultHandler; -import com.casic.missiles.parser.sender.impl.KafkaSubscribe; -import org.apache.kafka.clients.producer.ProducerRecord; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.kafka.core.KafkaTemplate; -import org.springframework.kafka.support.KafkaHeaders; -import org.springframework.messaging.MessageHeaders; -import org.springframework.messaging.support.GenericMessage; -import org.springframework.transaction.annotation.Transactional; -import org.springframework.web.bind.annotation.RequestMapping; -import org.springframework.web.bind.annotation.RestController; - -import java.util.HashMap; -import java.util.Map; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.TimeoutException; - -@RestController -@RequestMapping("/provider") -//这个注解代表这个类开启Springboot事务,因为我们在Kafka的配置文件开启了Kafka事务,不然会报错 -public class KafkaController { - - @Autowired - private KafkaSubscribe kafkaSubscribe; - - @RequestMapping("/send") - @Transactional - public String sendMultiple() throws Exception { - kafkaSubscribe.publishDataSubscribe(null, null); - return "okk"; - } - - -// /** -// * Kafka提供了多种构建消息的方式 -// * -// * @throws ExecutionException -// * @throws InterruptedException -// * @throws TimeoutException -// */ -// public 
void sendDemo() throws ExecutionException, InterruptedException, TimeoutException { -// //后面的get代表同步发送,括号内时间可选,代表超过这个时间会抛出超时异常,但是仍会发送成功 -// kafkaTemplate.send("topic1", "发给topic1").get(); -// //使用ProducerRecord发送消息 -// ProducerRecord producerRecord = new ProducerRecord<>("topic.quick.demo", "use ProducerRecord to send message"); -// kafkaTemplate.send(producerRecord); -// //使用Message发送消息 -// Map map = new HashMap<>(); -// map.put(KafkaHeaders.TOPIC, "topic.quick.demo"); -// map.put(KafkaHeaders.PARTITION_ID, 0); -// map.put(KafkaHeaders.MESSAGE_KEY, 0); -// GenericMessage message = new GenericMessage<>("use Message to send message", new MessageHeaders(map)); -// kafkaTemplate.send(message); -// } - -} - diff --git a/sensorhub-core/src/main/java/com/casic/missiles/controller/ReceiveController.java b/sensorhub-core/src/main/java/com/casic/missiles/controller/ReceiveController.java index e5fa505..dfed36a 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/controller/ReceiveController.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/controller/ReceiveController.java @@ -18,13 +18,13 @@ private final ThreadPoolExecutor threadPoolExecutor = new ThreadPoolExecutor(5, 10, 60, TimeUnit.SECONDS, new LinkedBlockingDeque<>(10)); @RequestMapping("/data") - public Object testNbResponse(@RequestBody Map h2sDataMap) { + public Object testNbResponse(@RequestBody Map dataMap) { ResponseData responseData = new ResponseData(); - System.out.println(JSON.toJSON(h2sDataMap)); + System.out.println(JSON.toJSON(dataMap)); threadPoolExecutor.execute( () -> { AepCommandSend aepCommandSend = new AepCommandSend(); - aepCommandSend.sendConfig(h2sDataMap); + aepCommandSend.sendConfig(dataMap); } ); responseData.setCode(200); diff --git a/sensorhub-core/src/main/java/com/casic/missiles/netty/SensorhubServer.java b/sensorhub-core/src/main/java/com/casic/missiles/netty/SensorhubServer.java index c4a534c..a946173 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/netty/SensorhubServer.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/netty/SensorhubServer.java @@ -3,6 +3,7 @@ import com.casic.missiles.autoconfig.SensorhubProperties; import com.casic.missiles.netty.handler.SensorhubServerChannelInitialHandler; import io.netty.bootstrap.ServerBootstrap; +import io.netty.buffer.PooledByteBufAllocator; import io.netty.channel.ChannelFuture; import io.netty.channel.ChannelOption; import io.netty.channel.EventLoopGroup; @@ -71,6 +72,8 @@ b.group(bossGroup, workerGroup) // 设置EventLoopGroup .channel(NioServerSocketChannel.class) // 指明新的Channel的类型 .childHandler(new SensorhubServerChannelInitialHandler()) // 指定ChannelHandler + .option(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT) + .childOption(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT) .option(ChannelOption.SO_BACKLOG, 1024) // 设置的ServerChannel的一些选项 .childOption(ChannelOption.SO_KEEPALIVE, true); // 当SO_KEEPALIVE=true的时候,服务端可以探测客户端的连接是否还存活着,如果客户端关闭了,那么服务端的连接可以关闭掉,释放资源 //增加检测 diff --git a/sensorhub-core/src/main/java/com/casic/missiles/parser/GenericProtocolParser.java b/sensorhub-core/src/main/java/com/casic/missiles/parser/GenericProtocolParser.java index bb41939..ec9a04f 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/parser/GenericProtocolParser.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/parser/GenericProtocolParser.java @@ -122,7 +122,7 @@ } ProtocolProcessEventListener.setTask(devcode, bizDataMap, 3); //存储数据 - datagramEventProvider.storeData(bizDataMap); +// 
datagramEventProvider.storeData(bizDataMap); } catch (RuntimeException rex) { log.error("解析出现异常,异常信息为{}", rex); //数据发送,异步,异常拦截 diff --git a/sensorhub-core/src/main/java/com/casic/missiles/parser/ProtocolParserSupport.java b/sensorhub-core/src/main/java/com/casic/missiles/parser/ProtocolParserSupport.java index e4a79ae..c886d25 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/parser/ProtocolParserSupport.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/parser/ProtocolParserSupport.java @@ -29,6 +29,13 @@ public class ProtocolParserSupport { + /** + * 获取协议的固定数据信息 + * + * @param protocolFactory + * @param wholeDatagramByte + * @return + */ protected Map getParseFixedDataMap(AbstractProtocolConfigFactory protocolFactory, ByteBuf wholeDatagramByte) { ProtocolFieldConfigProvider protocolFieldConfigProvider = protocolFactory.getProtocolFieldConfigProvider(); RuleConfigProvider ruleConfigProvider = protocolFactory.getRuleConfigProvider(); @@ -41,7 +48,7 @@ //打印源数据,设备编号 ProcessEventTask processEventTask = ProcessEventTask.builder() .devcode((String) parseFixedDataMap.get("devcode")) - .decryptSoureData(ByteBufUtil.hexDump(wholeDatagramByte)) + .decryptSoureData(ByteBufUtil.hexDump(wholeDatagramByte).substring(wholeDatagramByte.readerIndex())) .build(); ProtocolProcessEventListener.asynAddTask(processEventTask); return parseFixedDataMap; diff --git a/sensorhub-core/src/main/java/com/casic/missiles/parser/SensorhubDecoder.java b/sensorhub-core/src/main/java/com/casic/missiles/parser/SensorhubDecoder.java index c8c54f8..fccfa7c 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/parser/SensorhubDecoder.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/parser/SensorhubDecoder.java @@ -1,14 +1,17 @@ package com.casic.missiles.parser; +import com.casic.missiles.autoconfig.DirectMemoryReporter; import com.casic.missiles.parser.predecodec.AbstractPretreatment; import com.casic.missiles.pojo.ParseResult; import com.casic.missiles.provider.ProtocolConfigProvider; import com.casic.missiles.util.ClazzUtil; import io.netty.buffer.ByteBuf; +import io.netty.buffer.ByteBufAllocator; import io.netty.buffer.ByteBufUtil; import io.netty.channel.ChannelHandlerContext; import io.netty.handler.codec.ByteToMessageDecoder; import lombok.extern.slf4j.Slf4j; +import org.apache.commons.codec.binary.Hex; import java.io.BufferedInputStream; import java.io.File; @@ -33,29 +36,43 @@ * 3、将从通用的协议解析器得到的结果进行保存到list,传递给回复的handler,进行相关的回复命令操作 */ @Override - public void decode(ChannelHandlerContext ctx, ByteBuf buffer, List list){ + public void decode(ChannelHandlerContext ctx, ByteBuf buffer, List list) { //添加前置处理器 List abstractPreProcessingList = ClazzUtil.getSubClassList(AbstractPretreatment.class, true); ByteBuf standardByteBuf = buffer; - String oldBuff=ByteBufUtil.hexDump(standardByteBuf); + String oldBuff = ByteBufUtil.hexDump(standardByteBuf); for (AbstractPretreatment abstractPretreatment : abstractPreProcessingList) { standardByteBuf = abstractPretreatment.decode(standardByteBuf); } - boolean pretreatmentStatus=!oldBuff.equals(ByteBufUtil.hexDump(standardByteBuf)); + boolean pretreatmentStatus = !oldBuff.equals(ByteBufUtil.hexDump(standardByteBuf)); log.info(ByteBufUtil.hexDump(standardByteBuf)); //这里可以增加一些前置处理,例如判断拆包合包等网络流的操作 ProtocolParser protocolParser = new GenericProtocolParser(); - System.out.println(protocolParser); ProtocolConfigProvider protocolConfigProvider = new ProtocolConfigProvider(); ParseResult parseResult = null; +// DirectMemoryReporter memoryReporter=new 
DirectMemoryReporter(); //无论什么情况都交给,这里组装的内容,在回复的时候有效使用 + Integer pre = 0; while (parseResult == null && standardByteBuf.readerIndex() != standardByteBuf.writerIndex()) { + pre = standardByteBuf.readerIndex(); parseResult = protocolParser.doParseProtocol(standardByteBuf, protocolConfigProvider); + //避免死循环 + if (pre == standardByteBuf.readerIndex()) { + break; + } } +// destroy(standardByteBuf); if (parseResult != null) { parseResult.setRequestCode(pretreatmentStatus); list.add(parseResult); } } +// public void destroy(ByteBuf byteBuf) { +// if (byteBuf != null && byteBuf.refCnt() > 0) { +// byteBuf.release(); +// byteBuf = null; +// } +// } + } diff --git a/sensorhub-core/src/main/java/com/casic/missiles/parser/processor/reply/DevcieConfigConfirm.java b/sensorhub-core/src/main/java/com/casic/missiles/parser/processor/reply/DevcieConfigConfirm.java index 1fd58f2..5ea30a9 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/parser/processor/reply/DevcieConfigConfirm.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/parser/processor/reply/DevcieConfigConfirm.java @@ -41,7 +41,7 @@ } } //下发配置成功 -// redisCommon.removeKeyByDevcode(devcode); + redisCommon.removeKeyByDevcode(devcode); configDataMap.put("config", "1"); } return result; diff --git a/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/combined/CombinedFieldSupport.java b/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/combined/CombinedFieldSupport.java index b49c027..9226dc3 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/combined/CombinedFieldSupport.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/combined/CombinedFieldSupport.java @@ -20,13 +20,12 @@ */ public class CombinedFieldSupport { - //深拷贝不影响存储的对象 + //通过深拷贝获取字段配置 protected FieldConfig getFieldConfigById(Long fieldId, Map fieldConfigsMap) { if (ObjectUtils.isNotEmpty(fieldId)) { FieldConfig fieldConfig = fieldConfigsMap.get(fieldId); FieldConfig newDeepCopyFieldConfig = new FieldConfig(); //通过深拷贝传入 -// System.out.println("fieldId:"+fieldId+" json:"+ JSON.toJSON(fieldConfig)); BeanUtils.copyProperties(fieldConfig, newDeepCopyFieldConfig); return newDeepCopyFieldConfig; } else { diff --git a/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/combined/GenericCombinedFieldResolver.java b/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/combined/GenericCombinedFieldResolver.java index 58c419d..72b75dc 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/combined/GenericCombinedFieldResolver.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/combined/GenericCombinedFieldResolver.java @@ -5,6 +5,7 @@ import com.baomidou.mybatisplus.core.toolkit.CollectionUtils; import com.casic.missiles.enums.EngineExceptionEnum; import com.casic.missiles.exception.EngineException; +import com.casic.missiles.parser.GenericProtocolParser; import com.casic.missiles.parser.resolver.combined.impl.BizFieldParseProcessor; import com.casic.missiles.parser.resolver.combined.impl.PreBizFieldParseProcessor; import com.casic.missiles.parser.resolver.combined.impl.PreLeadCodeProcessor; @@ -13,13 +14,18 @@ import com.casic.missiles.pojo.CombinedFieldProcessorParam; import com.casic.missiles.pojo.FieldConfig; import com.casic.missiles.pojo.FieldRuleConfig; +import com.casic.missiles.util.SpringContextUtil; import io.netty.buffer.ByteBuf; import io.netty.buffer.ByteBufUtil; +import io.netty.util.internal.PlatformDependent; import 
lombok.extern.slf4j.Slf4j; +import org.springframework.util.ReflectionUtils; +import java.lang.reflect.Field; import java.util.ArrayList; import java.util.List; import java.util.Map; +import java.util.concurrent.atomic.AtomicLong; import java.util.stream.Collectors; /** @@ -31,7 +37,7 @@ public class GenericCombinedFieldResolver { /** - * 组合字段解析主要流程类 + * 组合字段解析主要流程类 * 通过查询,字段长度长度随机 */ @@ -42,16 +48,16 @@ List> storeObjectList = combinedFieldParam.getStoreObjectList(); while (byteBuf.readerIndex() < byteBuf.writerIndex()) { for (AbstractCombinedFieldProcessor abstractProcessor : abstractProcessorList) { - Integer oldLength = byteBuf.readerIndex(); - median = abstractProcessor.invoke(combinedFieldParam); - if (byteBuf.readerIndex() >= byteBuf.writerIndex()) { - return; - } - Assert.isFalse(oldLength == byteBuf.readerIndex(), () -> { - throw new EngineException(EngineExceptionEnum.COMBINED_CONFIG_MATCH_FAILED); - }); - // 将前一个节点作为参数传入下一次,保留前一个节点的信息 - combinedFieldParam.setPreProcessorResult(median); + Integer oldLength = byteBuf.readerIndex(); + median = abstractProcessor.invoke(combinedFieldParam); + if (byteBuf.readerIndex() >= byteBuf.writerIndex()) { + return; + } + Assert.isFalse(oldLength == byteBuf.readerIndex(), () -> { + throw new EngineException(EngineExceptionEnum.COMBINED_CONFIG_MATCH_FAILED); + }); + // 将前一个节点作为参数传入下一次,保留前一个节点的信息 + combinedFieldParam.setPreProcessorResult(median); } } System.out.println(JSON.toJSON(storeObjectList)); diff --git a/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/combined/impl/BizFieldParseProcessor.java b/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/combined/impl/BizFieldParseProcessor.java index 4cd61a7..f65e4eb 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/combined/impl/BizFieldParseProcessor.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/combined/impl/BizFieldParseProcessor.java @@ -62,9 +62,7 @@ * 计算处理后的偏移位置 */ private Integer calculateAfterProcessPosition(AbstractFieldConfig newProtocolFieldConfig, CombinedFieldProcessorParam combinedFieldParam) { -// Integer originPositionIndex = newProtocolFieldConfig.getOriginPositionByte() - combinedFieldParam.getByteBuf().readerIndex(); Integer mergeBitToByte = 0; -// mergeBitToByte += originPositionIndex; if (newProtocolFieldConfig.getOffsetUnit().equals("bit")) { mergeBitToByte += (newProtocolFieldConfig.getOriginPositionBit() + newProtocolFieldConfig.getOffsetLength()) / 8; } else { diff --git a/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/fields/BitFieldParser.java b/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/fields/BitFieldParser.java index a855b68..2f77154 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/fields/BitFieldParser.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/fields/BitFieldParser.java @@ -59,7 +59,9 @@ byte[] binaryBytes = getBytesFromBinaryStr(binaryStr); ByteBuf byteBuf = ByteBufAllocator.DEFAULT.buffer(); byteBuf.writeBytes(binaryBytes); - return doResolveFieldByteRule(byteBuf, ruleIds, fieldRuleConfigMap, networkOrder); + Object resolveValue = doResolveFieldByteRule(byteBuf, ruleIds, fieldRuleConfigMap, networkOrder); + byteBuf.release(); + return resolveValue; } diff --git a/sensorhub-core/src/main/java/com/casic/missiles/parser/safe/impl/Sm4.java b/sensorhub-core/src/main/java/com/casic/missiles/parser/safe/impl/Sm4.java index 18a03e5..b001944 100644 --- 
a/sensorhub-core/src/main/java/com/casic/missiles/parser/safe/impl/Sm4.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/parser/safe/impl/Sm4.java @@ -4,7 +4,6 @@ import io.netty.buffer.ByteBuf; import io.netty.buffer.ByteBufAllocator; import io.netty.buffer.ByteBufUtil; -import io.netty.util.ReferenceCountUtil; import lombok.extern.slf4j.Slf4j; import org.bouncycastle.crypto.engines.SM4Engine; import org.bouncycastle.crypto.params.KeyParameter; diff --git a/sensorhub-core/src/main/java/com/casic/missiles/parser/sender/impl/KafkaSubscribe.java b/sensorhub-core/src/main/java/com/casic/missiles/parser/sender/impl/KafkaSubscribe.java index aed848d..b95d1d8 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/parser/sender/impl/KafkaSubscribe.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/parser/sender/impl/KafkaSubscribe.java @@ -1,91 +1,92 @@ -package com.casic.missiles.parser.sender.impl; - -import cn.hutool.core.date.DateUtil; -import com.alibaba.fastjson.JSON; -import com.casic.missiles.autoconfig.KafkaSendResultHandler; -import com.casic.missiles.parser.sender.DataSubscribeProvider; -import com.casic.missiles.pojo.SubscribeDetailConfig; -import org.springframework.kafka.core.KafkaTemplate; -import org.springframework.stereotype.Component; -import org.springframework.transaction.annotation.Transactional; -import org.springframework.util.CollectionUtils; - -import java.util.Date; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -/** - * @author cz - * @date 2023-11-10 - */ -@Component("kafka") -public class KafkaSubscribe implements DataSubscribeProvider { - - private KafkaTemplate kafkaTemplate; - - public KafkaSubscribe(KafkaTemplate kafkaTemplate, KafkaSendResultHandler kafkaSendResultHandler) { - this.kafkaTemplate = kafkaTemplate; - //回调方法、异常处理 - this.kafkaTemplate.setProducerListener(kafkaSendResultHandler); - } - - @Override - @Transactional - public void publishDataSubscribe(List> bizDataMapList, SubscribeDetailConfig subscribeDetailConfig) { - if (CollectionUtils.isEmpty(bizDataMapList)) { - return; - } - Map contentMap = new HashMap(), mBody = new HashMap(); - Map bizDataMap = bizDataMapList.get(0); - switch ((Integer) bizDataMap.get("deviceType")) { - case 32: - contentMap.put("devType", "GasDetector"); - setEventType(bizDataMap, contentMap, mBody, "GasConfigSuccess", "GasConfigFail"); - break; - case 31: - contentMap.put("devType", "Pressure"); - setEventType(bizDataMap, contentMap, mBody, "PressureConfigSuccess", "PressureConfigFail"); - } - - if (bizDataMap.containsKey("dataValue")) { - contentMap.put("mType", "Data"); - if (bizDataMap.containsKey("cell")) { - mBody.put("cell", bizDataMap.get("cell")); - } - mBody.put("datas", bizDataMapList); - sendKafkaMsg(bizDataMap, contentMap, mBody); - } - if (bizDataMap.containsKey("imei")) { - contentMap.put("mType", "StartupRequest"); - mBody.put("iccid", bizDataMap.get("iccid")); - mBody.put("imei", bizDataMap.get("imei")); - sendKafkaMsg(bizDataMap, contentMap, mBody); - } - - } - - private void setEventType(Map bizDataMap, Map contentMap, Map mBody, String bTypeSuccess,String bTypeFail) { - if (bizDataMap.containsKey("config")) { - contentMap.put("mType", "SetResponse"); - contentMap.put("ts", DateUtil.format(new Date(), "yyyyMMddHHmmss")); - if ("1".equals(bizDataMap.get("config"))) { - mBody.put("bType", bTypeSuccess); - }else { - mBody.put("bType", bTypeFail); - } - sendKafkaMsg(bizDataMap, contentMap, mBody); - } - - } - - - private void sendKafkaMsg(Map bizDataMap, Map 
contentMap, Map mBody) { - if (bizDataMap.containsKey("devcode")) { - contentMap.put("devCode", bizDataMap.get("devcode")); - } - contentMap.put("mBody", mBody); - kafkaTemplate.send("pressure", JSON.toJSONString(contentMap)); - } - -} +//package com.casic.missiles.parser.sender.impl; +// +//import cn.hutool.core.date.DateUtil; +//import com.alibaba.fastjson.JSON; +//import com.casic.missiles.autoconfig.KafkaSendResultHandler; +//import com.casic.missiles.parser.sender.DataSubscribeProvider; +//import com.casic.missiles.pojo.SubscribeDetailConfig; +//import org.springframework.kafka.core.KafkaTemplate; +//import org.springframework.stereotype.Component; +//import org.springframework.transaction.annotation.Transactional; +//import org.springframework.util.CollectionUtils; +// +//import java.util.Date; +//import java.util.HashMap; +//import java.util.List; +//import java.util.Map; +// +///** +// * @author cz +// * @date 2023-11-10 +// */ +//@Component("kafka") +//public class KafkaSubscribe implements DataSubscribeProvider { +// +// private KafkaTemplate kafkaTemplate; +// +// public KafkaSubscribe(KafkaTemplate kafkaTemplate, KafkaSendResultHandler kafkaSendResultHandler) { +// this.kafkaTemplate = kafkaTemplate; +// //回调方法、异常处理 +// this.kafkaTemplate.setProducerListener(kafkaSendResultHandler); +// } +// +// @Override +// @Transactional +// public void publishDataSubscribe(List> bizDataMapList, SubscribeDetailConfig subscribeDetailConfig) { +// if (CollectionUtils.isEmpty(bizDataMapList)) { +// return; +// } +// Map contentMap = new HashMap(), mBody = new HashMap(); +// Map bizDataMap = bizDataMapList.get(0); +// switch ((Integer) bizDataMap.get("deviceType")) { +// case 32: +// contentMap.put("devType", "GasDetector"); +// setEventType(bizDataMap, contentMap, mBody, "GasConfigSuccess", "GasConfigFail"); +// break; +// case 31: +// contentMap.put("devType", "Pressure"); +// setEventType(bizDataMap, contentMap, mBody, "PressureConfigSuccess", "PressureConfigFail"); +// } +// +// if (bizDataMap.containsKey("dataValue")) { +// contentMap.put("mType", "Data"); +// if (bizDataMap.containsKey("cell")) { +// mBody.put("cell", bizDataMap.get("cell")); +// } +// mBody.put("datas", bizDataMapList); +// sendKafkaMsg(bizDataMap, contentMap, mBody); +// } +// if (bizDataMap.containsKey("imei")) { +// contentMap.put("mType", "StartupRequest"); +// mBody.put("iccid", bizDataMap.get("iccid")); +// mBody.put("imei", bizDataMap.get("imei")); +// sendKafkaMsg(bizDataMap, contentMap, mBody); +// } +// +// } +// +// //设置下发配置回复 +// private void setEventType(Map bizDataMap, Map contentMap, Map mBody, String bTypeSuccess,String bTypeFail) { +// if (bizDataMap.containsKey("config")) { +// contentMap.put("mType", "SetResponse"); +// contentMap.put("ts", DateUtil.format(new Date(), "yyyyMMddHHmmss")); +// if ("1".equals(bizDataMap.get("config"))) { +// mBody.put("bType", bTypeSuccess); +// }else { +// mBody.put("bType", bTypeFail); +// } +// sendKafkaMsg(bizDataMap, contentMap, mBody); +// } +// +// } +// +// //设置kafka回复 +// private void sendKafkaMsg(Map bizDataMap, Map contentMap, Map mBody) { +// if (bizDataMap.containsKey("devcode")) { +// contentMap.put("devCode", bizDataMap.get("devcode")); +// } +// contentMap.put("mBody", mBody); +// kafkaTemplate.send("pressure", JSON.toJSONString(contentMap)); +// } +// +//} diff --git a/sensorhub-core/src/main/java/com/casic/missiles/provider/ProcessorInstanceProvider.java b/sensorhub-core/src/main/java/com/casic/missiles/provider/ProcessorInstanceProvider.java index 
3d89d0d..833a4b1 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/provider/ProcessorInstanceProvider.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/provider/ProcessorInstanceProvider.java @@ -6,7 +6,7 @@ import com.casic.missiles.cache.ProtocolProcessEventListener; import com.casic.missiles.parser.safe.SafeStrategy; import com.casic.missiles.parser.sender.DataSubscribeProvider; -import com.casic.missiles.parser.sender.impl.KafkaSubscribe; +//import com.casic.missiles.parser.sender.impl.KafkaSubscribe; import com.casic.missiles.pojo.*; import com.casic.missiles.registry.DatagramEventRegistry; import com.casic.missiles.registry.SubscribeRegistry; @@ -148,8 +148,8 @@ * 数据订阅 */ public void storeData(List> bizDataMap) { - DataSubscribeProvider subscribeProvider = SpringContextUtil.getBean(KafkaSubscribe.class); - subscribeProvider.publishDataSubscribe(bizDataMap, null); +// DataSubscribeProvider subscribeProvider = SpringContextUtil.getBean(KafkaSubscribe.class); +// subscribeProvider.publishDataSubscribe(bizDataMap, null); } // DataSubscribeProvider dataSubscribeProvider = SpringContextUtil.getBean(processorInstance.getSubscribeBean()); diff --git a/sensorhub-core/src/main/java/com/casic/missiles/replier/SensorhubReplier.java b/sensorhub-core/src/main/java/com/casic/missiles/replier/SensorhubReplier.java index aef5616..327144a 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/replier/SensorhubReplier.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/replier/SensorhubReplier.java @@ -8,10 +8,12 @@ import io.netty.channel.ChannelHandlerContext; import io.netty.channel.SimpleChannelInboundHandler; import io.netty.handler.codec.http.FullHttpResponse; +import lombok.extern.slf4j.Slf4j; /** * @author cz */ +@Slf4j public class SensorhubReplier extends SimpleChannelInboundHandler { /** @@ -33,7 +35,7 @@ //-1为当前下发配置确认,不用回复 if (-1 != parseResult.getReplyCommand()) { ByteBuf replyByteBuf = abstractBuildReplyCommand.excute(parseResult); - System.out.println("返回的报文内容为" + ByteBufUtil.hexDump((ByteBuf) replyByteBuf)); + log.info("返回的报文内容为" + ByteBufUtil.hexDump((ByteBuf) replyByteBuf)); ((ByteBuf) replyByteBuf).resetReaderIndex(); //进行回复 ctx.channel().writeAndFlush(replyByteBuf); diff --git a/sensorhub-support/src/main/java/com/casic/missiles/enums/FileNameEnums.java b/sensorhub-support/src/main/java/com/casic/missiles/enums/FileNameEnums.java index babf0a3..6243a7a 100644 --- a/sensorhub-support/src/main/java/com/casic/missiles/enums/FileNameEnums.java +++ b/sensorhub-support/src/main/java/com/casic/missiles/enums/FileNameEnums.java @@ -8,7 +8,7 @@ FILE_NAMES(new HashMap() { { put(COMMERCIAL_GAS, "GT_BIR1000-APP_v1.1.bin"); - put(PRESSURE, "BIRMM-P1000N-APP_v1.0.bin"); + put(PRESSURE, "BIRMM-P1000N-APP_v1.1.bin"); } }); diff --git a/sensorhub-support/src/main/java/com/casic/missiles/enums/ReplyCommandEnum.java b/sensorhub-support/src/main/java/com/casic/missiles/enums/ReplyCommandEnum.java index c1e683f..9a73a36 100644 --- a/sensorhub-support/src/main/java/com/casic/missiles/enums/ReplyCommandEnum.java +++ b/sensorhub-support/src/main/java/com/casic/missiles/enums/ReplyCommandEnum.java @@ -78,6 +78,6 @@ /** * 操作类型呈对应的关系 */ - String PDU_TYPE = "操作类型"; + String PDU_TYPE = "PDUType"; }
-8,35 +8,35 @@ multipart: max-file-size: 50MB max-request-size: 80MB - kafka: - #kafka配置 - producer: - # Kafka服务器 - bootstrap-servers: '111.198.10.15:12502' - transaction-id-prefix: kafkaTx- - retries: 3 - acks: all - batch-size: 16384 - buffer-memory: 1024000 - consumer: - # Kafka服务器 - bootstrap-servers: '111.198.10.15:12502' - group-id: 1 - auto-offset-reset: latest - enable-auto-commit: false - max-poll-records: 3 - properties: - max: - poll: - interval: - ms: 600000 - session: - timeout: - ms: 10000 - listener: - concurrency: 4 - ack-mode: manual_immediate - missing-topics-fatal: false +# kafka: +# #kafka配置 +# producer: +# # Kafka服务器 +# bootstrap-servers: '111.198.10.15:12502' +# transaction-id-prefix: kafkaTx- +# retries: 3 +# acks: all +# batch-size: 16384 +# buffer-memory: 1024000 +# consumer: +# # Kafka服务器 +# bootstrap-servers: '111.198.10.15:12502' +# group-id: 1 +# auto-offset-reset: latest +# enable-auto-commit: false +# max-poll-records: 3 +# properties: +# max: +# poll: +# interval: +# ms: 600000 +# session: +# timeout: +# ms: 10000 +# listener: +# concurrency: 4 +# ack-mode: manual_immediate +# missing-topics-fatal: false mybatis-plus: global-config: #字段策略 0:"所有字段都更新和插入" 1:"只更新和插入非NULL值" 2:"只更新和插入非NULL值且非空字符串" diff --git a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/DirectMemoryReporter.java b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/DirectMemoryReporter.java new file mode 100644 index 0000000..d54b7a9 --- /dev/null +++ b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/DirectMemoryReporter.java @@ -0,0 +1,34 @@ +package com.casic.missiles.autoconfig; + +import io.netty.util.concurrent.GlobalEventExecutor; +import io.netty.util.internal.PlatformDependent; +import lombok.extern.slf4j.Slf4j; +import org.springframework.stereotype.Component; +import org.springframework.util.ReflectionUtils; + +import java.lang.reflect.Field; +import java.util.concurrent.TimeUnit; + +@Slf4j +public class DirectMemoryReporter { + + private Integer _1k = 1024; + private Long directMomery; + + public DirectMemoryReporter() { + Field field = ReflectionUtils.findField(PlatformDependent.class,"DIRECT_MEMORY_COUNTER"); + field.setAccessible(true); + try { + directMomery = PlatformDependent.usedDirectMemory(); + } catch (Exception e) { + } + + GlobalEventExecutor.INSTANCE.scheduleAtFixedRate(this::doReport, 0, 1, TimeUnit.SECONDS); + } + + + + public void doReport() { + log.debug("netty_directMomery_log ,{}k" , directMomery / _1k); + } +} \ No newline at end of file diff --git a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerConfig.java b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerConfig.java index 24e4884..e494726 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerConfig.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerConfig.java @@ -1,100 +1,100 @@ -package com.casic.missiles.autoconfig; - -import org.apache.kafka.clients.CommonClientConfigs; -import org.apache.kafka.clients.consumer.ConsumerConfig; -import org.apache.kafka.common.config.SaslConfigs; -import org.apache.kafka.common.security.auth.SecurityProtocol; -import org.apache.kafka.common.serialization.StringDeserializer; -import org.springframework.beans.factory.annotation.Value; -import org.springframework.boot.SpringBootConfiguration; -import org.springframework.context.annotation.Bean; -import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory; -import 
org.springframework.kafka.config.KafkaListenerContainerFactory; -import org.springframework.kafka.core.DefaultKafkaConsumerFactory; -import org.springframework.kafka.listener.ConcurrentMessageListenerContainer; -import org.springframework.kafka.listener.ContainerProperties; - -import java.util.HashMap; -import java.util.Map; - - -/** - * @author cz - */ -@SpringBootConfiguration -public class KafkaConsumerConfig { - - @Value("${spring.kafka.consumer.bootstrap-servers}") - private String bootstrapServers; - @Value("${spring.kafka.consumer.group-id}") - private String groupId; - @Value("${spring.kafka.consumer.enable-auto-commit}") - private boolean enableAutoCommit; - @Value("${spring.kafka.properties.session.timeout.ms}") - private String sessionTimeout; - @Value("${spring.kafka.properties.max.poll.interval.ms}") - private String maxPollIntervalTime; - @Value("${spring.kafka.consumer.max-poll-records}") - private String maxPollRecords; - @Value("${spring.kafka.consumer.auto-offset-reset}") - private String autoOffsetReset; - @Value("${spring.kafka.listener.concurrency}") - private Integer concurrency; - @Value("${spring.kafka.listener.missing-topics-fatal}") - private boolean missingTopicsFatal; - - private final long pollTimeout = 600000; - - @Bean - public Map consumerConfigs() { - Map propsMap = new HashMap<>(16); - // 服务器地址,不多说配置直接用 - propsMap.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers); - // groupId不多说,直接用 - propsMap.put(ConsumerConfig.GROUP_ID_CONFIG, groupId); - //是否自动提交偏移量,默认值是true,为了避免出现重复数据和数据丢失,可以把它设置为false,然后手动提交偏移量 - propsMap.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, enableAutoCommit); - //自动提交的时间间隔,自动提交开启时生效 - propsMap.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "2000"); - //该属性指定了消费者在读取一个没有偏移量的分区或者偏移量无效的情况下该作何处理: - //我们使用latest:当各分区下有已提交的offset时,从提交的offset开始消费;无提交的offset时,消费新产生的该分区下的数据 - propsMap.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, autoOffsetReset); - //两次poll之间的最大间隔,默认值为5分钟。如果超过这个间隔会触发reBalance - propsMap.put(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, maxPollIntervalTime); - //这个参数定义了poll方法最多可以拉取多少条消息,默认值为500。 - propsMap.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, maxPollRecords); - //当broker多久没有收到consumer的心跳请求后就触发reBalance,默认值是10s - propsMap.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, sessionTimeout); - //序列化(我们这边使用StringDeserializer,与生产者保持一致) - propsMap.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); - propsMap.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); - // 下面四个参数是用户名密码的参数,没有用户名密码可以去掉以下配置 -// propsMap.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, SecurityProtocol.SASL_PLAINTEXT.name); -// propsMap.put(SaslConfigs.SASL_MECHANISM, "PLAIN"); -// propsMap.put("java.security.auth.login.config", "10000"); - // 这里username设置用户名, password设置密码我写死到代码里了,可以更改为nacos配置 -// propsMap.put(SaslConfigs.SASL_JAAS_CONFIG, String.format("org.apache.kafka.common.security.plain.PlainLoginModule required username=\"admin\" password=\"admin1234\";")); - return propsMap; - } - - // 消费者工厂,将配置信息加载进去 - @Bean("consumerFactory") - public DefaultKafkaConsumerFactory consumerFactory() { - return new DefaultKafkaConsumerFactory(consumerConfigs()); - } - - @Bean("listenerContainerFactory") - public KafkaListenerContainerFactory> kafkaListenerContainerFactory() { - ConcurrentKafkaListenerContainerFactory factory = new ConcurrentKafkaListenerContainerFactory<>(); - factory.setConsumerFactory(consumerFactory()); - //在侦听器容器中运行的线程数,一般设置为 机器数*分区数 - factory.setConcurrency(concurrency); - 
//消费监听接口监听的主题不存在时,默认会报错,所以设置为false忽略错误 - factory.getContainerProperties().setMissingTopicsFatal(missingTopicsFatal); - //自动提交关闭,需要设置手动消息确认 - factory.getContainerProperties().setAckMode(ContainerProperties.AckMode.MANUAL_IMMEDIATE); - factory.getContainerProperties().setPollTimeout(pollTimeout); - return factory; - } -} - +//package com.casic.missiles.autoconfig; +// +//import org.apache.kafka.clients.CommonClientConfigs; +//import org.apache.kafka.clients.consumer.ConsumerConfig; +//import org.apache.kafka.common.config.SaslConfigs; +//import org.apache.kafka.common.security.auth.SecurityProtocol; +//import org.apache.kafka.common.serialization.StringDeserializer; +//import org.springframework.beans.factory.annotation.Value; +//import org.springframework.boot.SpringBootConfiguration; +//import org.springframework.context.annotation.Bean; +//import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory; +//import org.springframework.kafka.config.KafkaListenerContainerFactory; +//import org.springframework.kafka.core.DefaultKafkaConsumerFactory; +//import org.springframework.kafka.listener.ConcurrentMessageListenerContainer; +//import org.springframework.kafka.listener.ContainerProperties; +// +//import java.util.HashMap; +//import java.util.Map; +// +// +///** +// * @author cz +// */ +//@SpringBootConfiguration +//public class KafkaConsumerConfig { +// +// @Value("${spring.kafka.consumer.bootstrap-servers}") +// private String bootstrapServers; +// @Value("${spring.kafka.consumer.group-id}") +// private String groupId; +// @Value("${spring.kafka.consumer.enable-auto-commit}") +// private boolean enableAutoCommit; +// @Value("${spring.kafka.properties.session.timeout.ms}") +// private String sessionTimeout; +// @Value("${spring.kafka.properties.max.poll.interval.ms}") +// private String maxPollIntervalTime; +// @Value("${spring.kafka.consumer.max-poll-records}") +// private String maxPollRecords; +// @Value("${spring.kafka.consumer.auto-offset-reset}") +// private String autoOffsetReset; +// @Value("${spring.kafka.listener.concurrency}") +// private Integer concurrency; +// @Value("${spring.kafka.listener.missing-topics-fatal}") +// private boolean missingTopicsFatal; +// +// private final long pollTimeout = 600000; +// +// @Bean +// public Map consumerConfigs() { +// Map propsMap = new HashMap<>(16); +// // 服务器地址,不多说配置直接用 +// propsMap.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers); +// // groupId不多说,直接用 +// propsMap.put(ConsumerConfig.GROUP_ID_CONFIG, groupId); +// //是否自动提交偏移量,默认值是true,为了避免出现重复数据和数据丢失,可以把它设置为false,然后手动提交偏移量 +// propsMap.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, enableAutoCommit); +// //自动提交的时间间隔,自动提交开启时生效 +// propsMap.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "2000"); +// //该属性指定了消费者在读取一个没有偏移量的分区或者偏移量无效的情况下该作何处理: +// //我们使用latest:当各分区下有已提交的offset时,从提交的offset开始消费;无提交的offset时,消费新产生的该分区下的数据 +// propsMap.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, autoOffsetReset); +// //两次poll之间的最大间隔,默认值为5分钟。如果超过这个间隔会触发reBalance +// propsMap.put(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, maxPollIntervalTime); +// //这个参数定义了poll方法最多可以拉取多少条消息,默认值为500。 +// propsMap.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, maxPollRecords); +// //当broker多久没有收到consumer的心跳请求后就触发reBalance,默认值是10s +// propsMap.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, sessionTimeout); +// //序列化(我们这边使用StringDeserializer,与生产者保持一致) +// propsMap.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); +// propsMap.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, 
StringDeserializer.class); +// // 下面四个参数是用户名密码的参数,没有用户名密码可以去掉以下配置 +//// propsMap.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, SecurityProtocol.SASL_PLAINTEXT.name); +//// propsMap.put(SaslConfigs.SASL_MECHANISM, "PLAIN"); +//// propsMap.put("java.security.auth.login.config", "10000"); +// // 这里username设置用户名, password设置密码我写死到代码里了,可以更改为nacos配置 +//// propsMap.put(SaslConfigs.SASL_JAAS_CONFIG, String.format("org.apache.kafka.common.security.plain.PlainLoginModule required username=\"admin\" password=\"admin1234\";")); +// return propsMap; +// } +// +// // 消费者工厂,将配置信息加载进去 +// @Bean("consumerFactory") +// public DefaultKafkaConsumerFactory consumerFactory() { +// return new DefaultKafkaConsumerFactory(consumerConfigs()); +// } +// +// @Bean("listenerContainerFactory") +// public KafkaListenerContainerFactory> kafkaListenerContainerFactory() { +// ConcurrentKafkaListenerContainerFactory factory = new ConcurrentKafkaListenerContainerFactory<>(); +// factory.setConsumerFactory(consumerFactory()); +// //在侦听器容器中运行的线程数,一般设置为 机器数*分区数 +// factory.setConcurrency(concurrency); +// //消费监听接口监听的主题不存在时,默认会报错,所以设置为false忽略错误 +// factory.getContainerProperties().setMissingTopicsFatal(missingTopicsFatal); +// //自动提交关闭,需要设置手动消息确认 +// factory.getContainerProperties().setAckMode(ContainerProperties.AckMode.MANUAL_IMMEDIATE); +// factory.getContainerProperties().setPollTimeout(pollTimeout); +// return factory; +// } +//} +// diff --git a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerListener.java b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerListener.java index c50a4e3..6600312 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerListener.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerListener.java @@ -1,31 +1,31 @@ -package com.casic.missiles.autoconfig; - -import org.apache.kafka.clients.consumer.ConsumerRecord; -import org.springframework.kafka.annotation.KafkaListener; -import org.springframework.kafka.config.KafkaListenerEndpointRegistry; -import org.springframework.kafka.support.Acknowledgment; -import org.springframework.web.bind.annotation.RestController; - -import javax.annotation.Resource; - -@RestController() -public class KafkaConsumerListener{ - @Resource - private KafkaListenerEndpointRegistry kafkaListenerEndpointRegistry; - /** - * 监听kafka消息 - * - * 使用autoStartup = "false"必须指定id - */ - @KafkaListener(id = "${spring.kafka.consumer.group-id}", topics = {"KAFKA_TEST_TOPICS"}, autoStartup = "true") - public void listenTopics(ConsumerRecord consumerRecord, Acknowledgment ack) { - try { - System.out.println("listenTopics接受消息:" + consumerRecord.value()); - //手动确认 - ack.acknowledge(); - } catch (Exception e) { - System.out.println("消费失败:" + e); - } - } - -} +//package com.casic.missiles.autoconfig; +// +//import org.apache.kafka.clients.consumer.ConsumerRecord; +//import org.springframework.kafka.annotation.KafkaListener; +//import org.springframework.kafka.config.KafkaListenerEndpointRegistry; +//import org.springframework.kafka.support.Acknowledgment; +//import org.springframework.web.bind.annotation.RestController; +// +//import javax.annotation.Resource; +// +//@RestController() +//public class KafkaConsumerListener{ +// @Resource +// private KafkaListenerEndpointRegistry kafkaListenerEndpointRegistry; +// /** +// * 监听kafka消息 +// * +// * 使用autoStartup = "false"必须指定id +// */ +// @KafkaListener(id = "${spring.kafka.consumer.group-id}", topics = {"KAFKA_TEST_TOPICS"}, autoStartup = 
"true") +// public void listenTopics(ConsumerRecord consumerRecord, Acknowledgment ack) { +// try { +// System.out.println("listenTopics接受消息:" + consumerRecord.value()); +// //手动确认 +// ack.acknowledge(); +// } catch (Exception e) { +// System.out.println("消费失败:" + e); +// } +// } +// +//} diff --git a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerListenerError.java b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerListenerError.java index d6f771a..dbc561c 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerListenerError.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerListenerError.java @@ -1,29 +1,29 @@ -package com.casic.missiles.autoconfig; - -import edu.umd.cs.findbugs.annotations.NonNull; -import org.apache.kafka.clients.consumer.Consumer; -import org.springframework.kafka.listener.KafkaListenerErrorHandler; -import org.springframework.kafka.listener.ListenerExecutionFailedException; -import org.springframework.messaging.Message; -import org.springframework.stereotype.Component; - -@Component -public class KafkaConsumerListenerError implements KafkaListenerErrorHandler { - - - @Override - @NonNull - public Object handleError(Message message, ListenerExecutionFailedException e) { - return new Object(); - } - - @Override - public Object handleError(Message message, ListenerExecutionFailedException exception, Consumer consumer) { - System.out.println("消息详情:" + message); - System.out.println("异常信息::" + exception); - System.out.println("消费者详情::" + consumer.groupMetadata()); - System.out.println("监听主题::" + consumer.listTopics()); - return KafkaListenerErrorHandler.super.handleError(message, exception, consumer); - } - -} +//package com.casic.missiles.autoconfig; +// +//import edu.umd.cs.findbugs.annotations.NonNull; +//import org.apache.kafka.clients.consumer.Consumer; +//import org.springframework.kafka.listener.KafkaListenerErrorHandler; +//import org.springframework.kafka.listener.ListenerExecutionFailedException; +//import org.springframework.messaging.Message; +//import org.springframework.stereotype.Component; +// +//@Component +//public class KafkaConsumerListenerError implements KafkaListenerErrorHandler { +// +// +// @Override +// @NonNull +// public Object handleError(Message message, ListenerExecutionFailedException e) { +// return new Object(); +// } +// +// @Override +// public Object handleError(Message message, ListenerExecutionFailedException exception, Consumer consumer) { +// System.out.println("消息详情:" + message); +// System.out.println("异常信息::" + exception); +// System.out.println("消费者详情::" + consumer.groupMetadata()); +// System.out.println("监听主题::" + consumer.listTopics()); +// return KafkaListenerErrorHandler.super.handleError(message, exception, consumer); +// } +// +//} diff --git a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaProviderConfig.java b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaProviderConfig.java index e8e36ec..1d605ed 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaProviderConfig.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaProviderConfig.java @@ -1,85 +1,85 @@ -package com.casic.missiles.autoconfig; - -import org.apache.kafka.clients.CommonClientConfigs; -import org.apache.kafka.clients.producer.ProducerConfig; -import org.apache.kafka.common.config.SaslConfigs; -import org.apache.kafka.common.security.auth.SecurityProtocol; -import 
org.apache.kafka.common.serialization.StringSerializer; -import org.springframework.beans.factory.annotation.Value; -import org.springframework.boot.SpringBootConfiguration; -import org.springframework.context.annotation.Bean; -import org.springframework.context.annotation.Primary; -import org.springframework.kafka.core.DefaultKafkaProducerFactory; -import org.springframework.kafka.core.KafkaTemplate; -import org.springframework.kafka.core.ProducerFactory; -import org.springframework.kafka.transaction.KafkaTransactionManager; - -import java.util.HashMap; -import java.util.Map; - -/** - * @author cz - */ -@SpringBootConfiguration -public class KafkaProviderConfig { - - @Value("${spring.kafka.producer.bootstrap-servers}") - private String bootstrapServers; - @Value("${spring.kafka.producer.acks}") - private String acks; - @Value("${spring.kafka.producer.retries}") - private String retries; - @Value("${spring.kafka.producer.batch-size}") - private String batchSize; - @Value("${spring.kafka.producer.buffer-memory}") - private String bufferMemory; - - @Bean - public Map producerConfigs() { - Map props = new HashMap<>(16); - props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers); - //响应模式,我们使用acks=all :只有当所有参与复制的节点全部收到消息时,生产者才会收到一个来自服务器的成功响应。 - props.put(ProducerConfig.ACKS_CONFIG, acks); - //发生错误后,消息重发的次数,开启事务必须大于0 - props.put(ProducerConfig.RETRIES_CONFIG, retries); - //当多个消息发送到相同分区时,生产者会将消息打包到一起,以减少请求交互. 而不是一条条发送 - props.put(ProducerConfig.BATCH_SIZE_CONFIG, batchSize); - //有的时刻消息比较少,过了很久,比如5min也没有凑够16KB,这样延时就很大,所以需要一个参数. 再设置一个时间,到了这个时间, - props.put(ProducerConfig.LINGER_MS_CONFIG, "5000"); - //生产者内存缓冲区的大小 - props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, bufferMemory); - //序列和消费者对应 - props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class); - props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class); - - //用户名密码配置,没有用户名密码可以去掉以下配置 -// props.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, SecurityProtocol.SASL_PLAINTEXT.name); -// props.put(SaslConfigs.SASL_MECHANISM, "PLAIN"); -// props.put("java.security.auth.login.config", "10000"); - // 可以在nacos配置文件中配置 -// props.put(SaslConfigs.SASL_JAAS_CONFIG, String.format("org.apache.kafka.common.security.plain.PlainLoginModule required username=\"admin\" password=\"admin1234\";")); - return props; - } - - // 生产者工厂 - @Bean("kafkaProduceFactory") - public ProducerFactory producerFactory() { - DefaultKafkaProducerFactory factory = new DefaultKafkaProducerFactory<>(producerConfigs()); - factory.setTransactionIdPrefix("kafkaTx-"); - return factory; - } - - // 事务处理 - // 这里的事务处理会和项目中的其他事务起冲突,所以我一般会把@Bean去掉,不用spring代理 - @Bean("kafkaTransactionManager") - @Primary - public KafkaTransactionManager kafkaTransactionManager(ProducerFactory producerFactory) { - return new KafkaTransactionManager(producerFactory); - } - - @Bean - public KafkaTemplate kafkaTemplate() { - return new KafkaTemplate<>(producerFactory()); - } - -} +//package com.casic.missiles.autoconfig; +// +//import org.apache.kafka.clients.CommonClientConfigs; +//import org.apache.kafka.clients.producer.ProducerConfig; +//import org.apache.kafka.common.config.SaslConfigs; +//import org.apache.kafka.common.security.auth.SecurityProtocol; +//import org.apache.kafka.common.serialization.StringSerializer; +//import org.springframework.beans.factory.annotation.Value; +//import org.springframework.boot.SpringBootConfiguration; +//import org.springframework.context.annotation.Bean; +//import org.springframework.context.annotation.Primary; +//import 
org.springframework.kafka.core.DefaultKafkaProducerFactory; +//import org.springframework.kafka.core.KafkaTemplate; +//import org.springframework.kafka.core.ProducerFactory; +//import org.springframework.kafka.transaction.KafkaTransactionManager; +// +//import java.util.HashMap; +//import java.util.Map; +// +///** +// * @author cz +// */ +//@SpringBootConfiguration +//public class KafkaProviderConfig { +// +// @Value("${spring.kafka.producer.bootstrap-servers}") +// private String bootstrapServers; +// @Value("${spring.kafka.producer.acks}") +// private String acks; +// @Value("${spring.kafka.producer.retries}") +// private String retries; +// @Value("${spring.kafka.producer.batch-size}") +// private String batchSize; +// @Value("${spring.kafka.producer.buffer-memory}") +// private String bufferMemory; +// +// @Bean +// public Map producerConfigs() { +// Map props = new HashMap<>(16); +// props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers); +// //响应模式,我们使用acks=all :只有当所有参与复制的节点全部收到消息时,生产者才会收到一个来自服务器的成功响应。 +// props.put(ProducerConfig.ACKS_CONFIG, acks); +// //发生错误后,消息重发的次数,开启事务必须大于0 +// props.put(ProducerConfig.RETRIES_CONFIG, retries); +// //当多个消息发送到相同分区时,生产者会将消息打包到一起,以减少请求交互. 而不是一条条发送 +// props.put(ProducerConfig.BATCH_SIZE_CONFIG, batchSize); +// //有的时刻消息比较少,过了很久,比如5min也没有凑够16KB,这样延时就很大,所以需要一个参数. 再设置一个时间,到了这个时间, +// props.put(ProducerConfig.LINGER_MS_CONFIG, "5000"); +// //生产者内存缓冲区的大小 +// props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, bufferMemory); +// //序列和消费者对应 +// props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class); +// props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class); +// +// //用户名密码配置,没有用户名密码可以去掉以下配置 +//// props.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, SecurityProtocol.SASL_PLAINTEXT.name); +//// props.put(SaslConfigs.SASL_MECHANISM, "PLAIN"); +//// props.put("java.security.auth.login.config", "10000"); +// // 可以在nacos配置文件中配置 +//// props.put(SaslConfigs.SASL_JAAS_CONFIG, String.format("org.apache.kafka.common.security.plain.PlainLoginModule required username=\"admin\" password=\"admin1234\";")); +// return props; +// } +// +// // 生产者工厂 +// @Bean("kafkaProduceFactory") +// public ProducerFactory producerFactory() { +// DefaultKafkaProducerFactory factory = new DefaultKafkaProducerFactory<>(producerConfigs()); +// factory.setTransactionIdPrefix("kafkaTx-"); +// return factory; +// } +// +// // 事务处理 +// // 这里的事务处理会和项目中的其他事务起冲突,所以我一般会把@Bean去掉,不用spring代理 +// @Bean("kafkaTransactionManager") +// @Primary +// public KafkaTransactionManager kafkaTransactionManager(ProducerFactory producerFactory) { +// return new KafkaTransactionManager(producerFactory); +// } +// +// @Bean +// public KafkaTemplate kafkaTemplate() { +// return new KafkaTemplate<>(producerFactory()); +// } +// +//} diff --git a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaSendResultHandler.java b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaSendResultHandler.java index 1fdac64..1598ee0 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaSendResultHandler.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaSendResultHandler.java @@ -1,22 +1,22 @@ -package com.casic.missiles.autoconfig; - -import org.apache.kafka.clients.producer.ProducerRecord; -import org.apache.kafka.clients.producer.RecordMetadata; -import org.springframework.kafka.support.ProducerListener; -import org.springframework.stereotype.Component; - -import javax.annotation.Nullable; - 
diff --git a/sensorhub-core/src/main/java/com/casic/missiles/cache/ProtocolProcessEventListener.java b/sensorhub-core/src/main/java/com/casic/missiles/cache/ProtocolProcessEventListener.java
index b0bf96d..5144189 100644
--- a/sensorhub-core/src/main/java/com/casic/missiles/cache/ProtocolProcessEventListener.java
+++ b/sensorhub-core/src/main/java/com/casic/missiles/cache/ProtocolProcessEventListener.java
@@ -157,7 +157,7 @@
                 deviceDataList.add(deviceData);
             }
             //Batch save
-            deviceDataService.saveBatch(deviceDataList);
+//            deviceDataService.saveBatch(deviceDataList);
         }
     }
     //Remove the cache entry
diff --git a/sensorhub-core/src/main/java/com/casic/missiles/controller/AepCommandSend.java b/sensorhub-core/src/main/java/com/casic/missiles/controller/AepCommandSend.java
index 2d0aeae..5e3ba38 100644
--- a/sensorhub-core/src/main/java/com/casic/missiles/controller/AepCommandSend.java
+++ b/sensorhub-core/src/main/java/com/casic/missiles/controller/AepCommandSend.java
@@ -64,7 +64,7 @@
             request.setParamMasterKey("0a77886fae4f4ff68d926adeb3a3ef5b"); // single value
             request.setBody(JSONObject.toJSONString(busConfigParam).getBytes());
             CreateCommandLwm2mProfileResponse msgResponse = client.CreateCommandLwm2mProfile(request);
-            log.info("-----" + msgResponse.getMessage());
+            log.info("send status-----" + msgResponse.getMessage());
         } catch (Exception ex) {
         } finally {
             client.shutdown();
diff --git a/sensorhub-core/src/main/java/com/casic/missiles/controller/KafkaController.java b/sensorhub-core/src/main/java/com/casic/missiles/controller/KafkaController.java
deleted file mode 100644
index d679bf1..0000000
--- a/sensorhub-core/src/main/java/com/casic/missiles/controller/KafkaController.java
+++ /dev/null
@@ -1,59 +0,0 @@
-package com.casic.missiles.controller;
-
-import com.casic.missiles.autoconfig.KafkaSendResultHandler;
-import com.casic.missiles.parser.sender.impl.KafkaSubscribe;
-import org.apache.kafka.clients.producer.ProducerRecord;
-import org.springframework.beans.factory.annotation.Autowired;
-import org.springframework.kafka.core.KafkaTemplate;
-import org.springframework.kafka.support.KafkaHeaders;
-import org.springframework.messaging.MessageHeaders;
-import org.springframework.messaging.support.GenericMessage;
-import org.springframework.transaction.annotation.Transactional;
-import org.springframework.web.bind.annotation.RequestMapping;
-import org.springframework.web.bind.annotation.RestController;
-
-import java.util.HashMap;
-import java.util.Map;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.TimeoutException;
-
-@RestController
-@RequestMapping("/provider")
-//This annotation marks the class as transactional in Spring Boot; since Kafka transactions are enabled in the Kafka config, omitting it causes errors
-public class KafkaController {
-
-    @Autowired
-    private KafkaSubscribe kafkaSubscribe;
-
-    @RequestMapping("/send")
-    @Transactional
-    public String sendMultiple() throws Exception {
-        kafkaSubscribe.publishDataSubscribe(null, null);
-        return "okk";
-    }
-
-
-//    /**
-//     * Kafka offers several ways to build messages
-//     *
-//     * @throws ExecutionException
-//     * @throws InterruptedException
-//     * @throws TimeoutException
-//     */
-//    public void sendDemo() throws ExecutionException, InterruptedException, TimeoutException {
-//        //The trailing get makes the send synchronous; the optional timeout throws once exceeded, though the send itself still goes through
-//        kafkaTemplate.send("topic1", "发给topic1").get();
-//        //Send a message with ProducerRecord
-//        ProducerRecord producerRecord = new ProducerRecord<>("topic.quick.demo", "use ProducerRecord to send message");
-//        kafkaTemplate.send(producerRecord);
-//        //Send a message with Message
-//        Map map = new HashMap<>();
-//        map.put(KafkaHeaders.TOPIC, "topic.quick.demo");
-//        map.put(KafkaHeaders.PARTITION_ID, 0);
-//        map.put(KafkaHeaders.MESSAGE_KEY, 0);
-//        GenericMessage message = new GenericMessage<>("use Message to send message", new MessageHeaders(map));
-//        kafkaTemplate.send(message);
-//    }
-
-}
-
diff --git a/sensorhub-core/src/main/java/com/casic/missiles/controller/ReceiveController.java b/sensorhub-core/src/main/java/com/casic/missiles/controller/ReceiveController.java
index e5fa505..dfed36a 100644
--- a/sensorhub-core/src/main/java/com/casic/missiles/controller/ReceiveController.java
+++ b/sensorhub-core/src/main/java/com/casic/missiles/controller/ReceiveController.java
@@ -18,13 +18,13 @@
     private final ThreadPoolExecutor threadPoolExecutor = new ThreadPoolExecutor(5, 10, 60, TimeUnit.SECONDS, new LinkedBlockingDeque<>(10));

     @RequestMapping("/data")
-    public Object testNbResponse(@RequestBody Map h2sDataMap) {
+    public Object testNbResponse(@RequestBody Map dataMap) {
         ResponseData responseData = new ResponseData();
-        System.out.println(JSON.toJSON(h2sDataMap));
+        System.out.println(JSON.toJSON(dataMap));
         threadPoolExecutor.execute(
                 () -> {
                     AepCommandSend aepCommandSend = new AepCommandSend();
-                    aepCommandSend.sendConfig(h2sDataMap);
+                    aepCommandSend.sendConfig(dataMap);
                 }
         );
         responseData.setCode(200);
diff --git a/sensorhub-core/src/main/java/com/casic/missiles/netty/SensorhubServer.java b/sensorhub-core/src/main/java/com/casic/missiles/netty/SensorhubServer.java
index c4a534c..a946173 100644
--- a/sensorhub-core/src/main/java/com/casic/missiles/netty/SensorhubServer.java
+++ b/sensorhub-core/src/main/java/com/casic/missiles/netty/SensorhubServer.java
@@ -3,6 +3,7 @@
 import com.casic.missiles.autoconfig.SensorhubProperties;
 import com.casic.missiles.netty.handler.SensorhubServerChannelInitialHandler;
 import io.netty.bootstrap.ServerBootstrap;
+import io.netty.buffer.PooledByteBufAllocator;
 import io.netty.channel.ChannelFuture;
 import io.netty.channel.ChannelOption;
 import io.netty.channel.EventLoopGroup;
@@ -71,6 +72,8 @@
             b.group(bossGroup, workerGroup)                                   // Set the EventLoopGroup
                     .channel(NioServerSocketChannel.class)                    // Specify the type of new Channels
                     .childHandler(new SensorhubServerChannelInitialHandler()) // Specify the ChannelHandler
+                    .option(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT)
+                    .childOption(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT)
                     .option(ChannelOption.SO_BACKLOG, 1024)                   // Options for the ServerChannel
                     .childOption(ChannelOption.SO_KEEPALIVE, true);           // With SO_KEEPALIVE=true the server can probe whether the client connection is still alive; if the client has gone away, the server side can close the connection and free resources
             //Add detection
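Review note: the two ALLOCATOR options above switch both the server channel and the accepted child channels to Netty's pooled allocator. A self-contained sketch of the same wiring, isolated from this project's handlers (port and initializer body are illustrative):

```java
import io.netty.bootstrap.ServerBootstrap;
import io.netty.buffer.PooledByteBufAllocator;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.ChannelOption;
import io.netty.channel.EventLoopGroup;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.NioServerSocketChannel;
import io.netty.channel.socket.SocketChannel;

public class PooledServerSketch {
    public static void main(String[] args) throws InterruptedException {
        EventLoopGroup boss = new NioEventLoopGroup(1);
        EventLoopGroup worker = new NioEventLoopGroup();
        try {
            ServerBootstrap b = new ServerBootstrap();
            b.group(boss, worker)
             .channel(NioServerSocketChannel.class)
             // pooled allocator on both the parent and the child channels, as in the patch
             .option(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT)
             .childOption(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT)
             .childHandler(new ChannelInitializer<SocketChannel>() {
                 @Override
                 protected void initChannel(SocketChannel ch) {
                     // decoder/replier handlers would be added here
                 }
             });
            b.bind(9999).sync().channel().closeFuture().sync();
        } finally {
            boss.shutdownGracefully();
            worker.shutdownGracefully();
        }
    }
}
```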
diff --git a/sensorhub-core/src/main/java/com/casic/missiles/parser/GenericProtocolParser.java b/sensorhub-core/src/main/java/com/casic/missiles/parser/GenericProtocolParser.java
index bb41939..ec9a04f 100644
--- a/sensorhub-core/src/main/java/com/casic/missiles/parser/GenericProtocolParser.java
+++ b/sensorhub-core/src/main/java/com/casic/missiles/parser/GenericProtocolParser.java
@@ -122,7 +122,7 @@
             }
             ProtocolProcessEventListener.setTask(devcode, bizDataMap, 3);
             //Store the data
-            datagramEventProvider.storeData(bizDataMap);
+//            datagramEventProvider.storeData(bizDataMap);
         } catch (RuntimeException rex) {
             log.error("解析出现异常,异常信息为{}", rex);
             //Asynchronous data dispatch with exception interception
diff --git a/sensorhub-core/src/main/java/com/casic/missiles/parser/ProtocolParserSupport.java b/sensorhub-core/src/main/java/com/casic/missiles/parser/ProtocolParserSupport.java
index e4a79ae..c886d25 100644
--- a/sensorhub-core/src/main/java/com/casic/missiles/parser/ProtocolParserSupport.java
+++ b/sensorhub-core/src/main/java/com/casic/missiles/parser/ProtocolParserSupport.java
@@ -29,6 +29,13 @@
 public class ProtocolParserSupport {

+    /**
+     * Get the protocol's fixed data fields
+     *
+     * @param protocolFactory
+     * @param wholeDatagramByte
+     * @return
+     */
     protected Map getParseFixedDataMap(AbstractProtocolConfigFactory protocolFactory, ByteBuf wholeDatagramByte) {
         ProtocolFieldConfigProvider protocolFieldConfigProvider = protocolFactory.getProtocolFieldConfigProvider();
         RuleConfigProvider ruleConfigProvider = protocolFactory.getRuleConfigProvider();
@@ -41,7 +48,7 @@
         //Print the source data and device code
         ProcessEventTask processEventTask = ProcessEventTask.builder()
                 .devcode((String) parseFixedDataMap.get("devcode"))
-                .decryptSoureData(ByteBufUtil.hexDump(wholeDatagramByte))
+                .decryptSoureData(ByteBufUtil.hexDump(wholeDatagramByte).substring(wholeDatagramByte.readerIndex()))
                 .build();
         ProtocolProcessEventListener.asynAddTask(processEventTask);
         return parseFixedDataMap;
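Review note: `ByteBufUtil.hexDump(buf)` already starts at the buffer's readerIndex, and each byte occupies two hex characters, so `substring(readerIndex())` mixes byte offsets with string offsets. If the intent is to dump only the unread remainder, the explicit-range overload states that directly; a small demonstration:

```java
import io.netty.buffer.ByteBuf;
import io.netty.buffer.ByteBufUtil;
import io.netty.buffer.Unpooled;

public class HexDumpSketch {
    public static void main(String[] args) {
        ByteBuf buf = Unpooled.wrappedBuffer(new byte[]{0x01, 0x02, 0x03, 0x04});
        buf.readByte(); // pretend one byte has already been consumed

        // already starts at readerIndex: prints "020304"
        System.out.println(ByteBufUtil.hexDump(buf));

        // equivalent, but the range is explicit and self-documenting
        System.out.println(ByteBufUtil.hexDump(buf, buf.readerIndex(), buf.readableBytes()));
    }
}
```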
diff --git a/sensorhub-core/src/main/java/com/casic/missiles/parser/SensorhubDecoder.java b/sensorhub-core/src/main/java/com/casic/missiles/parser/SensorhubDecoder.java
index c8c54f8..fccfa7c 100644
--- a/sensorhub-core/src/main/java/com/casic/missiles/parser/SensorhubDecoder.java
+++ b/sensorhub-core/src/main/java/com/casic/missiles/parser/SensorhubDecoder.java
@@ -1,14 +1,17 @@
 package com.casic.missiles.parser;

+import com.casic.missiles.autoconfig.DirectMemoryReporter;
 import com.casic.missiles.parser.predecodec.AbstractPretreatment;
 import com.casic.missiles.pojo.ParseResult;
 import com.casic.missiles.provider.ProtocolConfigProvider;
 import com.casic.missiles.util.ClazzUtil;
 import io.netty.buffer.ByteBuf;
+import io.netty.buffer.ByteBufAllocator;
 import io.netty.buffer.ByteBufUtil;
 import io.netty.channel.ChannelHandlerContext;
 import io.netty.handler.codec.ByteToMessageDecoder;
 import lombok.extern.slf4j.Slf4j;
+import org.apache.commons.codec.binary.Hex;

 import java.io.BufferedInputStream;
 import java.io.File;
@@ -33,29 +36,43 @@
      * 3. Save the results from the generic protocol parser into a list and hand them to the reply handler for the corresponding reply commands
      */
     @Override
-    public void decode(ChannelHandlerContext ctx, ByteBuf buffer, List list){
+    public void decode(ChannelHandlerContext ctx, ByteBuf buffer, List list) {
         //Register the pre-processors
         List abstractPreProcessingList = ClazzUtil.getSubClassList(AbstractPretreatment.class, true);
         ByteBuf standardByteBuf = buffer;
-        String oldBuff=ByteBufUtil.hexDump(standardByteBuf);
+        String oldBuff = ByteBufUtil.hexDump(standardByteBuf);
         for (AbstractPretreatment abstractPretreatment : abstractPreProcessingList) {
             standardByteBuf = abstractPretreatment.decode(standardByteBuf);
         }
-        boolean pretreatmentStatus=!oldBuff.equals(ByteBufUtil.hexDump(standardByteBuf));
+        boolean pretreatmentStatus = !oldBuff.equals(ByteBufUtil.hexDump(standardByteBuf));
         log.info(ByteBufUtil.hexDump(standardByteBuf));
         //Additional pre-processing could go here, e.g. split/merge handling for the network stream
         ProtocolParser protocolParser = new GenericProtocolParser();
-        System.out.println(protocolParser);
         ProtocolConfigProvider protocolConfigProvider = new ProtocolConfigProvider();
         ParseResult parseResult = null;
+//        DirectMemoryReporter memoryReporter=new DirectMemoryReporter();
         //Whatever happens, hand the assembled content on; it is put to use when replying
+        Integer pre = 0;
         while (parseResult == null && standardByteBuf.readerIndex() != standardByteBuf.writerIndex()) {
+            pre = standardByteBuf.readerIndex();
             parseResult = protocolParser.doParseProtocol(standardByteBuf, protocolConfigProvider);
+            //Guard against an infinite loop
+            if (pre == standardByteBuf.readerIndex()) {
+                break;
+            }
         }
+//        destroy(standardByteBuf);
         if (parseResult != null) {
             parseResult.setRequestCode(pretreatmentStatus);
             list.add(parseResult);
         }
     }
+
+//    public void destroy(ByteBuf byteBuf) {
+//        if (byteBuf != null && byteBuf.refCnt() > 0) {
+//            byteBuf.release();
+//            byteBuf = null;
+//        }
+//    }
+
 }
diff --git a/sensorhub-core/src/main/java/com/casic/missiles/parser/processor/reply/DevcieConfigConfirm.java b/sensorhub-core/src/main/java/com/casic/missiles/parser/processor/reply/DevcieConfigConfirm.java
index 1fd58f2..5ea30a9 100644
--- a/sensorhub-core/src/main/java/com/casic/missiles/parser/processor/reply/DevcieConfigConfirm.java
+++ b/sensorhub-core/src/main/java/com/casic/missiles/parser/processor/reply/DevcieConfigConfirm.java
@@ -41,7 +41,7 @@
             }
         }
         //Config push succeeded
-//        redisCommon.removeKeyByDevcode(devcode);
+        redisCommon.removeKeyByDevcode(devcode);
         configDataMap.put("config", "1");
     }
     return result;
diff --git a/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/combined/CombinedFieldSupport.java b/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/combined/CombinedFieldSupport.java
index b49c027..9226dc3 100644
--- a/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/combined/CombinedFieldSupport.java
+++ b/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/combined/CombinedFieldSupport.java
@@ -20,13 +20,12 @@
  */
 public class CombinedFieldSupport {

-    //Deep copy so the stored object is not affected
+    //Fetch the field config via a deep copy
     protected FieldConfig getFieldConfigById(Long fieldId, Map fieldConfigsMap) {
         if (ObjectUtils.isNotEmpty(fieldId)) {
             FieldConfig fieldConfig = fieldConfigsMap.get(fieldId);
             FieldConfig newDeepCopyFieldConfig = new FieldConfig();
             //Passed in via deep copy
-//            System.out.println("fieldId:"+fieldId+" json:"+ JSON.toJSON(fieldConfig));
             BeanUtils.copyProperties(fieldConfig, newDeepCopyFieldConfig);
             return newDeepCopyFieldConfig;
         } else {
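Review note: the `pre == readerIndex()` check added to SensorhubDecoder is the standard defense for parse loops over a ByteBuf: every pass must either consume bytes or terminate. A minimal, self-contained shape of that guard (the parse step is a placeholder, not this project's parser):

```java
import io.netty.buffer.ByteBuf;
import io.netty.channel.ChannelHandlerContext;
import io.netty.handler.codec.ByteToMessageDecoder;

import java.util.List;

// A decoder loop must either consume bytes or break;
// otherwise a malformed frame spins the event loop forever.
public class ProgressGuardedDecoder extends ByteToMessageDecoder {
    @Override
    protected void decode(ChannelHandlerContext ctx, ByteBuf in, List<Object> out) {
        while (in.isReadable()) {
            int before = in.readerIndex();
            Object frame = tryParse(in); // hypothetical parse step; may consume nothing
            if (frame != null) {
                out.add(frame);
            }
            if (in.readerIndex() == before) {
                break; // no progress: wait for more bytes instead of looping
            }
        }
    }

    private Object tryParse(ByteBuf in) {
        return null; // placeholder for a real protocol parser
    }
}
```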
diff --git a/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/combined/GenericCombinedFieldResolver.java b/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/combined/GenericCombinedFieldResolver.java
index 58c419d..72b75dc 100644
--- a/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/combined/GenericCombinedFieldResolver.java
+++ b/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/combined/GenericCombinedFieldResolver.java
@@ -5,6 +5,7 @@
 import com.baomidou.mybatisplus.core.toolkit.CollectionUtils;
 import com.casic.missiles.enums.EngineExceptionEnum;
 import com.casic.missiles.exception.EngineException;
+import com.casic.missiles.parser.GenericProtocolParser;
 import com.casic.missiles.parser.resolver.combined.impl.BizFieldParseProcessor;
 import com.casic.missiles.parser.resolver.combined.impl.PreBizFieldParseProcessor;
 import com.casic.missiles.parser.resolver.combined.impl.PreLeadCodeProcessor;
@@ -13,13 +14,18 @@
 import com.casic.missiles.pojo.CombinedFieldProcessorParam;
 import com.casic.missiles.pojo.FieldConfig;
 import com.casic.missiles.pojo.FieldRuleConfig;
+import com.casic.missiles.util.SpringContextUtil;
 import io.netty.buffer.ByteBuf;
 import io.netty.buffer.ByteBufUtil;
+import io.netty.util.internal.PlatformDependent;
 import lombok.extern.slf4j.Slf4j;
+import org.springframework.util.ReflectionUtils;

+import java.lang.reflect.Field;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
+import java.util.concurrent.atomic.AtomicLong;
 import java.util.stream.Collectors;

 /**
@@ -31,7 +37,7 @@
 public class GenericCombinedFieldResolver {

     /**
-     * Main flow class for combined field parsing
+     * Main flow class for combined field parsing
      * Field lengths vary and are found by lookup
      */
@@ -42,16 +48,16 @@
         List<Map<String, Object>> storeObjectList = combinedFieldParam.getStoreObjectList();
         while (byteBuf.readerIndex() < byteBuf.writerIndex()) {
             for (AbstractCombinedFieldProcessor abstractProcessor : abstractProcessorList) {
-            Integer oldLength = byteBuf.readerIndex();
-            median = abstractProcessor.invoke(combinedFieldParam);
-            if (byteBuf.readerIndex() >= byteBuf.writerIndex()) {
-                return;
-            }
-            Assert.isFalse(oldLength == byteBuf.readerIndex(), () -> {
-                throw new EngineException(EngineExceptionEnum.COMBINED_CONFIG_MATCH_FAILED);
-            });
-            // Pass the previous node in as the argument to the next round, keeping the previous node's information
-            combinedFieldParam.setPreProcessorResult(median);
+                Integer oldLength = byteBuf.readerIndex();
+                median = abstractProcessor.invoke(combinedFieldParam);
+                if (byteBuf.readerIndex() >= byteBuf.writerIndex()) {
+                    return;
+                }
+                Assert.isFalse(oldLength == byteBuf.readerIndex(), () -> {
+                    throw new EngineException(EngineExceptionEnum.COMBINED_CONFIG_MATCH_FAILED);
+                });
+                // Pass the previous node in as the argument to the next round, keeping the previous node's information
+                combinedFieldParam.setPreProcessorResult(median);
             }
         }
         System.out.println(JSON.toJSON(storeObjectList));
diff --git a/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/combined/impl/BizFieldParseProcessor.java b/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/combined/impl/BizFieldParseProcessor.java
index 4cd61a7..f65e4eb 100644
--- a/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/combined/impl/BizFieldParseProcessor.java
+++ b/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/combined/impl/BizFieldParseProcessor.java
@@ -62,9 +62,7 @@
      * Calculate the offset position after processing
      */
     private Integer calculateAfterProcessPosition(AbstractFieldConfig newProtocolFieldConfig, CombinedFieldProcessorParam combinedFieldParam) {
-//        Integer originPositionIndex = newProtocolFieldConfig.getOriginPositionByte() - combinedFieldParam.getByteBuf().readerIndex();
         Integer mergeBitToByte = 0;
-//        mergeBitToByte += originPositionIndex;
         if (newProtocolFieldConfig.getOffsetUnit().equals("bit")) {
             mergeBitToByte += (newProtocolFieldConfig.getOriginPositionBit() + newProtocolFieldConfig.getOffsetLength()) / 8;
         } else {
diff --git a/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/fields/BitFieldParser.java b/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/fields/BitFieldParser.java
index a855b68..2f77154 100644
--- a/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/fields/BitFieldParser.java
+++ b/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/fields/BitFieldParser.java
@@ -59,7 +59,9 @@
         byte[] binaryBytes = getBytesFromBinaryStr(binaryStr);
         ByteBuf byteBuf = ByteBufAllocator.DEFAULT.buffer();
         byteBuf.writeBytes(binaryBytes);
-        return doResolveFieldByteRule(byteBuf, ruleIds, fieldRuleConfigMap, networkOrder);
+        Object resolveValue = doResolveFieldByteRule(byteBuf, ruleIds, fieldRuleConfigMap, networkOrder);
+        byteBuf.release();
+        return resolveValue;
     }
diff --git a/sensorhub-core/src/main/java/com/casic/missiles/parser/safe/impl/Sm4.java b/sensorhub-core/src/main/java/com/casic/missiles/parser/safe/impl/Sm4.java
index 18a03e5..b001944 100644
--- a/sensorhub-core/src/main/java/com/casic/missiles/parser/safe/impl/Sm4.java
+++ b/sensorhub-core/src/main/java/com/casic/missiles/parser/safe/impl/Sm4.java
@@ -4,7 +4,6 @@
 import io.netty.buffer.ByteBuf;
 import io.netty.buffer.ByteBufAllocator;
 import io.netty.buffer.ByteBufUtil;
-import io.netty.util.ReferenceCountUtil;
 import lombok.extern.slf4j.Slf4j;
 import org.bouncycastle.crypto.engines.SM4Engine;
 import org.bouncycastle.crypto.params.KeyParameter;
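Review note: the BitFieldParser change plugs a direct-memory leak by releasing the scratch buffer, but it only releases on the happy path. A minimal exception-safe variant of the same idea; names mirror the patched method and the resolve step is a placeholder:

```java
import io.netty.buffer.ByteBuf;
import io.netty.buffer.ByteBufAllocator;

public final class ReleaseSketch {

    // If the resolve step throws, the buffer is still released.
    static Object resolveWithScratchBuffer(byte[] binaryBytes) {
        ByteBuf byteBuf = ByteBufAllocator.DEFAULT.buffer();
        try {
            byteBuf.writeBytes(binaryBytes);
            return doResolve(byteBuf); // stand-in for doResolveFieldByteRule(...)
        } finally {
            byteBuf.release(); // the patch as written releases only on success
        }
    }

    private static Object doResolve(ByteBuf buf) {
        return buf.readableBytes(); // placeholder result
    }
}
```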
diff --git a/sensorhub-core/src/main/java/com/casic/missiles/parser/sender/impl/KafkaSubscribe.java b/sensorhub-core/src/main/java/com/casic/missiles/parser/sender/impl/KafkaSubscribe.java
index aed848d..b95d1d8 100644
--- a/sensorhub-core/src/main/java/com/casic/missiles/parser/sender/impl/KafkaSubscribe.java
+++ b/sensorhub-core/src/main/java/com/casic/missiles/parser/sender/impl/KafkaSubscribe.java
@@ -1,91 +1,92 @@
-package com.casic.missiles.parser.sender.impl;
-
-import cn.hutool.core.date.DateUtil;
-import com.alibaba.fastjson.JSON;
-import com.casic.missiles.autoconfig.KafkaSendResultHandler;
-import com.casic.missiles.parser.sender.DataSubscribeProvider;
-import com.casic.missiles.pojo.SubscribeDetailConfig;
-import org.springframework.kafka.core.KafkaTemplate;
-import org.springframework.stereotype.Component;
-import org.springframework.transaction.annotation.Transactional;
-import org.springframework.util.CollectionUtils;
-
-import java.util.Date;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-/**
- * @author cz
- * @date 2023-11-10
- */
-@Component("kafka")
-public class KafkaSubscribe implements DataSubscribeProvider {
-
-    private KafkaTemplate kafkaTemplate;
-
-    public KafkaSubscribe(KafkaTemplate kafkaTemplate, KafkaSendResultHandler kafkaSendResultHandler) {
-        this.kafkaTemplate = kafkaTemplate;
-        //Callback and exception handling
-        this.kafkaTemplate.setProducerListener(kafkaSendResultHandler);
-    }
-
-    @Override
-    @Transactional
-    public void publishDataSubscribe(List<Map<String, Object>> bizDataMapList, SubscribeDetailConfig subscribeDetailConfig) {
-        if (CollectionUtils.isEmpty(bizDataMapList)) {
-            return;
-        }
-        Map contentMap = new HashMap(), mBody = new HashMap();
-        Map bizDataMap = bizDataMapList.get(0);
-        switch ((Integer) bizDataMap.get("deviceType")) {
-            case 32:
-                contentMap.put("devType", "GasDetector");
-                setEventType(bizDataMap, contentMap, mBody, "GasConfigSuccess", "GasConfigFail");
-                break;
-            case 31:
-                contentMap.put("devType", "Pressure");
-                setEventType(bizDataMap, contentMap, mBody, "PressureConfigSuccess", "PressureConfigFail");
-        }
-
-        if (bizDataMap.containsKey("dataValue")) {
-            contentMap.put("mType", "Data");
-            if (bizDataMap.containsKey("cell")) {
-                mBody.put("cell", bizDataMap.get("cell"));
-            }
-            mBody.put("datas", bizDataMapList);
-            sendKafkaMsg(bizDataMap, contentMap, mBody);
-        }
-        if (bizDataMap.containsKey("imei")) {
-            contentMap.put("mType", "StartupRequest");
-            mBody.put("iccid", bizDataMap.get("iccid"));
-            mBody.put("imei", bizDataMap.get("imei"));
-            sendKafkaMsg(bizDataMap, contentMap, mBody);
-        }
-
-    }
-
-    private void setEventType(Map bizDataMap, Map contentMap, Map mBody, String bTypeSuccess, String bTypeFail) {
-        if (bizDataMap.containsKey("config")) {
-            contentMap.put("mType", "SetResponse");
-            contentMap.put("ts", DateUtil.format(new Date(), "yyyyMMddHHmmss"));
-            if ("1".equals(bizDataMap.get("config"))) {
-                mBody.put("bType", bTypeSuccess);
-            } else {
-                mBody.put("bType", bTypeFail);
-            }
-            sendKafkaMsg(bizDataMap, contentMap, mBody);
-        }
-
-    }
-
-
-    private void sendKafkaMsg(Map bizDataMap, Map contentMap, Map mBody) {
-        if (bizDataMap.containsKey("devcode")) {
-            contentMap.put("devCode", bizDataMap.get("devcode"));
-        }
-        contentMap.put("mBody", mBody);
-        kafkaTemplate.send("pressure", JSON.toJSONString(contentMap));
-    }
-
-}
+//package com.casic.missiles.parser.sender.impl;
+//
+//import cn.hutool.core.date.DateUtil;
+//import com.alibaba.fastjson.JSON;
+//import com.casic.missiles.autoconfig.KafkaSendResultHandler;
+//import com.casic.missiles.parser.sender.DataSubscribeProvider;
+//import com.casic.missiles.pojo.SubscribeDetailConfig;
+//import org.springframework.kafka.core.KafkaTemplate;
+//import org.springframework.stereotype.Component;
+//import org.springframework.transaction.annotation.Transactional;
+//import org.springframework.util.CollectionUtils;
+//
+//import java.util.Date;
+//import java.util.HashMap;
+//import java.util.List;
+//import java.util.Map;
+//
+///**
+// * @author cz
+// * @date 2023-11-10
+// */
+//@Component("kafka")
+//public class KafkaSubscribe implements DataSubscribeProvider {
+//
+//    private KafkaTemplate kafkaTemplate;
+//
+//    public KafkaSubscribe(KafkaTemplate kafkaTemplate, KafkaSendResultHandler kafkaSendResultHandler) {
+//        this.kafkaTemplate = kafkaTemplate;
+//        //Callback and exception handling
+//        this.kafkaTemplate.setProducerListener(kafkaSendResultHandler);
+//    }
+//
+//    @Override
+//    @Transactional
+//    public void publishDataSubscribe(List<Map<String, Object>> bizDataMapList, SubscribeDetailConfig subscribeDetailConfig) {
+//        if (CollectionUtils.isEmpty(bizDataMapList)) {
+//            return;
+//        }
+//        Map contentMap = new HashMap(), mBody = new HashMap();
+//        Map bizDataMap = bizDataMapList.get(0);
+//        switch ((Integer) bizDataMap.get("deviceType")) {
+//            case 32:
+//                contentMap.put("devType", "GasDetector");
+//                setEventType(bizDataMap, contentMap, mBody, "GasConfigSuccess", "GasConfigFail");
+//                break;
+//            case 31:
+//                contentMap.put("devType", "Pressure");
+//                setEventType(bizDataMap, contentMap, mBody, "PressureConfigSuccess", "PressureConfigFail");
+//        }
+//
+//        if (bizDataMap.containsKey("dataValue")) {
+//            contentMap.put("mType", "Data");
+//            if (bizDataMap.containsKey("cell")) {
+//                mBody.put("cell", bizDataMap.get("cell"));
+//            }
+//            mBody.put("datas", bizDataMapList);
+//            sendKafkaMsg(bizDataMap, contentMap, mBody);
+//        }
+//        if (bizDataMap.containsKey("imei")) {
+//            contentMap.put("mType", "StartupRequest");
+//            mBody.put("iccid", bizDataMap.get("iccid"));
+//            mBody.put("imei", bizDataMap.get("imei"));
+//            sendKafkaMsg(bizDataMap, contentMap, mBody);
+//        }
+//
+//    }
+//
+//    //Build the config-push reply
+//    private void setEventType(Map bizDataMap, Map contentMap, Map mBody, String bTypeSuccess, String bTypeFail) {
+//        if (bizDataMap.containsKey("config")) {
+//            contentMap.put("mType", "SetResponse");
+//            contentMap.put("ts", DateUtil.format(new Date(), "yyyyMMddHHmmss"));
+//            if ("1".equals(bizDataMap.get("config"))) {
+//                mBody.put("bType", bTypeSuccess);
+//            } else {
+//                mBody.put("bType", bTypeFail);
+//            }
+//            sendKafkaMsg(bizDataMap, contentMap, mBody);
+//        }
+//
+//    }
+//
+//    //Build the kafka reply
+//    private void sendKafkaMsg(Map bizDataMap, Map contentMap, Map mBody) {
+//        if (bizDataMap.containsKey("devcode")) {
+//            contentMap.put("devCode", bizDataMap.get("devcode"));
+//        }
+//        contentMap.put("mBody", mBody);
+//        kafkaTemplate.send("pressure", JSON.toJSONString(contentMap));
+//    }
+//
+//}
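Review note: the commented-out subscriber wrapped every record in a `{devType, mType, devCode, ts, mBody}` envelope before sending it to the `pressure` topic. A standalone sketch of that envelope, reconstructed from the code above; all field values here are illustrative:

```java
import com.alibaba.fastjson.JSON;

import java.util.HashMap;
import java.util.Map;

public class EnvelopeSketch {
    public static void main(String[] args) {
        Map<String, Object> mBody = new HashMap<>();
        mBody.put("bType", "PressureConfigSuccess"); // success/fail variant chosen per device ack

        Map<String, Object> contentMap = new HashMap<>();
        contentMap.put("devType", "Pressure");       // deviceType 31 -> Pressure, 32 -> GasDetector
        contentMap.put("mType", "SetResponse");
        contentMap.put("devCode", "88888888888");    // hypothetical device code
        contentMap.put("mBody", mBody);

        // the subscriber sent this JSON string to the "pressure" topic
        System.out.println(JSON.toJSONString(contentMap));
    }
}
```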
diff --git a/sensorhub-core/src/main/java/com/casic/missiles/provider/ProcessorInstanceProvider.java b/sensorhub-core/src/main/java/com/casic/missiles/provider/ProcessorInstanceProvider.java
index 3d89d0d..833a4b1 100644
--- a/sensorhub-core/src/main/java/com/casic/missiles/provider/ProcessorInstanceProvider.java
+++ b/sensorhub-core/src/main/java/com/casic/missiles/provider/ProcessorInstanceProvider.java
@@ -6,7 +6,7 @@
 import com.casic.missiles.cache.ProtocolProcessEventListener;
 import com.casic.missiles.parser.safe.SafeStrategy;
 import com.casic.missiles.parser.sender.DataSubscribeProvider;
-import com.casic.missiles.parser.sender.impl.KafkaSubscribe;
+//import com.casic.missiles.parser.sender.impl.KafkaSubscribe;
 import com.casic.missiles.pojo.*;
 import com.casic.missiles.registry.DatagramEventRegistry;
 import com.casic.missiles.registry.SubscribeRegistry;
@@ -148,8 +148,8 @@
      * Data subscription
      */
     public void storeData(List<Map<String, Object>> bizDataMap) {
-        DataSubscribeProvider subscribeProvider = SpringContextUtil.getBean(KafkaSubscribe.class);
-        subscribeProvider.publishDataSubscribe(bizDataMap, null);
+//        DataSubscribeProvider subscribeProvider = SpringContextUtil.getBean(KafkaSubscribe.class);
+//        subscribeProvider.publishDataSubscribe(bizDataMap, null);
     }

//    DataSubscribeProvider dataSubscribeProvider = SpringContextUtil.getBean(processorInstance.getSubscribeBean());
diff --git a/sensorhub-core/src/main/java/com/casic/missiles/replier/SensorhubReplier.java b/sensorhub-core/src/main/java/com/casic/missiles/replier/SensorhubReplier.java
index aef5616..327144a 100644
--- a/sensorhub-core/src/main/java/com/casic/missiles/replier/SensorhubReplier.java
+++ b/sensorhub-core/src/main/java/com/casic/missiles/replier/SensorhubReplier.java
@@ -8,10 +8,12 @@
 import io.netty.channel.ChannelHandlerContext;
 import io.netty.channel.SimpleChannelInboundHandler;
 import io.netty.handler.codec.http.FullHttpResponse;
+import lombok.extern.slf4j.Slf4j;

 /**
  * @author cz
  */
+@Slf4j
 public class SensorhubReplier extends SimpleChannelInboundHandler {

 /**
@@ -33,7 +35,7 @@
         //-1 means this is a config-push confirmation and needs no reply
         if (-1 != parseResult.getReplyCommand()) {
             ByteBuf replyByteBuf = abstractBuildReplyCommand.excute(parseResult);
-            System.out.println("返回的报文内容为" + ByteBufUtil.hexDump((ByteBuf) replyByteBuf));
+            log.info("返回的报文内容为" + ByteBufUtil.hexDump((ByteBuf) replyByteBuf));
             ((ByteBuf) replyByteBuf).resetReaderIndex();
             //Send the reply
             ctx.channel().writeAndFlush(replyByteBuf);
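Review note: with KafkaSubscribe commented out, `storeData` is now an empty method. If the call path should stay alive while Kafka is disabled, a log-only stand-in is one option. This is a hypothetical sketch, assuming the `DataSubscribeProvider` interface shape shown in the KafkaSubscribe diff above; the bean name is illustrative:

```java
import com.casic.missiles.parser.sender.DataSubscribeProvider;
import com.casic.missiles.pojo.SubscribeDetailConfig;
import lombok.extern.slf4j.Slf4j;
import org.springframework.stereotype.Component;

import java.util.List;
import java.util.Map;

// Keeps storeData() callable without the commented-out KafkaSubscribe bean:
// records are counted and dropped instead of being published.
@Slf4j
@Component("logOnly")
public class LogOnlySubscribe implements DataSubscribeProvider {

    @Override
    public void publishDataSubscribe(List<Map<String, Object>> bizDataMapList,
                                     SubscribeDetailConfig subscribeDetailConfig) {
        log.debug("subscribe disabled, dropping {} records",
                bizDataMapList == null ? 0 : bizDataMapList.size());
    }
}
```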
diff --git a/sensorhub-support/src/main/java/com/casic/missiles/enums/FileNameEnums.java b/sensorhub-support/src/main/java/com/casic/missiles/enums/FileNameEnums.java
index babf0a3..6243a7a 100644
--- a/sensorhub-support/src/main/java/com/casic/missiles/enums/FileNameEnums.java
+++ b/sensorhub-support/src/main/java/com/casic/missiles/enums/FileNameEnums.java
@@ -8,7 +8,7 @@
     FILE_NAMES(new HashMap() {
         {
             put(COMMERCIAL_GAS, "GT_BIR1000-APP_v1.1.bin");
-            put(PRESSURE, "BIRMM-P1000N-APP_v1.0.bin");
+            put(PRESSURE, "BIRMM-P1000N-APP_v1.1.bin");
         }
     });
diff --git a/sensorhub-support/src/main/java/com/casic/missiles/enums/ReplyCommandEnum.java b/sensorhub-support/src/main/java/com/casic/missiles/enums/ReplyCommandEnum.java
index c1e683f..9a73a36 100644
--- a/sensorhub-support/src/main/java/com/casic/missiles/enums/ReplyCommandEnum.java
+++ b/sensorhub-support/src/main/java/com/casic/missiles/enums/ReplyCommandEnum.java
@@ -78,6 +78,6 @@
     /**
      * Mapping for the operation type
      */
-    String PDU_TYPE = "操作类型";
+    String PDU_TYPE = "PDUType";

 }
diff --git a/sensorhub-support/src/main/java/com/casic/missiles/pojo/ProcessEventTask.java b/sensorhub-support/src/main/java/com/casic/missiles/pojo/ProcessEventTask.java
index 8484cfd..d7de317 100644
--- a/sensorhub-support/src/main/java/com/casic/missiles/pojo/ProcessEventTask.java
+++ b/sensorhub-support/src/main/java/com/casic/missiles/pojo/ProcessEventTask.java
@@ -31,7 +31,7 @@

     /**
-     * Device code
+     * Device source data
      */
     private String decryptSoureData;
field.setAccessible(true); + try { + directMomery = PlatformDependent.usedDirectMemory(); + } catch (Exception e) { + } + + GlobalEventExecutor.INSTANCE.scheduleAtFixedRate(this::doReport, 0, 1, TimeUnit.SECONDS); + } + + + + public void doReport() { + log.debug("netty_directMomery_log ,{}k" , directMomery / _1k); + } +} \ No newline at end of file diff --git a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerConfig.java b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerConfig.java index 24e4884..e494726 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerConfig.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerConfig.java @@ -1,100 +1,100 @@ -package com.casic.missiles.autoconfig; - -import org.apache.kafka.clients.CommonClientConfigs; -import org.apache.kafka.clients.consumer.ConsumerConfig; -import org.apache.kafka.common.config.SaslConfigs; -import org.apache.kafka.common.security.auth.SecurityProtocol; -import org.apache.kafka.common.serialization.StringDeserializer; -import org.springframework.beans.factory.annotation.Value; -import org.springframework.boot.SpringBootConfiguration; -import org.springframework.context.annotation.Bean; -import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory; -import org.springframework.kafka.config.KafkaListenerContainerFactory; -import org.springframework.kafka.core.DefaultKafkaConsumerFactory; -import org.springframework.kafka.listener.ConcurrentMessageListenerContainer; -import org.springframework.kafka.listener.ContainerProperties; - -import java.util.HashMap; -import java.util.Map; - - -/** - * @author cz - */ -@SpringBootConfiguration -public class KafkaConsumerConfig { - - @Value("${spring.kafka.consumer.bootstrap-servers}") - private String bootstrapServers; - @Value("${spring.kafka.consumer.group-id}") - private String groupId; - @Value("${spring.kafka.consumer.enable-auto-commit}") - private boolean enableAutoCommit; - @Value("${spring.kafka.properties.session.timeout.ms}") - private String sessionTimeout; - @Value("${spring.kafka.properties.max.poll.interval.ms}") - private String maxPollIntervalTime; - @Value("${spring.kafka.consumer.max-poll-records}") - private String maxPollRecords; - @Value("${spring.kafka.consumer.auto-offset-reset}") - private String autoOffsetReset; - @Value("${spring.kafka.listener.concurrency}") - private Integer concurrency; - @Value("${spring.kafka.listener.missing-topics-fatal}") - private boolean missingTopicsFatal; - - private final long pollTimeout = 600000; - - @Bean - public Map consumerConfigs() { - Map propsMap = new HashMap<>(16); - // 服务器地址,不多说配置直接用 - propsMap.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers); - // groupId不多说,直接用 - propsMap.put(ConsumerConfig.GROUP_ID_CONFIG, groupId); - //是否自动提交偏移量,默认值是true,为了避免出现重复数据和数据丢失,可以把它设置为false,然后手动提交偏移量 - propsMap.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, enableAutoCommit); - //自动提交的时间间隔,自动提交开启时生效 - propsMap.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "2000"); - //该属性指定了消费者在读取一个没有偏移量的分区或者偏移量无效的情况下该作何处理: - //我们使用latest:当各分区下有已提交的offset时,从提交的offset开始消费;无提交的offset时,消费新产生的该分区下的数据 - propsMap.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, autoOffsetReset); - //两次poll之间的最大间隔,默认值为5分钟。如果超过这个间隔会触发reBalance - propsMap.put(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, maxPollIntervalTime); - //这个参数定义了poll方法最多可以拉取多少条消息,默认值为500。 - propsMap.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, maxPollRecords); - 
//当broker多久没有收到consumer的心跳请求后就触发reBalance,默认值是10s - propsMap.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, sessionTimeout); - //序列化(我们这边使用StringDeserializer,与生产者保持一致) - propsMap.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); - propsMap.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); - // 下面四个参数是用户名密码的参数,没有用户名密码可以去掉以下配置 -// propsMap.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, SecurityProtocol.SASL_PLAINTEXT.name); -// propsMap.put(SaslConfigs.SASL_MECHANISM, "PLAIN"); -// propsMap.put("java.security.auth.login.config", "10000"); - // 这里username设置用户名, password设置密码我写死到代码里了,可以更改为nacos配置 -// propsMap.put(SaslConfigs.SASL_JAAS_CONFIG, String.format("org.apache.kafka.common.security.plain.PlainLoginModule required username=\"admin\" password=\"admin1234\";")); - return propsMap; - } - - // 消费者工厂,将配置信息加载进去 - @Bean("consumerFactory") - public DefaultKafkaConsumerFactory consumerFactory() { - return new DefaultKafkaConsumerFactory(consumerConfigs()); - } - - @Bean("listenerContainerFactory") - public KafkaListenerContainerFactory> kafkaListenerContainerFactory() { - ConcurrentKafkaListenerContainerFactory factory = new ConcurrentKafkaListenerContainerFactory<>(); - factory.setConsumerFactory(consumerFactory()); - //在侦听器容器中运行的线程数,一般设置为 机器数*分区数 - factory.setConcurrency(concurrency); - //消费监听接口监听的主题不存在时,默认会报错,所以设置为false忽略错误 - factory.getContainerProperties().setMissingTopicsFatal(missingTopicsFatal); - //自动提交关闭,需要设置手动消息确认 - factory.getContainerProperties().setAckMode(ContainerProperties.AckMode.MANUAL_IMMEDIATE); - factory.getContainerProperties().setPollTimeout(pollTimeout); - return factory; - } -} - +//package com.casic.missiles.autoconfig; +// +//import org.apache.kafka.clients.CommonClientConfigs; +//import org.apache.kafka.clients.consumer.ConsumerConfig; +//import org.apache.kafka.common.config.SaslConfigs; +//import org.apache.kafka.common.security.auth.SecurityProtocol; +//import org.apache.kafka.common.serialization.StringDeserializer; +//import org.springframework.beans.factory.annotation.Value; +//import org.springframework.boot.SpringBootConfiguration; +//import org.springframework.context.annotation.Bean; +//import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory; +//import org.springframework.kafka.config.KafkaListenerContainerFactory; +//import org.springframework.kafka.core.DefaultKafkaConsumerFactory; +//import org.springframework.kafka.listener.ConcurrentMessageListenerContainer; +//import org.springframework.kafka.listener.ContainerProperties; +// +//import java.util.HashMap; +//import java.util.Map; +// +// +///** +// * @author cz +// */ +//@SpringBootConfiguration +//public class KafkaConsumerConfig { +// +// @Value("${spring.kafka.consumer.bootstrap-servers}") +// private String bootstrapServers; +// @Value("${spring.kafka.consumer.group-id}") +// private String groupId; +// @Value("${spring.kafka.consumer.enable-auto-commit}") +// private boolean enableAutoCommit; +// @Value("${spring.kafka.properties.session.timeout.ms}") +// private String sessionTimeout; +// @Value("${spring.kafka.properties.max.poll.interval.ms}") +// private String maxPollIntervalTime; +// @Value("${spring.kafka.consumer.max-poll-records}") +// private String maxPollRecords; +// @Value("${spring.kafka.consumer.auto-offset-reset}") +// private String autoOffsetReset; +// @Value("${spring.kafka.listener.concurrency}") +// private Integer concurrency; +// @Value("${spring.kafka.listener.missing-topics-fatal}") +// 
private boolean missingTopicsFatal; +// +// private final long pollTimeout = 600000; +// +// @Bean +// public Map consumerConfigs() { +// Map propsMap = new HashMap<>(16); +// // 服务器地址,不多说配置直接用 +// propsMap.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers); +// // groupId不多说,直接用 +// propsMap.put(ConsumerConfig.GROUP_ID_CONFIG, groupId); +// //是否自动提交偏移量,默认值是true,为了避免出现重复数据和数据丢失,可以把它设置为false,然后手动提交偏移量 +// propsMap.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, enableAutoCommit); +// //自动提交的时间间隔,自动提交开启时生效 +// propsMap.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "2000"); +// //该属性指定了消费者在读取一个没有偏移量的分区或者偏移量无效的情况下该作何处理: +// //我们使用latest:当各分区下有已提交的offset时,从提交的offset开始消费;无提交的offset时,消费新产生的该分区下的数据 +// propsMap.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, autoOffsetReset); +// //两次poll之间的最大间隔,默认值为5分钟。如果超过这个间隔会触发reBalance +// propsMap.put(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, maxPollIntervalTime); +// //这个参数定义了poll方法最多可以拉取多少条消息,默认值为500。 +// propsMap.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, maxPollRecords); +// //当broker多久没有收到consumer的心跳请求后就触发reBalance,默认值是10s +// propsMap.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, sessionTimeout); +// //序列化(我们这边使用StringDeserializer,与生产者保持一致) +// propsMap.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); +// propsMap.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); +// // 下面四个参数是用户名密码的参数,没有用户名密码可以去掉以下配置 +//// propsMap.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, SecurityProtocol.SASL_PLAINTEXT.name); +//// propsMap.put(SaslConfigs.SASL_MECHANISM, "PLAIN"); +//// propsMap.put("java.security.auth.login.config", "10000"); +// // 这里username设置用户名, password设置密码我写死到代码里了,可以更改为nacos配置 +//// propsMap.put(SaslConfigs.SASL_JAAS_CONFIG, String.format("org.apache.kafka.common.security.plain.PlainLoginModule required username=\"admin\" password=\"admin1234\";")); +// return propsMap; +// } +// +// // 消费者工厂,将配置信息加载进去 +// @Bean("consumerFactory") +// public DefaultKafkaConsumerFactory consumerFactory() { +// return new DefaultKafkaConsumerFactory(consumerConfigs()); +// } +// +// @Bean("listenerContainerFactory") +// public KafkaListenerContainerFactory> kafkaListenerContainerFactory() { +// ConcurrentKafkaListenerContainerFactory factory = new ConcurrentKafkaListenerContainerFactory<>(); +// factory.setConsumerFactory(consumerFactory()); +// //在侦听器容器中运行的线程数,一般设置为 机器数*分区数 +// factory.setConcurrency(concurrency); +// //消费监听接口监听的主题不存在时,默认会报错,所以设置为false忽略错误 +// factory.getContainerProperties().setMissingTopicsFatal(missingTopicsFatal); +// //自动提交关闭,需要设置手动消息确认 +// factory.getContainerProperties().setAckMode(ContainerProperties.AckMode.MANUAL_IMMEDIATE); +// factory.getContainerProperties().setPollTimeout(pollTimeout); +// return factory; +// } +//} +// diff --git a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerListener.java b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerListener.java index c50a4e3..6600312 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerListener.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerListener.java @@ -1,31 +1,31 @@ -package com.casic.missiles.autoconfig; - -import org.apache.kafka.clients.consumer.ConsumerRecord; -import org.springframework.kafka.annotation.KafkaListener; -import org.springframework.kafka.config.KafkaListenerEndpointRegistry; -import org.springframework.kafka.support.Acknowledgment; -import 
org.springframework.web.bind.annotation.RestController; - -import javax.annotation.Resource; - -@RestController() -public class KafkaConsumerListener{ - @Resource - private KafkaListenerEndpointRegistry kafkaListenerEndpointRegistry; - /** - * 监听kafka消息 - * - * 使用autoStartup = "false"必须指定id - */ - @KafkaListener(id = "${spring.kafka.consumer.group-id}", topics = {"KAFKA_TEST_TOPICS"}, autoStartup = "true") - public void listenTopics(ConsumerRecord consumerRecord, Acknowledgment ack) { - try { - System.out.println("listenTopics接受消息:" + consumerRecord.value()); - //手动确认 - ack.acknowledge(); - } catch (Exception e) { - System.out.println("消费失败:" + e); - } - } - -} +//package com.casic.missiles.autoconfig; +// +//import org.apache.kafka.clients.consumer.ConsumerRecord; +//import org.springframework.kafka.annotation.KafkaListener; +//import org.springframework.kafka.config.KafkaListenerEndpointRegistry; +//import org.springframework.kafka.support.Acknowledgment; +//import org.springframework.web.bind.annotation.RestController; +// +//import javax.annotation.Resource; +// +//@RestController() +//public class KafkaConsumerListener{ +// @Resource +// private KafkaListenerEndpointRegistry kafkaListenerEndpointRegistry; +// /** +// * 监听kafka消息 +// * +// * 使用autoStartup = "false"必须指定id +// */ +// @KafkaListener(id = "${spring.kafka.consumer.group-id}", topics = {"KAFKA_TEST_TOPICS"}, autoStartup = "true") +// public void listenTopics(ConsumerRecord consumerRecord, Acknowledgment ack) { +// try { +// System.out.println("listenTopics接受消息:" + consumerRecord.value()); +// //手动确认 +// ack.acknowledge(); +// } catch (Exception e) { +// System.out.println("消费失败:" + e); +// } +// } +// +//} diff --git a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerListenerError.java b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerListenerError.java index d6f771a..dbc561c 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerListenerError.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaConsumerListenerError.java @@ -1,29 +1,29 @@ -package com.casic.missiles.autoconfig; - -import edu.umd.cs.findbugs.annotations.NonNull; -import org.apache.kafka.clients.consumer.Consumer; -import org.springframework.kafka.listener.KafkaListenerErrorHandler; -import org.springframework.kafka.listener.ListenerExecutionFailedException; -import org.springframework.messaging.Message; -import org.springframework.stereotype.Component; - -@Component -public class KafkaConsumerListenerError implements KafkaListenerErrorHandler { - - - @Override - @NonNull - public Object handleError(Message message, ListenerExecutionFailedException e) { - return new Object(); - } - - @Override - public Object handleError(Message message, ListenerExecutionFailedException exception, Consumer consumer) { - System.out.println("消息详情:" + message); - System.out.println("异常信息::" + exception); - System.out.println("消费者详情::" + consumer.groupMetadata()); - System.out.println("监听主题::" + consumer.listTopics()); - return KafkaListenerErrorHandler.super.handleError(message, exception, consumer); - } - -} +//package com.casic.missiles.autoconfig; +// +//import edu.umd.cs.findbugs.annotations.NonNull; +//import org.apache.kafka.clients.consumer.Consumer; +//import org.springframework.kafka.listener.KafkaListenerErrorHandler; +//import org.springframework.kafka.listener.ListenerExecutionFailedException; +//import org.springframework.messaging.Message; +//import 
org.springframework.stereotype.Component; +// +//@Component +//public class KafkaConsumerListenerError implements KafkaListenerErrorHandler { +// +// +// @Override +// @NonNull +// public Object handleError(Message message, ListenerExecutionFailedException e) { +// return new Object(); +// } +// +// @Override +// public Object handleError(Message message, ListenerExecutionFailedException exception, Consumer consumer) { +// System.out.println("消息详情:" + message); +// System.out.println("异常信息::" + exception); +// System.out.println("消费者详情::" + consumer.groupMetadata()); +// System.out.println("监听主题::" + consumer.listTopics()); +// return KafkaListenerErrorHandler.super.handleError(message, exception, consumer); +// } +// +//} diff --git a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaProviderConfig.java b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaProviderConfig.java index e8e36ec..1d605ed 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaProviderConfig.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaProviderConfig.java @@ -1,85 +1,85 @@ -package com.casic.missiles.autoconfig; - -import org.apache.kafka.clients.CommonClientConfigs; -import org.apache.kafka.clients.producer.ProducerConfig; -import org.apache.kafka.common.config.SaslConfigs; -import org.apache.kafka.common.security.auth.SecurityProtocol; -import org.apache.kafka.common.serialization.StringSerializer; -import org.springframework.beans.factory.annotation.Value; -import org.springframework.boot.SpringBootConfiguration; -import org.springframework.context.annotation.Bean; -import org.springframework.context.annotation.Primary; -import org.springframework.kafka.core.DefaultKafkaProducerFactory; -import org.springframework.kafka.core.KafkaTemplate; -import org.springframework.kafka.core.ProducerFactory; -import org.springframework.kafka.transaction.KafkaTransactionManager; - -import java.util.HashMap; -import java.util.Map; - -/** - * @author cz - */ -@SpringBootConfiguration -public class KafkaProviderConfig { - - @Value("${spring.kafka.producer.bootstrap-servers}") - private String bootstrapServers; - @Value("${spring.kafka.producer.acks}") - private String acks; - @Value("${spring.kafka.producer.retries}") - private String retries; - @Value("${spring.kafka.producer.batch-size}") - private String batchSize; - @Value("${spring.kafka.producer.buffer-memory}") - private String bufferMemory; - - @Bean - public Map producerConfigs() { - Map props = new HashMap<>(16); - props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers); - //响应模式,我们使用acks=all :只有当所有参与复制的节点全部收到消息时,生产者才会收到一个来自服务器的成功响应。 - props.put(ProducerConfig.ACKS_CONFIG, acks); - //发生错误后,消息重发的次数,开启事务必须大于0 - props.put(ProducerConfig.RETRIES_CONFIG, retries); - //当多个消息发送到相同分区时,生产者会将消息打包到一起,以减少请求交互. 而不是一条条发送 - props.put(ProducerConfig.BATCH_SIZE_CONFIG, batchSize); - //有的时刻消息比较少,过了很久,比如5min也没有凑够16KB,这样延时就很大,所以需要一个参数. 
再设置一个时间,到了这个时间, - props.put(ProducerConfig.LINGER_MS_CONFIG, "5000"); - //生产者内存缓冲区的大小 - props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, bufferMemory); - //序列和消费者对应 - props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class); - props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class); - - //用户名密码配置,没有用户名密码可以去掉以下配置 -// props.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, SecurityProtocol.SASL_PLAINTEXT.name); -// props.put(SaslConfigs.SASL_MECHANISM, "PLAIN"); -// props.put("java.security.auth.login.config", "10000"); - // 可以在nacos配置文件中配置 -// props.put(SaslConfigs.SASL_JAAS_CONFIG, String.format("org.apache.kafka.common.security.plain.PlainLoginModule required username=\"admin\" password=\"admin1234\";")); - return props; - } - - // 生产者工厂 - @Bean("kafkaProduceFactory") - public ProducerFactory producerFactory() { - DefaultKafkaProducerFactory factory = new DefaultKafkaProducerFactory<>(producerConfigs()); - factory.setTransactionIdPrefix("kafkaTx-"); - return factory; - } - - // 事务处理 - // 这里的事务处理会和项目中的其他事务起冲突,所以我一般会把@Bean去掉,不用spring代理 - @Bean("kafkaTransactionManager") - @Primary - public KafkaTransactionManager kafkaTransactionManager(ProducerFactory producerFactory) { - return new KafkaTransactionManager(producerFactory); - } - - @Bean - public KafkaTemplate kafkaTemplate() { - return new KafkaTemplate<>(producerFactory()); - } - -} +//package com.casic.missiles.autoconfig; +// +//import org.apache.kafka.clients.CommonClientConfigs; +//import org.apache.kafka.clients.producer.ProducerConfig; +//import org.apache.kafka.common.config.SaslConfigs; +//import org.apache.kafka.common.security.auth.SecurityProtocol; +//import org.apache.kafka.common.serialization.StringSerializer; +//import org.springframework.beans.factory.annotation.Value; +//import org.springframework.boot.SpringBootConfiguration; +//import org.springframework.context.annotation.Bean; +//import org.springframework.context.annotation.Primary; +//import org.springframework.kafka.core.DefaultKafkaProducerFactory; +//import org.springframework.kafka.core.KafkaTemplate; +//import org.springframework.kafka.core.ProducerFactory; +//import org.springframework.kafka.transaction.KafkaTransactionManager; +// +//import java.util.HashMap; +//import java.util.Map; +// +///** +// * @author cz +// */ +//@SpringBootConfiguration +//public class KafkaProviderConfig { +// +// @Value("${spring.kafka.producer.bootstrap-servers}") +// private String bootstrapServers; +// @Value("${spring.kafka.producer.acks}") +// private String acks; +// @Value("${spring.kafka.producer.retries}") +// private String retries; +// @Value("${spring.kafka.producer.batch-size}") +// private String batchSize; +// @Value("${spring.kafka.producer.buffer-memory}") +// private String bufferMemory; +// +// @Bean +// public Map producerConfigs() { +// Map props = new HashMap<>(16); +// props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers); +// //响应模式,我们使用acks=all :只有当所有参与复制的节点全部收到消息时,生产者才会收到一个来自服务器的成功响应。 +// props.put(ProducerConfig.ACKS_CONFIG, acks); +// //发生错误后,消息重发的次数,开启事务必须大于0 +// props.put(ProducerConfig.RETRIES_CONFIG, retries); +// //当多个消息发送到相同分区时,生产者会将消息打包到一起,以减少请求交互. 而不是一条条发送 +// props.put(ProducerConfig.BATCH_SIZE_CONFIG, batchSize); +// //有的时刻消息比较少,过了很久,比如5min也没有凑够16KB,这样延时就很大,所以需要一个参数. 
再设置一个时间,到了这个时间, +// props.put(ProducerConfig.LINGER_MS_CONFIG, "5000"); +// //生产者内存缓冲区的大小 +// props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, bufferMemory); +// //序列和消费者对应 +// props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class); +// props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class); +// +// //用户名密码配置,没有用户名密码可以去掉以下配置 +//// props.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, SecurityProtocol.SASL_PLAINTEXT.name); +//// props.put(SaslConfigs.SASL_MECHANISM, "PLAIN"); +//// props.put("java.security.auth.login.config", "10000"); +// // 可以在nacos配置文件中配置 +//// props.put(SaslConfigs.SASL_JAAS_CONFIG, String.format("org.apache.kafka.common.security.plain.PlainLoginModule required username=\"admin\" password=\"admin1234\";")); +// return props; +// } +// +// // 生产者工厂 +// @Bean("kafkaProduceFactory") +// public ProducerFactory producerFactory() { +// DefaultKafkaProducerFactory factory = new DefaultKafkaProducerFactory<>(producerConfigs()); +// factory.setTransactionIdPrefix("kafkaTx-"); +// return factory; +// } +// +// // 事务处理 +// // 这里的事务处理会和项目中的其他事务起冲突,所以我一般会把@Bean去掉,不用spring代理 +// @Bean("kafkaTransactionManager") +// @Primary +// public KafkaTransactionManager kafkaTransactionManager(ProducerFactory producerFactory) { +// return new KafkaTransactionManager(producerFactory); +// } +// +// @Bean +// public KafkaTemplate kafkaTemplate() { +// return new KafkaTemplate<>(producerFactory()); +// } +// +//} diff --git a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaSendResultHandler.java b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaSendResultHandler.java index 1fdac64..1598ee0 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaSendResultHandler.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/autoconfig/KafkaSendResultHandler.java @@ -1,22 +1,22 @@ -package com.casic.missiles.autoconfig; - -import org.apache.kafka.clients.producer.ProducerRecord; -import org.apache.kafka.clients.producer.RecordMetadata; -import org.springframework.kafka.support.ProducerListener; -import org.springframework.stereotype.Component; - -import javax.annotation.Nullable; - -@Component -public class KafkaSendResultHandler implements ProducerListener { - - @Override - public void onSuccess(ProducerRecord producerRecord, RecordMetadata recordMetadata) { - System.out.println("消息发送成功:" + producerRecord.toString()); - } - - @Override - public void onError(ProducerRecord producerRecord, @Nullable RecordMetadata recordMetadata, Exception exception) { - System.out.println("消息发送失败:" + producerRecord.toString() + exception.getMessage()); - } -} +//package com.casic.missiles.autoconfig; +// +//import org.apache.kafka.clients.producer.ProducerRecord; +//import org.apache.kafka.clients.producer.RecordMetadata; +//import org.springframework.kafka.support.ProducerListener; +//import org.springframework.stereotype.Component; +// +//import javax.annotation.Nullable; +// +//@Component +//public class KafkaSendResultHandler implements ProducerListener { +// +// @Override +// public void onSuccess(ProducerRecord producerRecord, RecordMetadata recordMetadata) { +// System.out.println("消息发送成功:" + producerRecord.toString()); +// } +// +// @Override +// public void onError(ProducerRecord producerRecord, @Nullable RecordMetadata recordMetadata, Exception exception) { +// System.out.println("消息发送失败:" + producerRecord.toString() + exception.getMessage()); +// } +//} diff --git 
a/sensorhub-core/src/main/java/com/casic/missiles/cache/ProtocolProcessEventListener.java b/sensorhub-core/src/main/java/com/casic/missiles/cache/ProtocolProcessEventListener.java index b0bf96d..5144189 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/cache/ProtocolProcessEventListener.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/cache/ProtocolProcessEventListener.java @@ -157,7 +157,7 @@ deviceDataList.add(deviceData); } //批量保存 - deviceDataService.saveBatch(deviceDataList); +// deviceDataService.saveBatch(deviceDataList); } } //移除缓存 diff --git a/sensorhub-core/src/main/java/com/casic/missiles/controller/AepCommandSend.java b/sensorhub-core/src/main/java/com/casic/missiles/controller/AepCommandSend.java index 2d0aeae..5e3ba38 100644 --- a/sensorhub-core/src/main/java/com/casic/missiles/controller/AepCommandSend.java +++ b/sensorhub-core/src/main/java/com/casic/missiles/controller/AepCommandSend.java @@ -64,7 +64,7 @@ request.setParamMasterKey("0a77886fae4f4ff68d926adeb3a3ef5b"); // single value request.setBody(JSONObject.toJSONString(busConfigParam).getBytes()); CreateCommandLwm2mProfileResponse msgResponse = client.CreateCommandLwm2mProfile(request); - log.info("-----" + msgResponse.getMessage()); + log.info("send status-----" + msgResponse.getMessage()); } catch (Exception ex) { } finally { client.shutdown(); diff --git a/sensorhub-core/src/main/java/com/casic/missiles/controller/KafkaController.java b/sensorhub-core/src/main/java/com/casic/missiles/controller/KafkaController.java deleted file mode 100644 index d679bf1..0000000 --- a/sensorhub-core/src/main/java/com/casic/missiles/controller/KafkaController.java +++ /dev/null @@ -1,59 +0,0 @@ -package com.casic.missiles.controller; - -import com.casic.missiles.autoconfig.KafkaSendResultHandler; -import com.casic.missiles.parser.sender.impl.KafkaSubscribe; -import org.apache.kafka.clients.producer.ProducerRecord; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.kafka.core.KafkaTemplate; -import org.springframework.kafka.support.KafkaHeaders; -import org.springframework.messaging.MessageHeaders; -import org.springframework.messaging.support.GenericMessage; -import org.springframework.transaction.annotation.Transactional; -import org.springframework.web.bind.annotation.RequestMapping; -import org.springframework.web.bind.annotation.RestController; - -import java.util.HashMap; -import java.util.Map; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.TimeoutException; - -@RestController -@RequestMapping("/provider") -//这个注解代表这个类开启Springboot事务,因为我们在Kafka的配置文件开启了Kafka事务,不然会报错 -public class KafkaController { - - @Autowired - private KafkaSubscribe kafkaSubscribe; - - @RequestMapping("/send") - @Transactional - public String sendMultiple() throws Exception { - kafkaSubscribe.publishDataSubscribe(null, null); - return "okk"; - } - - -// /** -// * Kafka提供了多种构建消息的方式 -// * -// * @throws ExecutionException -// * @throws InterruptedException -// * @throws TimeoutException -// */ -// public void sendDemo() throws ExecutionException, InterruptedException, TimeoutException { -// //后面的get代表同步发送,括号内时间可选,代表超过这个时间会抛出超时异常,但是仍会发送成功 -// kafkaTemplate.send("topic1", "发给topic1").get(); -// //使用ProducerRecord发送消息 -// ProducerRecord producerRecord = new ProducerRecord<>("topic.quick.demo", "use ProducerRecord to send message"); -// kafkaTemplate.send(producerRecord); -// //使用Message发送消息 -// Map map = new HashMap<>(); -// map.put(KafkaHeaders.TOPIC, "topic.quick.demo"); 
diff --git a/sensorhub-core/src/main/java/com/casic/missiles/controller/ReceiveController.java b/sensorhub-core/src/main/java/com/casic/missiles/controller/ReceiveController.java
index e5fa505..dfed36a 100644
--- a/sensorhub-core/src/main/java/com/casic/missiles/controller/ReceiveController.java
+++ b/sensorhub-core/src/main/java/com/casic/missiles/controller/ReceiveController.java
@@ -18,13 +18,13 @@
     private final ThreadPoolExecutor threadPoolExecutor = new ThreadPoolExecutor(5, 10, 60, TimeUnit.SECONDS, new LinkedBlockingDeque<>(10));
 
     @RequestMapping("/data")
-    public Object testNbResponse(@RequestBody Map h2sDataMap) {
+    public Object testNbResponse(@RequestBody Map dataMap) {
         ResponseData responseData = new ResponseData();
-        System.out.println(JSON.toJSON(h2sDataMap));
+        System.out.println(JSON.toJSON(dataMap));
         threadPoolExecutor.execute(
                 () -> {
                     AepCommandSend aepCommandSend = new AepCommandSend();
-                    aepCommandSend.sendConfig(h2sDataMap);
+                    aepCommandSend.sendConfig(dataMap);
                 }
         );
         responseData.setCode(200);
diff --git a/sensorhub-core/src/main/java/com/casic/missiles/netty/SensorhubServer.java b/sensorhub-core/src/main/java/com/casic/missiles/netty/SensorhubServer.java
index c4a534c..a946173 100644
--- a/sensorhub-core/src/main/java/com/casic/missiles/netty/SensorhubServer.java
+++ b/sensorhub-core/src/main/java/com/casic/missiles/netty/SensorhubServer.java
@@ -3,6 +3,7 @@
 import com.casic.missiles.autoconfig.SensorhubProperties;
 import com.casic.missiles.netty.handler.SensorhubServerChannelInitialHandler;
 import io.netty.bootstrap.ServerBootstrap;
+import io.netty.buffer.PooledByteBufAllocator;
 import io.netty.channel.ChannelFuture;
 import io.netty.channel.ChannelOption;
 import io.netty.channel.EventLoopGroup;
@@ -71,6 +72,8 @@
         b.group(bossGroup, workerGroup)  // set the EventLoopGroup
                 .channel(NioServerSocketChannel.class)  // specify the new Channel type
                 .childHandler(new SensorhubServerChannelInitialHandler())  // specify the ChannelHandler
+                .option(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT)
+                .childOption(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT)
                 .option(ChannelOption.SO_BACKLOG, 1024)  // options for the ServerChannel
                 .childOption(ChannelOption.SO_KEEPALIVE, true);  // with SO_KEEPALIVE=true the server can probe whether the client connection is still alive; if the client has closed, the server side can close the connection and release its resources
         // add detection
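The SensorhubServer change above switches both parent and child channels to Netty's pooled allocator. The pool exposes its own usage counters through PooledByteBufAllocator.DEFAULT.metric(), which gives a lighter-weight health check than reflecting on internals; a sketch assuming Netty 4.1:

import io.netty.buffer.PooledByteBufAllocator;
import io.netty.buffer.PooledByteBufAllocatorMetric;

public class AllocatorMetricLogger {

    public static void logUsage() {
        PooledByteBufAllocatorMetric metric = PooledByteBufAllocator.DEFAULT.metric();
        // direct vs heap bytes currently held by the pool's arenas
        System.out.printf("direct=%dk heap=%dk chunkSize=%d%n",
                metric.usedDirectMemory() / 1024,
                metric.usedHeapMemory() / 1024,
                metric.chunkSize());
    }
}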
diff --git a/sensorhub-core/src/main/java/com/casic/missiles/parser/GenericProtocolParser.java b/sensorhub-core/src/main/java/com/casic/missiles/parser/GenericProtocolParser.java
index bb41939..ec9a04f 100644
--- a/sensorhub-core/src/main/java/com/casic/missiles/parser/GenericProtocolParser.java
+++ b/sensorhub-core/src/main/java/com/casic/missiles/parser/GenericProtocolParser.java
@@ -122,7 +122,7 @@
         }
         ProtocolProcessEventListener.setTask(devcode, bizDataMap, 3);
         // store the data
-        datagramEventProvider.storeData(bizDataMap);
+//        datagramEventProvider.storeData(bizDataMap);
     } catch (RuntimeException rex) {
         log.error("parsing exception, details: {}", rex);
         // data sending, asynchronous, with exception interception
diff --git a/sensorhub-core/src/main/java/com/casic/missiles/parser/ProtocolParserSupport.java b/sensorhub-core/src/main/java/com/casic/missiles/parser/ProtocolParserSupport.java
index e4a79ae..c886d25 100644
--- a/sensorhub-core/src/main/java/com/casic/missiles/parser/ProtocolParserSupport.java
+++ b/sensorhub-core/src/main/java/com/casic/missiles/parser/ProtocolParserSupport.java
@@ -29,6 +29,13 @@
 public class ProtocolParserSupport {
 
+    /**
+     * Obtain the protocol's fixed-field data.
+     *
+     * @param protocolFactory
+     * @param wholeDatagramByte
+     * @return
+     */
     protected Map getParseFixedDataMap(AbstractProtocolConfigFactory protocolFactory, ByteBuf wholeDatagramByte) {
         ProtocolFieldConfigProvider protocolFieldConfigProvider = protocolFactory.getProtocolFieldConfigProvider();
         RuleConfigProvider ruleConfigProvider = protocolFactory.getRuleConfigProvider();
@@ -41,7 +48,7 @@
         // log the source data and device code
         ProcessEventTask processEventTask = ProcessEventTask.builder()
                 .devcode((String) parseFixedDataMap.get("devcode"))
-                .decryptSoureData(ByteBufUtil.hexDump(wholeDatagramByte))
+                .decryptSoureData(ByteBufUtil.hexDump(wholeDatagramByte).substring(wholeDatagramByte.readerIndex()))
                 .build();
         ProtocolProcessEventListener.asynAddTask(processEventTask);
         return parseFixedDataMap;
diff --git a/sensorhub-core/src/main/java/com/casic/missiles/parser/SensorhubDecoder.java b/sensorhub-core/src/main/java/com/casic/missiles/parser/SensorhubDecoder.java
index c8c54f8..fccfa7c 100644
--- a/sensorhub-core/src/main/java/com/casic/missiles/parser/SensorhubDecoder.java
+++ b/sensorhub-core/src/main/java/com/casic/missiles/parser/SensorhubDecoder.java
@@ -1,14 +1,17 @@
 package com.casic.missiles.parser;
 
+import com.casic.missiles.autoconfig.DirectMemoryReporter;
 import com.casic.missiles.parser.predecodec.AbstractPretreatment;
 import com.casic.missiles.pojo.ParseResult;
 import com.casic.missiles.provider.ProtocolConfigProvider;
 import com.casic.missiles.util.ClazzUtil;
 import io.netty.buffer.ByteBuf;
+import io.netty.buffer.ByteBufAllocator;
 import io.netty.buffer.ByteBufUtil;
 import io.netty.channel.ChannelHandlerContext;
 import io.netty.handler.codec.ByteToMessageDecoder;
 import lombok.extern.slf4j.Slf4j;
+import org.apache.commons.codec.binary.Hex;
 
 import java.io.BufferedInputStream;
 import java.io.File;
@@ -33,29 +36,43 @@
  * 3. save the results obtained from the generic protocol parser into the list and pass them to the reply handler, which performs the related reply commands
  */
 @Override
-    public void decode(ChannelHandlerContext ctx, ByteBuf buffer, List list){
+    public void decode(ChannelHandlerContext ctx, ByteBuf buffer, List list) {
         // add pretreatment processors
         List abstractPreProcessingList = ClazzUtil.getSubClassList(AbstractPretreatment.class, true);
         ByteBuf standardByteBuf = buffer;
-        String oldBuff=ByteBufUtil.hexDump(standardByteBuf);
+        String oldBuff = ByteBufUtil.hexDump(standardByteBuf);
         for (AbstractPretreatment abstractPretreatment : abstractPreProcessingList) {
             standardByteBuf = abstractPretreatment.decode(standardByteBuf);
         }
-        boolean pretreatmentStatus=!oldBuff.equals(ByteBufUtil.hexDump(standardByteBuf));
+        boolean pretreatmentStatus = !oldBuff.equals(ByteBufUtil.hexDump(standardByteBuf));
         log.info(ByteBufUtil.hexDump(standardByteBuf));
         // further pre-processing can be added here, e.g. handling split/merged packets in the network stream
         ProtocolParser protocolParser = new GenericProtocolParser();
-        System.out.println(protocolParser);
         ProtocolConfigProvider protocolConfigProvider = new ProtocolConfigProvider();
         ParseResult parseResult = null;
+//        DirectMemoryReporter memoryReporter = new DirectMemoryReporter();
         // in every case hand it over; the content assembled here is put to effective use when replying
+        Integer pre = 0;
         while (parseResult == null && standardByteBuf.readerIndex() != standardByteBuf.writerIndex()) {
+            pre = standardByteBuf.readerIndex();
             parseResult = protocolParser.doParseProtocol(standardByteBuf, protocolConfigProvider);
+            // guard against an infinite loop
+            if (pre == standardByteBuf.readerIndex()) {
+                break;
+            }
         }
+//        destroy(standardByteBuf);
         if (parseResult != null) {
             parseResult.setRequestCode(pretreatmentStatus);
             list.add(parseResult);
         }
     }
+
+//    public void destroy(ByteBuf byteBuf) {
+//        if (byteBuf != null && byteBuf.refCnt() > 0) {
+//            byteBuf.release();
+//            byteBuf = null;
+//        }
+//    }
+
 }
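The decode loop above records the readerIndex before each pass and breaks when no bytes were consumed, so unparseable input can no longer spin forever. The same guard in a minimal ByteToMessageDecoder (parseFrame is an illustrative stand-in, not a class from this repo):

import io.netty.buffer.ByteBuf;
import io.netty.channel.ChannelHandlerContext;
import io.netty.handler.codec.ByteToMessageDecoder;

import java.util.List;

public class ProgressGuardedDecoder extends ByteToMessageDecoder {

    @Override
    protected void decode(ChannelHandlerContext ctx, ByteBuf in, List<Object> out) {
        while (in.isReadable()) {
            int before = in.readerIndex();
            Object frame = parseFrame(in);   // returns null when no complete frame is available yet
            if (frame != null) {
                out.add(frame);
            }
            // if the parser consumed nothing, stop instead of spinning forever
            if (in.readerIndex() == before) {
                break;
            }
        }
    }

    private Object parseFrame(ByteBuf in) {
        // placeholder: a real implementation reads a header, checks the length field, etc.
        return null;
    }
}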
diff --git a/sensorhub-core/src/main/java/com/casic/missiles/parser/processor/reply/DevcieConfigConfirm.java b/sensorhub-core/src/main/java/com/casic/missiles/parser/processor/reply/DevcieConfigConfirm.java
index 1fd58f2..5ea30a9 100644
--- a/sensorhub-core/src/main/java/com/casic/missiles/parser/processor/reply/DevcieConfigConfirm.java
+++ b/sensorhub-core/src/main/java/com/casic/missiles/parser/processor/reply/DevcieConfigConfirm.java
@@ -41,7 +41,7 @@
             }
         }
         // configuration delivered successfully
-//        redisCommon.removeKeyByDevcode(devcode);
+        redisCommon.removeKeyByDevcode(devcode);
         configDataMap.put("config", "1");
     }
     return result;
diff --git a/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/combined/CombinedFieldSupport.java b/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/combined/CombinedFieldSupport.java
index b49c027..9226dc3 100644
--- a/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/combined/CombinedFieldSupport.java
+++ b/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/combined/CombinedFieldSupport.java
@@ -20,13 +20,12 @@
  */
 public class CombinedFieldSupport {
 
-    // the deep copy does not affect the stored object
+    // obtain the field config via a deep copy
     protected FieldConfig getFieldConfigById(Long fieldId, Map fieldConfigsMap) {
         if (ObjectUtils.isNotEmpty(fieldId)) {
             FieldConfig fieldConfig = fieldConfigsMap.get(fieldId);
             FieldConfig newDeepCopyFieldConfig = new FieldConfig();
             // pass it in via a deep copy
-//            System.out.println("fieldId:" + fieldId + " json:" + JSON.toJSON(fieldConfig));
             BeanUtils.copyProperties(fieldConfig, newDeepCopyFieldConfig);
             return newDeepCopyFieldConfig;
         } else {
diff --git a/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/combined/GenericCombinedFieldResolver.java b/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/combined/GenericCombinedFieldResolver.java
index 58c419d..72b75dc 100644
--- a/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/combined/GenericCombinedFieldResolver.java
+++ b/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/combined/GenericCombinedFieldResolver.java
@@ -5,6 +5,7 @@
 import com.baomidou.mybatisplus.core.toolkit.CollectionUtils;
 import com.casic.missiles.enums.EngineExceptionEnum;
 import com.casic.missiles.exception.EngineException;
+import com.casic.missiles.parser.GenericProtocolParser;
 import com.casic.missiles.parser.resolver.combined.impl.BizFieldParseProcessor;
 import com.casic.missiles.parser.resolver.combined.impl.PreBizFieldParseProcessor;
 import com.casic.missiles.parser.resolver.combined.impl.PreLeadCodeProcessor;
@@ -13,13 +14,18 @@
 import com.casic.missiles.pojo.CombinedFieldProcessorParam;
 import com.casic.missiles.pojo.FieldConfig;
 import com.casic.missiles.pojo.FieldRuleConfig;
+import com.casic.missiles.util.SpringContextUtil;
 import io.netty.buffer.ByteBuf;
 import io.netty.buffer.ByteBufUtil;
+import io.netty.util.internal.PlatformDependent;
 import lombok.extern.slf4j.Slf4j;
+import org.springframework.util.ReflectionUtils;
 
+import java.lang.reflect.Field;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
+import java.util.concurrent.atomic.AtomicLong;
 import java.util.stream.Collectors;
 
 /**
@@ -31,7 +37,7 @@
 public class GenericCombinedFieldResolver {
 
     /**
-     * Main workflow class for combined-field parsing
+     * Main workflow class for combined-field parsing
      * field lengths are variable and are determined by lookup
      */
 
@@ -42,16 +48,16 @@
         List<Map<String, Object>> storeObjectList = combinedFieldParam.getStoreObjectList();
         while (byteBuf.readerIndex() < byteBuf.writerIndex()) {
             for (AbstractCombinedFieldProcessor abstractProcessor : abstractProcessorList) {
-                Integer oldLength = byteBuf.readerIndex();
-                median = abstractProcessor.invoke(combinedFieldParam);
-                if (byteBuf.readerIndex() >= byteBuf.writerIndex()) {
-                    return;
-                }
-                Assert.isFalse(oldLength == byteBuf.readerIndex(), () -> {
-                    throw new EngineException(EngineExceptionEnum.COMBINED_CONFIG_MATCH_FAILED);
-                });
-                // pass the previous node's result into the next call, preserving the previous node's information
-                combinedFieldParam.setPreProcessorResult(median);
+                Integer oldLength = byteBuf.readerIndex();
+                median = abstractProcessor.invoke(combinedFieldParam);
+                if (byteBuf.readerIndex() >= byteBuf.writerIndex()) {
+                    return;
+                }
+                Assert.isFalse(oldLength == byteBuf.readerIndex(), () -> {
+                    throw new EngineException(EngineExceptionEnum.COMBINED_CONFIG_MATCH_FAILED);
+                });
+                // pass the previous node's result into the next call, preserving the previous node's information
+                combinedFieldParam.setPreProcessorResult(median);
             }
         }
         System.out.println(JSON.toJSON(storeObjectList));
diff --git a/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/combined/impl/BizFieldParseProcessor.java b/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/combined/impl/BizFieldParseProcessor.java
index 4cd61a7..f65e4eb 100644
--- a/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/combined/impl/BizFieldParseProcessor.java
+++ b/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/combined/impl/BizFieldParseProcessor.java
@@ -62,9 +62,7 @@
      * compute the offset position after processing
      */
     private Integer calculateAfterProcessPosition(AbstractFieldConfig newProtocolFieldConfig, CombinedFieldProcessorParam combinedFieldParam) {
-//        Integer originPositionIndex = newProtocolFieldConfig.getOriginPositionByte() - combinedFieldParam.getByteBuf().readerIndex();
         Integer mergeBitToByte = 0;
-//        mergeBitToByte += originPositionIndex;
         if (newProtocolFieldConfig.getOffsetUnit().equals("bit")) {
             mergeBitToByte += (newProtocolFieldConfig.getOriginPositionBit() + newProtocolFieldConfig.getOffsetLength()) / 8;
         } else {
diff --git a/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/fields/BitFieldParser.java b/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/fields/BitFieldParser.java
index a855b68..2f77154 100644
--- a/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/fields/BitFieldParser.java
+++ b/sensorhub-core/src/main/java/com/casic/missiles/parser/resolver/fields/BitFieldParser.java
@@ -59,7 +59,9 @@
         byte[] binaryBytes = getBytesFromBinaryStr(binaryStr);
         ByteBuf byteBuf = ByteBufAllocator.DEFAULT.buffer();
         byteBuf.writeBytes(binaryBytes);
-        return doResolveFieldByteRule(byteBuf, ruleIds, fieldRuleConfigMap, networkOrder);
+        Object resolveValue = doResolveFieldByteRule(byteBuf, ruleIds, fieldRuleConfigMap, networkOrder);
+        byteBuf.release();
+        return resolveValue;
     }
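The BitFieldParser change above releases its scratch buffer after rule resolution, so each call no longer leaks a pooled buffer. Wrapping the release in try/finally also covers the exception path; a minimal sketch, where doResolve stands in for doResolveFieldByteRule:

import io.netty.buffer.ByteBuf;
import io.netty.buffer.ByteBufAllocator;

public class ScratchBufferExample {

    static Object resolve(byte[] binaryBytes) {
        ByteBuf byteBuf = ByteBufAllocator.DEFAULT.buffer();
        try {
            byteBuf.writeBytes(binaryBytes);
            return doResolve(byteBuf);
        } finally {
            // refCnt drops to 0 even on an exception, returning the memory to the pool
            byteBuf.release();
        }
    }

    private static Object doResolve(ByteBuf buf) {
        return buf.readableBytes(); // placeholder for the real rule resolution
    }
}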
diff --git a/sensorhub-core/src/main/java/com/casic/missiles/parser/safe/impl/Sm4.java b/sensorhub-core/src/main/java/com/casic/missiles/parser/safe/impl/Sm4.java
index 18a03e5..b001944 100644
--- a/sensorhub-core/src/main/java/com/casic/missiles/parser/safe/impl/Sm4.java
+++ b/sensorhub-core/src/main/java/com/casic/missiles/parser/safe/impl/Sm4.java
@@ -4,7 +4,6 @@
 import io.netty.buffer.ByteBuf;
 import io.netty.buffer.ByteBufAllocator;
 import io.netty.buffer.ByteBufUtil;
-import io.netty.util.ReferenceCountUtil;
 import lombok.extern.slf4j.Slf4j;
 import org.bouncycastle.crypto.engines.SM4Engine;
 import org.bouncycastle.crypto.params.KeyParameter;
diff --git a/sensorhub-core/src/main/java/com/casic/missiles/parser/sender/impl/KafkaSubscribe.java b/sensorhub-core/src/main/java/com/casic/missiles/parser/sender/impl/KafkaSubscribe.java
index aed848d..b95d1d8 100644
--- a/sensorhub-core/src/main/java/com/casic/missiles/parser/sender/impl/KafkaSubscribe.java
+++ b/sensorhub-core/src/main/java/com/casic/missiles/parser/sender/impl/KafkaSubscribe.java
@@ -1,91 +1,92 @@
-package com.casic.missiles.parser.sender.impl;
-
-import cn.hutool.core.date.DateUtil;
-import com.alibaba.fastjson.JSON;
-import com.casic.missiles.autoconfig.KafkaSendResultHandler;
-import com.casic.missiles.parser.sender.DataSubscribeProvider;
-import com.casic.missiles.pojo.SubscribeDetailConfig;
-import org.springframework.kafka.core.KafkaTemplate;
-import org.springframework.stereotype.Component;
-import org.springframework.transaction.annotation.Transactional;
-import org.springframework.util.CollectionUtils;
-
-import java.util.Date;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-/**
- * @author cz
- * @date 2023-11-10
- */
-@Component("kafka")
-public class KafkaSubscribe implements DataSubscribeProvider {
-
-    private KafkaTemplate kafkaTemplate;
-
-    public KafkaSubscribe(KafkaTemplate kafkaTemplate, KafkaSendResultHandler kafkaSendResultHandler) {
-        this.kafkaTemplate = kafkaTemplate;
-        // callback and exception handling
-        this.kafkaTemplate.setProducerListener(kafkaSendResultHandler);
-    }
-
-    @Override
-    @Transactional
-    public void publishDataSubscribe(List<Map<String, Object>> bizDataMapList, SubscribeDetailConfig subscribeDetailConfig) {
-        if (CollectionUtils.isEmpty(bizDataMapList)) {
-            return;
-        }
-        Map contentMap = new HashMap(), mBody = new HashMap();
-        Map bizDataMap = bizDataMapList.get(0);
-        switch ((Integer) bizDataMap.get("deviceType")) {
-            case 32:
-                contentMap.put("devType", "GasDetector");
-                setEventType(bizDataMap, contentMap, mBody, "GasConfigSuccess", "GasConfigFail");
-                break;
-            case 31:
-                contentMap.put("devType", "Pressure");
-                setEventType(bizDataMap, contentMap, mBody, "PressureConfigSuccess", "PressureConfigFail");
-        }
-
-        if (bizDataMap.containsKey("dataValue")) {
-            contentMap.put("mType", "Data");
-            if (bizDataMap.containsKey("cell")) {
-                mBody.put("cell", bizDataMap.get("cell"));
-            }
-            mBody.put("datas", bizDataMapList);
-            sendKafkaMsg(bizDataMap, contentMap, mBody);
-        }
-        if (bizDataMap.containsKey("imei")) {
-            contentMap.put("mType", "StartupRequest");
-            mBody.put("iccid", bizDataMap.get("iccid"));
-            mBody.put("imei", bizDataMap.get("imei"));
-            sendKafkaMsg(bizDataMap, contentMap, mBody);
-        }
-
-    }
-
-    private void setEventType(Map bizDataMap, Map contentMap, Map mBody, String bTypeSuccess, String bTypeFail) {
-        if (bizDataMap.containsKey("config")) {
-            contentMap.put("mType", "SetResponse");
-            contentMap.put("ts", DateUtil.format(new Date(), "yyyyMMddHHmmss"));
-            if ("1".equals(bizDataMap.get("config"))) {
-                mBody.put("bType", bTypeSuccess);
-            } else {
-                mBody.put("bType", bTypeFail);
-            }
-            sendKafkaMsg(bizDataMap, contentMap, mBody);
-        }
-
-    }
-
-
-    private void sendKafkaMsg(Map bizDataMap, Map contentMap, Map mBody) {
-        if (bizDataMap.containsKey("devcode")) {
-            contentMap.put("devCode", bizDataMap.get("devcode"));
-        }
-        contentMap.put("mBody", mBody);
-        kafkaTemplate.send("pressure", JSON.toJSONString(contentMap));
-    }
-
-}
+//package com.casic.missiles.parser.sender.impl;
+//
+//import cn.hutool.core.date.DateUtil;
+//import com.alibaba.fastjson.JSON;
+//import com.casic.missiles.autoconfig.KafkaSendResultHandler;
+//import com.casic.missiles.parser.sender.DataSubscribeProvider;
+//import com.casic.missiles.pojo.SubscribeDetailConfig;
+//import org.springframework.kafka.core.KafkaTemplate;
+//import org.springframework.stereotype.Component;
+//import org.springframework.transaction.annotation.Transactional;
+//import org.springframework.util.CollectionUtils;
+//
+//import java.util.Date;
+//import java.util.HashMap;
+//import java.util.List;
+//import java.util.Map;
+//
+///**
+// * @author cz
+// * @date 2023-11-10
+// */
+//@Component("kafka")
+//public class KafkaSubscribe implements DataSubscribeProvider {
+//
+//    private KafkaTemplate kafkaTemplate;
+//
+//    public KafkaSubscribe(KafkaTemplate kafkaTemplate, KafkaSendResultHandler kafkaSendResultHandler) {
+//        this.kafkaTemplate = kafkaTemplate;
+//        // callback and exception handling
+//        this.kafkaTemplate.setProducerListener(kafkaSendResultHandler);
+//    }
+//
+//    @Override
+//    @Transactional
+//    public void publishDataSubscribe(List<Map<String, Object>> bizDataMapList, SubscribeDetailConfig subscribeDetailConfig) {
+//        if (CollectionUtils.isEmpty(bizDataMapList)) {
+//            return;
+//        }
+//        Map contentMap = new HashMap(), mBody = new HashMap();
+//        Map bizDataMap = bizDataMapList.get(0);
+//        switch ((Integer) bizDataMap.get("deviceType")) {
+//            case 32:
+//                contentMap.put("devType", "GasDetector");
+//                setEventType(bizDataMap, contentMap, mBody, "GasConfigSuccess", "GasConfigFail");
+//                break;
+//            case 31:
+//                contentMap.put("devType", "Pressure");
+//                setEventType(bizDataMap, contentMap, mBody, "PressureConfigSuccess", "PressureConfigFail");
+//        }
+//
+//        if (bizDataMap.containsKey("dataValue")) {
+//            contentMap.put("mType", "Data");
+//            if (bizDataMap.containsKey("cell")) {
+//                mBody.put("cell", bizDataMap.get("cell"));
+//            }
+//            mBody.put("datas", bizDataMapList);
+//            sendKafkaMsg(bizDataMap, contentMap, mBody);
+//        }
+//        if (bizDataMap.containsKey("imei")) {
+//            contentMap.put("mType", "StartupRequest");
+//            mBody.put("iccid", bizDataMap.get("iccid"));
+//            mBody.put("imei", bizDataMap.get("imei"));
+//            sendKafkaMsg(bizDataMap, contentMap, mBody);
+//        }
+//
+//    }
+//
+//    // build the config-delivery response
+//    private void setEventType(Map bizDataMap, Map contentMap, Map mBody, String bTypeSuccess, String bTypeFail) {
+//        if (bizDataMap.containsKey("config")) {
+//            contentMap.put("mType", "SetResponse");
+//            contentMap.put("ts", DateUtil.format(new Date(), "yyyyMMddHHmmss"));
+//            if ("1".equals(bizDataMap.get("config"))) {
+//                mBody.put("bType", bTypeSuccess);
+//            } else {
+//                mBody.put("bType", bTypeFail);
+//            }
+//            sendKafkaMsg(bizDataMap, contentMap, mBody);
+//        }
+//
+//    }
+//
+//    // send the kafka reply
+//    private void sendKafkaMsg(Map bizDataMap, Map contentMap, Map mBody) {
+//        if (bizDataMap.containsKey("devcode")) {
+//            contentMap.put("devCode", bizDataMap.get("devcode"));
+//        }
+//        contentMap.put("mBody", mBody);
+//        kafkaTemplate.send("pressure", JSON.toJSONString(contentMap));
+//    }
+//
+//}
diff --git a/sensorhub-core/src/main/java/com/casic/missiles/provider/ProcessorInstanceProvider.java b/sensorhub-core/src/main/java/com/casic/missiles/provider/ProcessorInstanceProvider.java
index 3d89d0d..833a4b1 100644
--- a/sensorhub-core/src/main/java/com/casic/missiles/provider/ProcessorInstanceProvider.java
+++ b/sensorhub-core/src/main/java/com/casic/missiles/provider/ProcessorInstanceProvider.java
@@ -6,7 +6,7 @@
 import com.casic.missiles.cache.ProtocolProcessEventListener;
 import com.casic.missiles.parser.safe.SafeStrategy;
 import com.casic.missiles.parser.sender.DataSubscribeProvider;
-import com.casic.missiles.parser.sender.impl.KafkaSubscribe;
+//import com.casic.missiles.parser.sender.impl.KafkaSubscribe;
 import com.casic.missiles.pojo.*;
 import com.casic.missiles.registry.DatagramEventRegistry;
 import com.casic.missiles.registry.SubscribeRegistry;
@@ -148,8 +148,8 @@
      * data subscription
      */
     public void storeData(List<Map<String, Object>> bizDataMap) {
-        DataSubscribeProvider subscribeProvider = SpringContextUtil.getBean(KafkaSubscribe.class);
-        subscribeProvider.publishDataSubscribe(bizDataMap, null);
+//        DataSubscribeProvider subscribeProvider = SpringContextUtil.getBean(KafkaSubscribe.class);
+//        subscribeProvider.publishDataSubscribe(bizDataMap, null);
     }
 
 //    DataSubscribeProvider dataSubscribeProvider = SpringContextUtil.getBean(processorInstance.getSubscribeBean());
diff --git a/sensorhub-core/src/main/java/com/casic/missiles/replier/SensorhubReplier.java b/sensorhub-core/src/main/java/com/casic/missiles/replier/SensorhubReplier.java
index aef5616..327144a 100644
--- a/sensorhub-core/src/main/java/com/casic/missiles/replier/SensorhubReplier.java
+++ b/sensorhub-core/src/main/java/com/casic/missiles/replier/SensorhubReplier.java
@@ -8,10 +8,12 @@
 import io.netty.channel.ChannelHandlerContext;
 import io.netty.channel.SimpleChannelInboundHandler;
 import io.netty.handler.codec.http.FullHttpResponse;
+import lombok.extern.slf4j.Slf4j;
 
 /**
  * @author cz
  */
+@Slf4j
 public class SensorhubReplier extends SimpleChannelInboundHandler {
 
     /**
@@ -33,7 +35,7 @@
         // -1 means this is the confirmation of a delivered configuration; no reply is needed
         if (-1 != parseResult.getReplyCommand()) {
             ByteBuf replyByteBuf = abstractBuildReplyCommand.excute(parseResult);
-            System.out.println("reply packet content: " + ByteBufUtil.hexDump((ByteBuf) replyByteBuf));
+            log.info("reply packet content: " + ByteBufUtil.hexDump((ByteBuf) replyByteBuf));
             ((ByteBuf) replyByteBuf).resetReaderIndex();
             // send the reply
             ctx.channel().writeAndFlush(replyByteBuf);
diff --git a/sensorhub-support/src/main/java/com/casic/missiles/enums/FileNameEnums.java b/sensorhub-support/src/main/java/com/casic/missiles/enums/FileNameEnums.java
index babf0a3..6243a7a 100644
--- a/sensorhub-support/src/main/java/com/casic/missiles/enums/FileNameEnums.java
+++ b/sensorhub-support/src/main/java/com/casic/missiles/enums/FileNameEnums.java
@@ -8,7 +8,7 @@
     FILE_NAMES(new HashMap() {
         {
             put(COMMERCIAL_GAS, "GT_BIR1000-APP_v1.1.bin");
-            put(PRESSURE, "BIRMM-P1000N-APP_v1.0.bin");
+            put(PRESSURE, "BIRMM-P1000N-APP_v1.1.bin");
         }
     });
diff --git a/sensorhub-support/src/main/java/com/casic/missiles/enums/ReplyCommandEnum.java b/sensorhub-support/src/main/java/com/casic/missiles/enums/ReplyCommandEnum.java
index c1e683f..9a73a36 100644
--- a/sensorhub-support/src/main/java/com/casic/missiles/enums/ReplyCommandEnum.java
+++ b/sensorhub-support/src/main/java/com/casic/missiles/enums/ReplyCommandEnum.java
@@ -78,6 +78,6 @@
     /**
      * operation types correspond one-to-one
      */
-    String PDU_TYPE = "操作类型";
+    String PDU_TYPE = "PDUType";
 }
diff --git a/sensorhub-support/src/main/java/com/casic/missiles/pojo/ProcessEventTask.java b/sensorhub-support/src/main/java/com/casic/missiles/pojo/ProcessEventTask.java
index 8484cfd..d7de317 100644
--- a/sensorhub-support/src/main/java/com/casic/missiles/pojo/ProcessEventTask.java
+++ b/sensorhub-support/src/main/java/com/casic/missiles/pojo/ProcessEventTask.java
@@ -31,7 +31,7 @@
 
     /**
-     * device code
+     * device source data
      */
     private String decryptSoureData;
diff --git a/sensorhub-support/src/main/java/com/casic/missiles/util/ByteDemoTest.java b/sensorhub-support/src/main/java/com/casic/missiles/util/ByteDemoTest.java
index efbe0c4..c09c5bc 100644
--- a/sensorhub-support/src/main/java/com/casic/missiles/util/ByteDemoTest.java
+++ b/sensorhub-support/src/main/java/com/casic/missiles/util/ByteDemoTest.java
@@ -21,30 +21,26 @@
     }
         byte[] bytes = {(byte) 0x88, 0x22, 0x33, (byte) 0xa5, 0x22, 0x33, (byte) 0x88, 0x22, 0x33};
         buffer.writeBytes(bytes);
-//        String str = new String(bytes, Charset.forName("ISO-8859-1"));
-//        byte[] after = str.getBytes(Charset.forName("ISO-8859-1"));
-//        StringBuilder str2 = new StringBuilder();
-//        for (int i = 0; i < 30; i++) {
-//            str2.append("5");
-//        }
-//        // write the data into the ByteBuf
-//        buffer2.writeBytes(str2.toString().getBytes());
-//        buffer.writeBytes(stringBuilder.toString().getBytes());
-//        buffer = buffer.slice(0, buffer.writerIndex());
-//        buffer.resetReaderIndex();
         buffer.readBytes(2);
-//        buffer.markReaderIndex();
+        testByteBuf(buffer);
         buffer.resetReaderIndex();
-        byte[] bytess=new byte[buffer.readableBytes()];
+        byte[] bytess = new byte[buffer.readableBytes()];
         buffer.readBytes(bytess);
-        System.out.println("----"+buffer.readerIndex());
+        System.out.println("----" + buffer.readerIndex());
         System.out.println(ByteBufUtil.hexDump(buffer));
         String hexDump = buffer.toString(Charset.forName("ISO-8859-1"));
         ByteBuf buffer3 = ByteBufAllocator.DEFAULT.buffer();
         buffer3.writeBytes(hexDump.getBytes(Charset.forName("ISO-8859-1")));
-//        buffer.readBytes(2);
         buffer.resetReaderIndex();
         System.out.println(buffer.readerIndex());
-//        System.out.println(buffer3.readByte());
     }
+
+    private static void testByteBuf(ByteBuf buffer) {
+        buffer.readBytes(2);
+        buffer.markWriterIndex();
+        buffer.release();
+        System.out.println("----" + buffer.readerIndex());
+    }
+
 }
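One point worth double-checking in the ProtocolParserSupport change above: ByteBufUtil.hexDump emits two hex characters per byte and already starts at the buffer's current readerIndex, so trimming N consumed bytes from a dump taken earlier needs substring(N * 2) rather than substring(N). A standalone illustration (values chosen to match the demo bytes above):

import io.netty.buffer.ByteBuf;
import io.netty.buffer.ByteBufUtil;
import io.netty.buffer.Unpooled;

public class HexDumpOffsetDemo {
    public static void main(String[] args) {
        ByteBuf buf = Unpooled.wrappedBuffer(new byte[]{(byte) 0x88, 0x22, 0x33, (byte) 0xa5});
        String full = ByteBufUtil.hexDump(buf);                  // "882233a5"
        buf.skipBytes(2);                                        // consume two bytes
        String tail = ByteBufUtil.hexDump(buf);                  // "33a5" - dump starts at readerIndex
        String trimmed = full.substring(buf.readerIndex() * 2);  // also "33a5"
        System.out.println(full + " / " + tail + " / " + trimmed);
    }
}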