Java standalone application logging using Log4j 2.16 (log4j2)

Steps to follow:
1. Download log4j-api-2.16.0.jar and log4j-core-2.16.0.jar from https://dlcdn.apache.org/logging/log4j/2.16.0/apache-log4j-2.16.0-bin.zip
2. Create the Log4j configuration file as shown below.
3. Write the sample code.
4. Run the application.

Download the zip file and use only log4j-api-2.16.0.jar and log4j-core-2.16.0.jar.
package com.demo.logtest;

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;

public class LogTest {

    private static final Logger logger = LogManager.getLogger(LogTest.class);

    public static void main(String[] args) {
        for (int i = 0; i < 2; i++) {
            logger.info("i--------------------" + i);
            logger.info("11111111111 ");
            logger.error("2222222222222 ");
            logger.debug("3333333333333333");
        }
    }
}
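A side note on the logging calls above: Log4j 2 also supports parameterized messages, which avoid building the concatenated string when the level is disabled. The first call in the loop could be written as:

logger.info("i--------------------{}", i); // the {} placeholder is formatted only if INFO is enabled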
Create a log4j2.properties file and place it in the src folder of your application as below. (Log4j 2 looks for a file named log4j2.properties on the classpath; you can also point to one explicitly with -Dlog4j2.configurationFile=<path>.)
status = error
name = PropertiesConfig
property.filename = D:\\Logs\\Test.log
filters = threshold
filter.threshold.type = ThresholdFilter
filter.threshold.level = debug
appenders =rolling
#appender.console.type = Console
#appender.console.name = STDOUT
#appender.console.layout.type = PatternLayout
#appender.console.layout.pattern = %m%n
appender.rolling.type = RollingFile
appender.rolling.name = RollingFile
appender.rolling.fileName = ${filename}
appender.rolling.filePattern = D:\\Logs\\TZ-%d{MM-dd-yy-HH-mm-ss}-%i.log.gz
#appender.rolling.filePattern = %d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n
appender.rolling.layout.type = PatternLayout
appender.rolling.layout.pattern = %d{yyyy-MM-dd HH:mm:ss} %-5p %C %M():%L - %m%n
appender.rolling.policies.type = Policies
#appender.rolling.policies.time.type = TimeBasedTriggeringPolicy
#appender.rolling.policies.time.interval = 1
#appender.rolling.policies.time.modulate = true
appender.rolling.policies.size.type = SizeBasedTriggeringPolicy
appender.rolling.policies.size.size=100MB
#appender.rolling.type = RollingFile
#appender.rolling.name = RollingFile
#appender.rolling.fileName = ${filename}
#appender.rolling.filePattern = D:\\Logs\\TZ-%d{MM-dd-yy-HH-mm-ss}-%i.log.gz
#appender.rolling.filePattern = %d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n
#appender.rolling.layout.type = PatternLayout
#appender.rolling.layout.pattern = %d{yyyy-MM-dd HH:mm:ss} %-5p %C ::: %M():%L - %m%n
#appender.rolling.policies.type = Policies
#appender.rolling.policies.time.type = TimeBasedTriggeringPolicy
#appender.rolling.policies.time.interval = 1
#appender.rolling.policies.time.modulate = true
#appender.rolling.policies.size.type = SizeBasedTriggeringPolicy
#appender.rolling.policies.size.size=2KB
#log4j.appender.Appender2=org.apache.log4j.DailyRollingFileAppender
#log4j.appender.Appender2.File=/log/sample.log
#log4j.appender.Appender2.DatePattern='.'yyyy-MM-dd
#log4j.appender.Appender2.layout=org.apache.log4j.PatternLayout
#log4j.appender.Appender2.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %c{1} [%p] %m%n
#appender.list.type = List
#appender.list.name = List
#appender.list.filters = threshold
#appender.list.filter.threshold.type = ThresholdFilter
#appender.list.filter.threshold.level = error
loggers = rolling
logger.rolling.name = com.demo.logtest
logger.rolling.level = debug
logger.rolling.additivity = false
logger.rolling.appenderRefs = rolling
logger.rolling.appenderRef.rolling.ref = RollingFile
#rootLogger.level = info
#rootLogger.appenderRefs = stdout
#rootLogger.appenderRef.stdout.ref = STDOUT
Run the application; the log output looks like below.
2021-12-16 20:08:57 INFO com.demo.logtest.LogTest main():12 - i--------------------0
2021-12-16 20:08:57 INFO com.demo.logtest.LogTest main():13 - 11111111111
2021-12-16 20:08:57 ERROR com.demo.logtest.LogTest main():14 - 2222222222222
2021-12-16 20:08:57 DEBUG com.demo.logtest.LogTest main():15 - 3333333333333333
2021-12-16 20:08:57 INFO com.demo.logtest.LogTest main():12 - i--------------------1
2021-12-16 20:08:57 INFO com.demo.logtest.LogTest main():13 - 11111111111
2021-12-16 20:08:57 ERROR com.demo.logtest.LogTest main():14 - 2222222222222
2021-12-16 20:08:57 DEBUG com.demo.logtest.LogTest main():15 - 3333333333333333

Related

Logs are logged into the rolling file and not the console after the RollingFile appender is initialized

Below are the programmatic configurations of the console appender and the log appenders, which I converted while upgrading Log4j from 1.x to version 2.17.1.
// Programmatic configuration of a ConsoleAppender in Log4j 2
String pattern = "%d{DATE} [%p|%C{2}] %m%n";
ConfigurationBuilder<BuiltConfiguration> builder =
        ConfigurationBuilderFactory.newConfigurationBuilder();
builder.setConfigurationName("DefaultLogger");
// Create pattern layout
LayoutComponentBuilder layoutBuilder = builder.newLayout("PatternLayout")
        .addAttribute("pattern", pattern);
AppenderComponentBuilder appenderBuilder = builder.newAppender("Console", "CONSOLE")
        .addAttribute("target", ConsoleAppender.Target.SYSTEM_OUT);
appenderBuilder.add(layoutBuilder);
builder.add(appenderBuilder);
RootLoggerComponentBuilder rootLogger = builder.newRootLogger(Level.DEBUG);
rootLogger.add(builder.newAppenderRef("Console"));
builder.add(rootLogger);
Configurator.initialize(builder.build());
Configurator.reconfigure(builder.build());
// Programmatic configuration of a RollingFileAppender in Log4j 2:
CdasLogger() {
    // createLogger takes a path for each logger from the config file
    loadLog = createLogger("load.log");
    updateLog = createLogger("update.log");
    userLog = createLogger("user.log");
}

private Logger createLogger(String logType) {
    String pattern = "%d %M - %m%n";
    String consolePattern = "%d{DATE} [%p|%C{2}] %m%n";
    String fileLogName = "/app/app.log";
    String filePattern = "/app/app.log-%d{MM-dd-yy}.log.gz";
    System.out.println("logtype is::" + logType);
    String path = ConfigReader.getStringValue(logType);
    System.out.println(path);
    String daily = "0 0 12 1/1 * ? *";
    // Initializing the logger context
    LoggerContext ctx = (LoggerContext) LogManager.getContext(false);
    ConfigurationBuilder<BuiltConfiguration> builder =
            ConfigurationBuilderFactory.newConfigurationBuilder();
    builder.setConfigurationName("rollingFileLogger");
    // specifying the pattern layout
    LayoutComponentBuilder layoutBuilder = builder.newLayout("PatternLayout")
            .addAttribute("pattern", pattern);
    // specifying the policy for the rolling file
    ComponentBuilder triggeringPolicy = builder.newComponent("Policies")
            .addComponent(builder.newComponent("SizeBasedTriggeringPolicy").addAttribute("size", "10MB"));
    // create a RollingFile appender
    AppenderComponentBuilder appenderBuilder = builder.newAppender("rollingFile", "RollingFile")
            .addAttribute("fileName", path)
            .addAttribute("filePattern", path + "-%d{MM-dd-yy-HH-mm-ss}.log.")
            .add(layoutBuilder)
            .addComponent(triggeringPolicy);
    builder.add(appenderBuilder);
    RootLoggerComponentBuilder rootLogger = builder.newRootLogger(Level.TRACE);
    rootLogger.add(builder.newAppenderRef("rollingFile"));
    builder.add(rootLogger);
    ctx = Configurator.initialize(builder.build());
    Configurator.reconfigure(builder.build());
    return ctx.getLogger(logType); // return the logger to the caller
}
The console appender gets initialized first and logs are written to it. After the RollingFile appender is initialized, all the logs are written to the RollingFile appender; no logs reach the console appender once the RollingFile appender is initialized.
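A likely explanation, and what Piotr's comment below points at: Configurator.reconfigure(builder.build()) replaces the active configuration wholesale, so a configuration built by a second, independent builder discards the appenders registered by the first. A minimal sketch of the effect:

// Sketch: two independent builders; the second reconfigure() wins
ConfigurationBuilder<BuiltConfiguration> consoleCfg = ConfigurationBuilderFactory.newConfigurationBuilder();
// ... console appender and root logger added here ...
Configurator.reconfigure(consoleCfg.build()); // active config: console only

ConfigurationBuilder<BuiltConfiguration> fileCfg = ConfigurationBuilderFactory.newConfigurationBuilder();
// ... rolling file appender and root logger added here ...
Configurator.reconfigure(fileCfg.build());    // whole config replaced: rolling file only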
EDIT 1:
As per Piotr's comment, I made the change below to use the same configuration builder for all the appenders.
private void configureLogger() {
    ConfigurationBuilder<BuiltConfiguration> builder = ConfigurationBuilderFactory.newConfigurationBuilder();
    LoggerContext ctx = (LoggerContext) LogManager.getContext(false);
    // create a console appender
    AppenderComponentBuilder console = builder.newAppender("stdout", "Console")
            .addAttribute("target", ConsoleAppender.Target.SYSTEM_OUT);
    console.add(builder.newLayout("PatternLayout")
            .addAttribute("pattern", "%d{DATE} [%p|%C{2}] %m%n"));
    RootLoggerComponentBuilder rootLogger = builder.newRootLogger(Level.DEBUG);
    rootLogger.add(builder.newAppenderRef("stdout"));
    builder.add(console);
    // create a rolling file appender
    String pattern = "%d %M - %m%n";
    // specifying the pattern layout
    LayoutComponentBuilder layoutBuilder = builder.newLayout("PatternLayout")
            .addAttribute("pattern", pattern);
    // specifying the policy for the rolling file
    ComponentBuilder triggeringPolicy = builder.newComponent("Policies")
            .addComponent(builder.newComponent("SizeBasedTriggeringPolicy").addAttribute("size", "10MB"));
    String[] logTypes = {"load.log", "update.log", "user.log"};
    for (String logType : logTypes) {
        System.out.println("logtype is::" + logType);
        String path = ConfigReader.getStringValue(logType);
        System.out.println(path);
        AppenderComponentBuilder appenderBuilder = builder.newAppender(logType, "RollingFile")
                .addAttribute("fileName", path == null ? "/app1/app.log" : path)
                .addAttribute("filePattern", path == null ? "/app1/app.log" : path + "-%d{MM-dd-yy-HH-mm-ss}.log.")
                .add(layoutBuilder)
                .addComponent(triggeringPolicy);
        builder.add(appenderBuilder);
        rootLogger.add(builder.newAppenderRef(logType));
    }
    builder.add(rootLogger);
    Configurator.reconfigure(builder.build());
}
The logs are still not updating.
I call configureLogger() during application startup.
In another class I have the code below:
public class CdasLogger {

    private final Logger updateLog, loadLog, userLog;

    CdasLogger() {
        loadLog = createLogger("load.log");
        updateLog = createLogger("update.log");
        userLog = createLogger("user.log");
    }

    private Logger createLogger(String logType) {
        LoggerContext ctx = (LoggerContext) LogManager.getContext(false);
        return ctx.getLogger(logType);
    }
}
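One thing worth checking in the EDIT 1 code (a sketch under my assumptions, not a confirmed fix): every rolling appender is attached to the root logger, so each message goes to every file. If the intent is one file per log type, a named, non-additive logger per type keeps the streams separate:

// Hypothetical variant of the loop in configureLogger(): a named logger per log type
for (String logType : logTypes) {
    // ... build and register the appender as before ...
    builder.add(builder.newLogger(logType, Level.DEBUG)
            .add(builder.newAppenderRef(logType))
            .addAttribute("additivity", false));
}

ctx.getLogger(logType) in CdasLogger would then resolve to these named loggers.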

Terraform Windows Server 2016: adding and running scripts using WinRM

I need to add a PowerShell script to a Windows Server 2016 machine and then run it to install a specific program.
I am sharing only the code for the Windows machine creation, since the rest of it is just default settings.
resource "azurerm_windows_virtual_machine" "vm-clt" {
count = local.VMInstance
name = "windows${count.index + 1}"
location = var.location
availability_set_id = azurerm_availability_set.avset.id
resource_group_name = var.resource_group_name
network_interface_ids = [element(azurerm_network_interface.nic_vm.*.id, count.index)]
size = "Standard_B1s"
admin_username = var.username
admin_password = var.adminpassword
enable_automatic_updates = "false"
provision_vm_agent = "true"
depends_on = [azurerm_network_interface.nic_vm]
source_image_reference {
publisher = "MicrosoftWindowsServer"
offer = "WindowsServer"
sku = "2016-Datacenter"
version = "latest"
}
os_disk {
name = lower("${local.prefixStorageName}${count.index + 1}")
caching = "ReadWrite"
storage_account_type = "Premium_LRS"
}
provisioner "file" {
source = "Agent.ps1"
destination = "C:/Windows/Temp/Agent.ps1"
connection {
type = "winrm"
port = "5985"
https = false
insecure = true
user = var.username
password = var.adminpassword
host = "${element(azurerm_public_ip.pip_vm.*.ip_address, count.index)}"
timeout = "5m"
}
}
provisioner "remote-exec" {
inline = [
"powershell.exe -ExecutionPolicy Bypass -File C:\\Windows\\Temp\\Agent.ps1 -APIKey 2b076e91c7xxxxxxxx4fd25a094dce"
]
connection {
type = "winrm"
user = var.username
password = var.adminpassword
host = "${element(azurerm_public_ip.pip_vm.*.ip_address, count.index)}"
timeout = "5m"
}
}
}
The error that I receive while deploying is: timeout - last error: unknown error Post "http://XXX.XXX.XXX.XXX:5985/wsman": dial tcp XXX.XXX.XXX.XXX:5985: i/o timeout
NOTE: the error only appears during terraform apply.
You are missing some configuration in azurerm_windows_virtual_machine, such as winrm_listener and additional_unattend_content. Also, make sure you have added an NSG rule for the WinRM port.
So, after making some changes, I tested with the script below:
provider "azurerm"{
features{}
}
variable "admin_username" {
type = string
default = "adminuser"
}
variable "admin_password" {
type = string
default = "Paassworrdd#12344"
}
data "azurerm_resource_group" "example" {
name = "ansumantest"
}
resource "azurerm_virtual_network" "example" {
name = "example-network"
address_space = ["10.0.0.0/16"]
location = data.azurerm_resource_group.example.location
resource_group_name = data.azurerm_resource_group.example.name
}
resource "azurerm_subnet" "example" {
name = "internal"
resource_group_name = data.azurerm_resource_group.example.name
virtual_network_name = azurerm_virtual_network.example.name
address_prefixes = ["10.0.2.0/24"]
}
resource "azurerm_public_ip" "windows_pip" {
name = "examplevm-PIP"
location = data.azurerm_resource_group.example.location
resource_group_name = data.azurerm_resource_group.example.name
allocation_method = "Static"
}
resource "azurerm_network_security_group" "windows_nsg" {
name = "exampleVM-NSG"
location = data.azurerm_resource_group.example.location
resource_group_name = data.azurerm_resource_group.example.name
security_rule {
name = "RDP"
priority = 100
direction = "Inbound"
access = "Allow"
protocol = "Tcp"
source_port_range = "*"
destination_port_range = "3389"
source_address_prefix = "*"
destination_address_prefix = "*"
}
security_rule {
name = "WinRM"
priority = 110
direction = "Inbound"
access = "Allow"
protocol = "Tcp"
source_port_range = "*"
destination_port_range = "5985"
source_address_prefix = "*"
destination_address_prefix = "*"
}
security_rule {
name = "HTTP"
priority = 120
direction = "Inbound"
access = "Allow"
protocol = "Tcp"
source_port_range = "*"
destination_port_range = "80"
source_address_prefix = "*"
destination_address_prefix = "*"
}
}
resource "azurerm_subnet_network_security_group_association" "example" {
subnet_id = azurerm_subnet.example.id
network_security_group_id = azurerm_network_security_group.windows_nsg.id
}
resource "azurerm_network_interface" "example" {
name = "example-nic"
location = data.azurerm_resource_group.example.location
resource_group_name = data.azurerm_resource_group.example.name
ip_configuration {
name = "internal"
subnet_id = azurerm_subnet.example.id
private_ip_address_allocation = "Dynamic"
public_ip_address_id = azurerm_public_ip.windows_pip.id
}
}
resource "azurerm_windows_virtual_machine" "vm_persistent" {
name = "vm-persistent"
resource_group_name = data.azurerm_resource_group.example.name
location = data.azurerm_resource_group.example.location
size = "Standard_D4_v3"
# Here my variables for User/Password
admin_username = var.admin_username
admin_password = var.admin_password
network_interface_ids = [azurerm_network_interface.example.id]
custom_data = "${filebase64("C:/Users/user/terraform/test/winrm.ps1")}"
provision_vm_agent = "true"
winrm_listener {
protocol = "Http"
}
os_disk {
caching = "ReadWrite"
storage_account_type = "Standard_LRS"
}
source_image_reference {
publisher = "MicrosoftWindowsServer"
offer = "WindowsServer"
sku = "2016-Datacenter"
version = "latest"
}
additional_unattend_content {
setting = "AutoLogon"
content = "<AutoLogon><Password><Value>${var.admin_password}</Value></Password><Enabled>true</Enabled><LogonCount>1</LogonCount><Username>${var.admin_username}</Username></AutoLogon>"
}
additional_unattend_content {
setting = "FirstLogonCommands"
content = "${file("C:/Users/user/terraform/test/firstlogincommand.xml")}"
}
provisioner "remote-exec" {
connection {
host = azurerm_public_ip.windows_pip.ip_address
type = "winrm"
port = 5985
https = false
timeout = "5m"
user = var.admin_username
password = var.admin_password
}
inline = [
"powershell.exe -ExecutionPolicy Unrestricted -Command {Install-WindowsFeature -name Web-Server -IncludeManagementTools}",
]
}
}
The firstlogincommand.xml file:
<FirstLogonCommands>
    <SynchronousCommand>
        <CommandLine>cmd /c "copy C:\AzureData\CustomData.bin C:\winrm.ps1"</CommandLine>
        <Description>Move the CustomData file to the working directory</Description>
        <Order>12</Order>
    </SynchronousCommand>
    <SynchronousCommand>
        <CommandLine>powershell.exe -sta -ExecutionPolicy Unrestricted -file C:\winrm.ps1</CommandLine>
        <Description>Execute the WinRM enabling script</Description>
        <Order>13</Order>
    </SynchronousCommand>
</FirstLogonCommands>
winrm.ps1
Write-Host "Delete any existing WinRM listeners"
winrm delete winrm/config/listener?Address=*+Transport=HTTP 2>$Null
winrm delete winrm/config/listener?Address=*+Transport=HTTPS 2>$Null
Write-Host "Create a new WinRM listener and configure"
winrm create winrm/config/listener?Address=*+Transport=HTTP
winrm set winrm/config/winrs '#{MaxMemoryPerShellMB="0"}'
winrm set winrm/config '#{MaxTimeoutms="7200000"}'
winrm set winrm/config/service '#{AllowUnencrypted="true"}'
winrm set winrm/config/service '#{MaxConcurrentOperationsPerUser="12000"}'
winrm set winrm/config/service/auth '#{Basic="true"}'
winrm set winrm/config/client/auth '#{Basic="true"}'
Write-Host "Configure UAC to allow privilege elevation in remote shells"
$Key = 'HKLM:\SOFTWARE\Microsoft\Windows\CurrentVersion\Policies\System'
$Setting = 'LocalAccountTokenFilterPolicy'
Set-ItemProperty -Path $Key -Name $Setting -Value 1 -Force
Write-Host "turn off PowerShell execution policy restrictions"
Set-ExecutionPolicy -ExecutionPolicy Unrestricted
Write-Host "Configure and restart the WinRM Service; Enable the required firewall exception"
Stop-Service -Name WinRM
Set-Service -Name WinRM -StartupType Automatic
netsh advfirewall firewall set rule name="Windows Remote Management (HTTP-In)" new action=allow localip=any remoteip=any
Start-Service -Name WinRM
Output: (screenshot of the successful deployment omitted)

DefaultRolloverStrategy Log4j2 not working (programmatically)

I need to create a RollingFile appender and set the maximum number of log files at runtime with Log4j 2, so I use the following code to achieve that:
LoggerContext ctx = (LoggerContext) LogManager.getContext(false);
Configuration config = ctx.getConfiguration();
DefaultRolloverStrategy strategy = DefaultRolloverStrategy.newBuilder()
        .withMax("4")
        .withMin("1")
        .withFileIndex("max")
        .withConfig(config)
        .withCompressionLevelStr(Deflater.NO_COMPRESSION + "")
        .build();
PatternLayout layout = PatternLayout.newBuilder().withConfiguration(config)
        .withPattern("%d{yyyy-MM-dd HH:mm:ss.SSS} [%5p] %pid --- %-40.40logger{39} : %m%n%wEx")
        .build();
RollingFileAppender appender = RollingFileAppender.newBuilder().setConfiguration(config)
        .withName("TraceFileAppender")
        .withLayout(layout)
        .withFileName("log.log")
        .withFilePattern("log.%d{yyyy-MM-ddHHmmSSS}.log")
        .withPolicy(SizeBasedTriggeringPolicy.createPolicy("20KB"))
        .withStrategy(strategy)
        .build();
appender.start();
config.addAppender(appender);
LoggerConfig loggerConfig = config.getRootLogger();
loggerConfig.setLevel(Level.toLevel("DEBUG"));
loggerConfig.addAppender(appender, null, null);
This works fine except for the maximum number of files: I am getting more files than the 4 set in my strategy. What is wrong? Any help is welcome!
Thanks in advance,
Regards, Peter
Pfff, it took me a while, but I got it working. There is not a lot of information about programmatically changing Log4j 2 appenders, so this is my solution. (The max setting of DefaultRolloverStrategy only caps the %i counter in the file pattern; with a purely date-based pattern like the one above it never limits the file count, so a DeleteAction with an IfAccumulatedFileCount condition prunes the old files instead.)
LoggerContext ctx = (LoggerContext) LogManager.getContext(false);
Configuration config = ctx.getConfiguration();
PathCondition[] pathConditions = new PathCondition[1];
pathConditions[0] = IfAccumulatedFileCount.createFileCountCondition(Integer.parseInt(expire));
DeleteAction action = DeleteAction.createDeleteAction("C:\\logs\\BuddyServer\\", true, 1, false, null, pathConditions, null, config);
Action[] actions = new Action[1];
actions[0] = action;
DefaultRolloverStrategy strategy = DefaultRolloverStrategy.newBuilder()
        .withMax(expire)
        .withCustomActions(actions)
        .withMin("1")
        .withFileIndex("max")
        .withConfig(config)
        .withCompressionLevelStr(Deflater.NO_COMPRESSION + "")
        .build();
PatternLayout layout = PatternLayout.newBuilder().withConfiguration(config)
        .withPattern("%d{yyyy-MM-dd HH:mm:ss.SSS} [%5p] %pid --- %-40.40logger{39} : %m%n%wEx")
        .build();
RollingFileAppender appender = RollingFileAppender.newBuilder().setConfiguration(config)
        .withName("TraceFileAppender")
        .withLayout(layout)
        .withFileName(file + ".log")
        .withFilePattern(file + ".%d{yyyy-MM-ddHHmmSSS}.log")
        .withPolicy(SizeBasedTriggeringPolicy.createPolicy(segment))
        .withStrategy(strategy)
        .build();
appender.start();
config.addAppender(appender);
LoggerConfig loggerConfig = config.getRootLogger();
loggerConfig.setLevel(Level.toLevel(buddyConfig.getOption("log", "verbose").toUpperCase()));
loggerConfig.addAppender(appender, null, null);
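One step the Log4j 2 manual recommends after modifying a live Configuration, which the snippet above omits (so treat its necessity in this particular setup as an assumption):

ctx.updateLoggers(); // propagate the modified configuration to all active loggers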

Spring Boot Kafka message consumer and lost message

I use Spring Boot 2.0.2 and Spring Kafka. I also use the Kafka Docker image 1.1.0 from the following repository: https://hub.docker.com/r/wurstmeister/kafka/tags/
These are my Kafka configs:
@Configuration
@EnableKafka
public class KafkaConfig {
}

@Configuration
public class KafkaConsumerConfig {

    @Value("${spring.kafka.bootstrap-servers}")
    private String bootstrapServers;

    @Value("${spring.kafka.consumer.group-id}")
    private String consumerGroupId;

    @Bean
    public Map<String, Object> consumerConfigs() {
        Map<String, Object> props = new HashMap<>();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
        props.put(ConsumerConfig.GROUP_ID_CONFIG, consumerGroupId);
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, JsonDeserializer.class);
        props.put(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, (int) TimeUnit.MINUTES.toMillis(10));
        return props;
    }

    @Bean
    public ConsumerFactory<String, String> consumerFactory() {
        return new DefaultKafkaConsumerFactory<>(consumerConfigs(), new StringDeserializer(), new JsonDeserializer<>(String.class));
    }

    @Bean
    public ConcurrentKafkaListenerContainerFactory<String, String> kafkaListenerContainerFactory() {
        ConcurrentKafkaListenerContainerFactory<String, String> factory = new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(consumerFactory());
        return factory;
    }

    @Bean
    public ConsumerFactory<String, Post> postConsumerFactory() {
        return new DefaultKafkaConsumerFactory<>(consumerConfigs(), new StringDeserializer(), new JsonDeserializer<>(Post.class));
    }

    @Bean
    public ConcurrentKafkaListenerContainerFactory<String, Post> postKafkaListenerContainerFactory() {
        ConcurrentKafkaListenerContainerFactory<String, Post> factory = new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(postConsumerFactory());
        return factory;
    }
}
@Configuration
public class KafkaProducerConfig {

    @Value("${spring.kafka.bootstrap-servers}")
    private String bootstrapServers;

    @Bean
    public Map<String, Object> producerConfigs() {
        Map<String, Object> props = new HashMap<>();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, JsonSerializer.class);
        props.put(ProducerConfig.MAX_REQUEST_SIZE_CONFIG, 15000000);
        return props;
    }

    @Bean
    public ProducerFactory<String, Post> postProducerFactory() {
        return new DefaultKafkaProducerFactory<>(producerConfigs());
    }

    @Bean
    public KafkaTemplate<String, Post> postKafkaTemplate() {
        return new KafkaTemplate<>(postProducerFactory());
    }
}
This is the Kafka part of application.properties:
#Kafka
spring.kafka.bootstrap-servers=${kafka.host}:${kafka.port}
spring.kafka.consumer.auto-offset-reset=earliest
spring.kafka.consumer.group-id=postfenix
kafka.topic.posts.create=posts.create
This is my message listener:
@Component
public class PostConsumer {

    static final Logger logger = LoggerFactory.getLogger(PostConsumer.class);

    @KafkaListener(topics = "${kafka.topic.posts.create}", containerFactory = "postKafkaListenerContainerFactory")
    public void createPost(ConsumerRecord<String, Post> consumerRecord) {
        Post post = consumerRecord.value();
        logger.info("Received message for post creation: {}", post);
    }
}
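As an aside (a variant sketch, not from the original post): spring-kafka can also pass the deserialized payload to the listener method directly, so the ConsumerRecord unwrapping is optional:

@KafkaListener(topics = "${kafka.topic.posts.create}", containerFactory = "postKafkaListenerContainerFactory")
public void createPost(Post post) {
    // the record value is extracted and injected as the method argument
    logger.info("Received message for post creation: {}", post);
}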
I have also implemented the PostService which should send the Post to the Kafka topic:
@Service
public class PostServiceImpl implements PostService {

    static final Logger logger = LoggerFactory.getLogger(PostServiceImpl.class);

    @Value("${kafka.topic.posts.create}")
    private String kafkaTopicPostsCreate;

    @Autowired
    private KafkaTemplate<String, Post> postKafkaTemplate;

    @Override
    public void sendPost(Post post) {
        postKafkaTemplate.send(kafkaTopicPostsCreate, post);
        logger.info("Message sent to the post creation queue: {}", post);
    }
}
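Note that KafkaTemplate.send() is asynchronous, so the log line above only confirms the record was queued, not delivered. While debugging lost messages, one way to surface send failures (a sketch, assuming the spring-kafka version bundled with Boot 2.0.2) is to block on the returned future:

// Throws if the broker does not acknowledge the record within the timeout
postKafkaTemplate.send(kafkaTopicPostsCreate, post).get(10, TimeUnit.SECONDS);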
I have also implemented a Spring Boot test:
@RunWith(SpringRunner.class)
@SpringBootTest(classes = { TestApplication.class })
public class PostServiceIT {

    @Autowired
    private PostService postService;

    @Autowired
    private MessageRepository messageRepository;

    @Before
    public void setUp() {
        messageRepository.deleteAll();
    }

    @Test
    public void testCreatePost() throws InterruptedException {
        assertEquals(0, messageRepository.findAll().size());
        Post post = new Post();
        ...
        postService.sendPost(post);
        await().atMost(60, SECONDS).pollDelay(1000, MILLISECONDS).until(() -> messageRepository.findAll().size() == 1);
    }
}
This is the log:
2018-06-09 16:12:37.547 INFO 17824 --- [ main] org.quartz.impl.StdSchedulerFactory : Quartz scheduler 'schedulerFactoryBean' initialized from an externally provided properties instance.
2018-06-09 16:12:37.547 INFO 17824 --- [ main] org.quartz.impl.StdSchedulerFactory : Quartz scheduler version: 2.3.0
2018-06-09 16:12:37.548 INFO 17824 --- [ main] org.quartz.core.QuartzScheduler : JobFactory set to: org.springframework.scheduling.quartz.AdaptableJobFactory#7a3e5cd3
2018-06-09 16:12:38.967 INFO 17824 --- [ main] o.s.c.support.DefaultLifecycleProcessor : Starting beans in phase 2147483547
2018-06-09 16:12:38.997 INFO 17824 --- [ main] o.a.k.clients.consumer.ConsumerConfig : ConsumerConfig values:
auto.commit.interval.ms = 5000
auto.offset.reset = latest
bootstrap.servers = [127.0.0.1:9093]
check.crcs = true
client.id =
connections.max.idle.ms = 540000
enable.auto.commit = true
exclude.internal.topics = true
fetch.max.bytes = 52428800
fetch.max.wait.ms = 500
fetch.min.bytes = 1
group.id = postfenix
heartbeat.interval.ms = 3000
interceptor.classes = []
internal.leave.group.on.close = true
isolation.level = read_uncommitted
key.deserializer = class org.apache.kafka.common.serialization.StringDeserializer
max.partition.fetch.bytes = 1048576
max.poll.interval.ms = 600000
max.poll.records = 500
metadata.max.age.ms = 300000
metric.reporters = []
metrics.num.samples = 2
metrics.recording.level = INFO
metrics.sample.window.ms = 30000
partition.assignment.strategy = [class org.apache.kafka.clients.consumer.RangeAssignor]
receive.buffer.bytes = 65536
reconnect.backoff.max.ms = 1000
reconnect.backoff.ms = 50
request.timeout.ms = 305000
retry.backoff.ms = 100
sasl.jaas.config = null
sasl.kerberos.kinit.cmd = /usr/bin/kinit
sasl.kerberos.min.time.before.relogin = 60000
sasl.kerberos.service.name = null
sasl.kerberos.ticket.renew.jitter = 0.05
sasl.kerberos.ticket.renew.window.factor = 0.8
sasl.mechanism = GSSAPI
security.protocol = PLAINTEXT
send.buffer.bytes = 131072
session.timeout.ms = 10000
ssl.cipher.suites = null
ssl.enabled.protocols = [TLSv1.2, TLSv1.1, TLSv1]
ssl.endpoint.identification.algorithm = null
ssl.key.password = null
ssl.keymanager.algorithm = SunX509
ssl.keystore.location = null
ssl.keystore.password = null
ssl.keystore.type = JKS
ssl.protocol = TLS
ssl.provider = null
ssl.secure.random.implementation = null
ssl.trustmanager.algorithm = PKIX
ssl.truststore.location = null
ssl.truststore.password = null
ssl.truststore.type = JKS
value.deserializer = class org.springframework.kafka.support.serializer.JsonDeserializer
2018-06-09 16:12:39.095 INFO 17824 --- [ main] o.a.kafka.common.utils.AppInfoParser : Kafka version : 1.1.0
2018-06-09 16:12:39.095 INFO 17824 --- [ main] o.a.kafka.common.utils.AppInfoParser : Kafka commitId : fdcf75ea326b8e07
2018-06-09 16:12:39.100 INFO 17824 --- [ main] o.s.s.c.ThreadPoolTaskScheduler : Initializing ExecutorService
2018-06-09 16:12:39.104 INFO 17824 --- [ main] o.s.c.support.DefaultLifecycleProcessor : Starting beans in phase 2147483647
2018-06-09 16:12:39.104 INFO 17824 --- [ main] o.s.s.quartz.SchedulerFactoryBean : Starting Quartz Scheduler now
2018-06-09 16:12:39.104 INFO 17824 --- [ main] org.quartz.core.QuartzScheduler : Scheduler schedulerFactoryBean_$_NON_CLUSTERED started.
2018-06-09 16:12:39.111 INFO 17824 --- [SchedulerThread] c.n.quartz.mongodb.dao.TriggerDao : Found 0 triggers which are eligible to be run.
2018-06-09 16:12:39.119 INFO 17824 --- [ main] com.postfenix.domain.post.PostServiceIT : Started PostServiceIT in 5.094 seconds (JVM running for 5.74)
2018-06-09 16:12:39.121 INFO 17824 --- [ main] c.p.d.configuration.TestApplication : Initializing application...
2018-06-09 16:12:39.258 INFO 17824 --- [ main] org.mongodb.driver.connection : Opened connection [connectionId{localValue:4, serverValue:4}] to localhost:27018
2018-06-09 16:12:39.338 WARN 17824 --- [ntainer#0-0-C-1] org.apache.kafka.clients.NetworkClient : [Consumer clientId=consumer-1, groupId=postfenix] Error while fetching metadata with correlation id 2 : {posts.create=LEADER_NOT_AVAILABLE}
2018-06-09 16:12:39.339 INFO 17824 --- [ntainer#0-0-C-1] org.apache.kafka.clients.Metadata : Cluster ID: BYqDmOq_SDCll0ILZI_KoA
2018-06-09 16:12:39.392 INFO 17824 --- [ main] o.a.k.clients.producer.ProducerConfig : ProducerConfig values:
acks = 1
batch.size = 16384
bootstrap.servers = [127.0.0.1:9093]
buffer.memory = 33554432
client.id =
compression.type = none
connections.max.idle.ms = 540000
enable.idempotence = false
interceptor.classes = []
key.serializer = class org.apache.kafka.common.serialization.StringSerializer
linger.ms = 0
max.block.ms = 60000
max.in.flight.requests.per.connection = 5
max.request.size = 15000000
metadata.max.age.ms = 300000
metric.reporters = []
metrics.num.samples = 2
metrics.recording.level = INFO
metrics.sample.window.ms = 30000
partitioner.class = class org.apache.kafka.clients.producer.internals.DefaultPartitioner
receive.buffer.bytes = 32768
reconnect.backoff.max.ms = 1000
reconnect.backoff.ms = 50
request.timeout.ms = 30000
retries = 0
retry.backoff.ms = 100
sasl.jaas.config = null
sasl.kerberos.kinit.cmd = /usr/bin/kinit
sasl.kerberos.min.time.before.relogin = 60000
sasl.kerberos.service.name = null
sasl.kerberos.ticket.renew.jitter = 0.05
sasl.kerberos.ticket.renew.window.factor = 0.8
sasl.mechanism = GSSAPI
security.protocol = PLAINTEXT
send.buffer.bytes = 131072
ssl.cipher.suites = null
ssl.enabled.protocols = [TLSv1.2, TLSv1.1, TLSv1]
ssl.endpoint.identification.algorithm = null
ssl.key.password = null
ssl.keymanager.algorithm = SunX509
ssl.keystore.location = null
ssl.keystore.password = null
ssl.keystore.type = JKS
ssl.protocol = TLS
ssl.provider = null
ssl.secure.random.implementation = null
ssl.trustmanager.algorithm = PKIX
ssl.truststore.location = null
ssl.truststore.password = null
ssl.truststore.type = JKS
transaction.timeout.ms = 60000
transactional.id = null
value.serializer = class org.springframework.kafka.support.serializer.JsonSerializer
2018-06-09 16:12:39.419 INFO 17824 --- [ main] o.a.kafka.common.utils.AppInfoParser : Kafka version : 1.1.0
2018-06-09 16:12:39.419 INFO 17824 --- [ main] o.a.kafka.common.utils.AppInfoParser : Kafka commitId : fdcf75ea326b8e07
2018-06-09 16:12:39.437 WARN 17824 --- [ad | producer-1] org.apache.kafka.clients.NetworkClient : [Producer clientId=producer-1] Error while fetching metadata with correlation id 1 : {posts.create=LEADER_NOT_AVAILABLE}
2018-06-09 16:12:39.437 INFO 17824 --- [ad | producer-1] org.apache.kafka.clients.Metadata : Cluster ID: BYqDmOq_SDCll0ILZI_KoA
2018-06-09 16:12:39.454 WARN 17824 --- [ntainer#0-0-C-1] org.apache.kafka.clients.NetworkClient : [Consumer clientId=consumer-1, groupId=postfenix] Error while fetching metadata with correlation id 4 : {posts.create=LEADER_NOT_AVAILABLE}
2018-06-09 16:12:39.565 WARN 17824 --- [ad | producer-1] org.apache.kafka.clients.NetworkClient : [Producer clientId=producer-1] Error while fetching metadata with correlation id 3 : {posts.create=LEADER_NOT_AVAILABLE}
2018-06-09 16:12:39.590 WARN 17824 --- [ntainer#0-0-C-1] org.apache.kafka.clients.NetworkClient : [Consumer clientId=consumer-1, groupId=postfenix] Error while fetching metadata with correlation id 6 : {posts.create=LEADER_NOT_AVAILABLE}
2018-06-09 16:12:39.704 INFO 17824 --- [ main] c.p.domain.service.post.PostServiceImpl : Message sent to the post creation queue: Post [chatId=@name, parseMode=HTML]
2018-06-09 16:12:40.229 INFO 17824 --- [ntainer#0-0-C-1] o.a.k.c.c.internals.AbstractCoordinator : [Consumer clientId=consumer-1, groupId=postfenix] Discovered group coordinator 10.0.75.1:9093 (id: 2147482646 rack: null)
2018-06-09 16:12:40.232 INFO 17824 --- [ntainer#0-0-C-1] o.a.k.c.c.internals.ConsumerCoordinator : [Consumer clientId=consumer-1, groupId=postfenix] Revoking previously assigned partitions []
2018-06-09 16:12:40.233 INFO 17824 --- [ntainer#0-0-C-1] o.s.k.l.KafkaMessageListenerContainer : partitions revoked: []
2018-06-09 16:12:40.233 INFO 17824 --- [ntainer#0-0-C-1] o.a.k.c.c.internals.AbstractCoordinator : [Consumer clientId=consumer-1, groupId=postfenix] (Re-)joining group
2018-06-09 16:12:40.295 INFO 17824 --- [ntainer#0-0-C-1] o.a.k.c.c.internals.AbstractCoordinator : [Consumer clientId=consumer-1, groupId=postfenix] Successfully joined group with generation 1
2018-06-09 16:12:40.297 INFO 17824 --- [ntainer#0-0-C-1] o.a.k.c.c.internals.ConsumerCoordinator : [Consumer clientId=consumer-1, groupId=postfenix] Setting newly assigned partitions [posts.create-0]
2018-06-09 16:12:40.313 INFO 17824 --- [ntainer#0-0-C-1] o.a.k.c.consumer.internals.Fetcher : [Consumer clientId=consumer-1, groupId=postfenix] Resetting offset for partition posts.create-0 to offset 1.
2018-06-09 16:12:40.315 INFO 17824 --- [ntainer#0-0-C-1] o.s.k.l.KafkaMessageListenerContainer : partitions assigned: [posts.create-0]
Right now my test fails on the following line:
await().atMost(60, SECONDS).pollDelay(1000, MILLISECONDS).until(() -> messageRepository.findAll().size() == 1);
because after the first test run the message is for some reason not delivered to the PostConsumer.createPost method. But if I run the same test a second time against the same Kafka Docker instance, the message from the previous run is successfully delivered to PostConsumer.createPost. What am I doing wrong, why is the message not delivered after the first test run, and how can I fix it?
UPDATED
This is my updated KafkaConsumerConfig:
@Configuration
public class KafkaConsumerConfig {

    @Bean
    public ConsumerFactory<String, String> consumerFactory(KafkaProperties kafkaProperties) {
        return new DefaultKafkaConsumerFactory<>(kafkaProperties.buildConsumerProperties(), new StringDeserializer(), new JsonDeserializer<>(String.class));
    }

    @Bean
    public ConcurrentKafkaListenerContainerFactory<String, String> kafkaListenerContainerFactory() {
        ConcurrentKafkaListenerContainerFactory<String, String> factory = new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(consumerFactory());
        return factory;
    }

    @Bean
    public ConsumerFactory<String, Post> postConsumerFactory() {
        return new DefaultKafkaConsumerFactory<>(consumerConfigs(), new StringDeserializer(), new JsonDeserializer<>(Post.class));
    }

    @Bean
    public ConcurrentKafkaListenerContainerFactory<String, Post> postKafkaListenerContainerFactory() {
        ConcurrentKafkaListenerContainerFactory<String, Post> factory = new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(postConsumerFactory());
        return factory;
    }
}
Right now I have two compilation errors, in kafkaListenerContainerFactory and postConsumerFactory: the consumerConfigs() method no longer exists, and the consumerFactory call in kafkaListenerContainerFactory now requires a KafkaProperties argument.
spring.kafka.consumer.auto-offset-reset=earliest
spring.kafka.consumer.group-id=postfenix
You are not using these Boot properties, since you are creating your own consumer configs; that is why the ConsumerConfig dump in your log shows auto.offset.reset = latest, and a message sent before the consumer joins the group is skipped on the first run.
You should replace this
@Value("${spring.kafka.bootstrap-servers}")
private String bootstrapServers;

@Value("${spring.kafka.consumer.group-id}")
private String consumerGroupId;

@Bean
public Map<String, Object> consumerConfigs() {
    Map<String, Object> props = new HashMap<>();
    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
    props.put(ConsumerConfig.GROUP_ID_CONFIG, consumerGroupId);
    props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
    props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, JsonDeserializer.class);
    props.put(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, (int) TimeUnit.MINUTES.toMillis(10));
    return props;
}

@Bean
public ConsumerFactory<String, String> consumerFactory() {
    return new DefaultKafkaConsumerFactory<>(consumerConfigs(), new StringDeserializer(), new JsonDeserializer<>(String.class));
}
with
@Bean
public ConsumerFactory<String, String> consumerFactory(KafkaProperties kafkaProperties) {
    return new DefaultKafkaConsumerFactory<>(kafkaProperties.buildConsumerProperties(),
            new StringDeserializer(), new JsonDeserializer<>(String.class));
}
EDIT
@Bean
public ConsumerFactory<String, String> consumerFactory(KafkaProperties kafkaProperties) {
    return new DefaultKafkaConsumerFactory<>(kafkaProperties.buildConsumerProperties(), new StringDeserializer(), new JsonDeserializer<>(String.class));
}

@Bean
public ConcurrentKafkaListenerContainerFactory<String, String> kafkaListenerContainerFactory(KafkaProperties kafkaProperties) {
    ConcurrentKafkaListenerContainerFactory<String, String> factory = new ConcurrentKafkaListenerContainerFactory<>();
    factory.setConsumerFactory(consumerFactory(kafkaProperties));
    return factory;
}

@Bean
public ConsumerFactory<String, Post> postConsumerFactory(KafkaProperties kafkaProperties) {
    return new DefaultKafkaConsumerFactory<>(kafkaProperties.buildConsumerProperties(), new StringDeserializer(), new JsonDeserializer<>(Post.class));
}

@Bean
public ConcurrentKafkaListenerContainerFactory<String, Post> postKafkaListenerContainerFactory(KafkaProperties kafkaProperties) {
    ConcurrentKafkaListenerContainerFactory<String, Post> factory = new ConcurrentKafkaListenerContainerFactory<>();
    factory.setConsumerFactory(postConsumerFactory(kafkaProperties));
    return factory;
}

How to properly remove H2 from Grails

I am trying to slim down my Grails (2.4.2) app and cull out a lot of the stuff that create-app generates that I'm just not going to use.
One of these things is H2. I'm just not going to use it, and besides, if I needed an embedded DB, I'd prefer to use HSQLDB.
So I made the following changes to my BuildConfig:
...
// Remove the DB console for all environments, not just dev.
grails.dbconsole.enabled = false
...
grails.project.dependency.resolution = {
    inherits("global") {
        excludes 'h2'
    }
    ...
}
Now, when I do a run-app, I get the following error:
... <huge stacktrace omitted for brevity>
Caused by: org.springframework.jdbc.support.MetaDataAccessException: Error while extracting DatabaseMetaData; nested exception is java.sql.SQLException: org.h2.Driver
... 4 more
Caused by: java.sql.SQLException: org.h2.Driver
... 4 more
Caused by: java.lang.ClassNotFoundException: org.h2.Driver
at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
at java.lang.Class.forName(Class.java:270)
... 4 more
Error |
Forked Grails VM exited with error
What's going on and how do I fix this error while properly removing H2 and any of its configs/references?
Update: my DataSource.groovy file
dataSource {
    pooled = true
    jmxExport = true
    driverClassName = "org.h2.Driver"
    username = "sa"
    password = ""
}
hibernate {
    cache.use_second_level_cache = true
    cache.use_query_cache = false
    // cache.region.factory_class = 'net.sf.ehcache.hibernate.EhCacheRegionFactory' // Hibernate 3
    cache.region.factory_class = 'org.hibernate.cache.ehcache.EhCacheRegionFactory' // Hibernate 4
    singleSession = true // configure OSIV singleSession mode
}
// environment specific settings
environments {
    development {
        dataSource {
            dbCreate = "create-drop" // one of 'create', 'create-drop', 'update', 'validate', ''
            url = "jdbc:h2:mem:devDb;MVCC=TRUE;LOCK_TIMEOUT=10000;DB_CLOSE_ON_EXIT=FALSE"
        }
    }
    test {
        dataSource {
            dbCreate = "update"
            url = "jdbc:h2:mem:testDb;MVCC=TRUE;LOCK_TIMEOUT=10000;DB_CLOSE_ON_EXIT=FALSE"
        }
    }
    production {
        dataSource {
            dbCreate = "update"
            url = "jdbc:h2:prodDb;MVCC=TRUE;LOCK_TIMEOUT=10000;DB_CLOSE_ON_EXIT=FALSE"
            properties {
                // See http://grails.org/doc/latest/guide/conf.html#dataSource for documentation
                jmxEnabled = true
                initialSize = 5
                maxActive = 50
                minIdle = 5
                maxIdle = 25
                maxWait = 10000
                maxAge = 10 * 60000
                timeBetweenEvictionRunsMillis = 5000
                minEvictableIdleTimeMillis = 60000
                validationQuery = "SELECT 1"
                validationQueryTimeout = 3
                validationInterval = 15000
                testOnBorrow = true
                testWhileIdle = true
                testOnReturn = false
                jdbcInterceptors = "ConnectionState"
                defaultTransactionIsolation = java.sql.Connection.TRANSACTION_READ_COMMITTED
            }
        }
    }
}
In your grails-app/conf/DataSource.groovy you probably have some references to the H2 driver, e.g.:
dataSource {
    pooled = true
    jmxExport = true
    driverClassName = "org.h2.Driver"
    username = "sa"
    password = ""
}
Remove the dataSource configuration completely if you're not using any DB. If you're using a different DB, replace the H2 driver class name with the driver class for your DB (for example, org.hsqldb.jdbc.JDBCDriver for HSQLDB) and update the jdbc:h2 URLs in the environment blocks to match.
