Почему kafka через docker пытается применить bootstrap.servers = localhost:9092, но потом соединяется с моими настройками?
Коллеги, почему выполняется неудачная попытка использовать настройку bootstrap.servers = localhost:9092, а потом все запускается корректно с настройкой bootstrap.servers = kafka:9092? Привожу часть логов и файлы application.properties, Dockerfile, docker-compose.yaml
При запуске docker-compose в какой-то момент выводится сообщение
app_1 | 2023-01-28 07:02:07.344 INFO 1 --- [ main] o.a.k.clients.admin.AdminClientConfig : AdminClientConfig values:
app_1 | bootstrap.servers = [localhost:9092]
и дальше какое то время повторяются эти ошибки:
app_1 | 2023-01-28 07:02:09.395 WARN 1 --- [| adminclient-1] org.apache.kafka.clients.NetworkClient : [AdminClient clientId=adminclient-1] Connection to node -1 (localhost/127.0.0.1:9092) could not be established.Broker may not be available.
app_1 | 2023-01-28 07:02:09.451 INFO 1 --- [| adminclient-1] org.apache.kafka.clients.NetworkClient : [AdminClient clientId=adminclient-1] Node -1 disconnected.
Но потом выводятся вот такие строки и в итоге всё запускается как нужно.
Но почему есть ошибки касаемые localhost и как их избежать?
app_1 | 2023-01-28 07:02:49.463 INFO 1 --- [ main] o.a.k.clients.consumer.ConsumerConfig : ConsumerConfig values:
...
app_1 | bootstrap.servers = [kafka:9092]
application.properties
spring.profiles.active=development
# datasource
spring.datasource.driver-class-name=org.postgresql.Driver
spring.datasource.url=jdbc:postgresql://localhost:5432/bank_back_office
spring.datasource.username=postgres
spring.datasource.password=postgres
# hibernate
spring.jpa.properties.hibernate.dialect=org.hibernate.dialect.PostgreSQLDialect
spring.jpa.properties.hibernate.show_sql=true
spring.jpa.properties.hibernate.format_sql=true
spring.jpa.hibernate.ddl-auto=none
spring.mvc.format.date=yyyy-MM-dd
spring.mvc.format.date-time=yyyy-MM-dd HH:mm:ss
spring.mvc.format.time=HH:mm:ss
server.error.include-message=always
server.servlet.contextPath=/api
spring.mvc.pathmatch.matching-strategy=ant-path-matcher
# kafka.consumer
spring.kafka.consumer.bootstrap-servers = localhost:9092
spring.kafka.consumer.auto-offset-reset = earliest
spring.kafka.consumer.key-deserializer = org.apache.kafka.common.serialization.StringDeserializer
spring.kafka.consumer.value-deserializer = org.springframework.kafka.support.serializer.JsonDeserializer
spring.kafka.consumer.properties.spring.json.trusted.packages=*
# kafka.producer
spring.kafka.producer.bootstrap-servers = localhost:9092
spring.kafka.producer.key-serializer = org.apache.kafka.common.serialization.StringSerializer
spring.kafka.producer.value-serializer = org.springframework.kafka.support.serializer.JsonSerializer
Dockerfile
FROM openjdk:17-oracle
COPY target/bankbackoffice.jar bankbackoffice.jar
EXPOSE 8080
CMD ["java", "-jar", "/bankbackoffice.jar"]
docker-compose.yaml
version: '3'
services:
app:
build: .
depends_on:
- postgres
- zookeeper
- kafka
ports:
- "8080:8080"
environment:
SPRING_DATASOURCE_URL: jdbc:postgresql://postgres:5432/bank_back_office
SPRING_KAFKA_CONSUMER_BOOTSTRAP-SERVERS: kafka:9092
SPRING_KAFKA_PRODUCER_BOOTSTRAP-SERVERS: kafka:9092
postgres:
image: postgres:14
environment:
POSTGRES_DB: bank_back_office
zookeeper:
image: confluentinc/cp-zookeeper:5.1.2
environment:
ZOOKEEPER_CLIENT_PORT: 2181
kafka:
image: confluentinc/cp-kafka:5.1.2
depends_on:
- zookeeper
environment:
KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka:9092
Логи запуска docker-compose
app_1 | 2023-01-28 07:02:07.344 INFO 1 --- [ main] o.a.k.clients.admin.AdminClientConfig : AdminClientConfig values:
app_1 | bootstrap.servers = [localhost:9092]
app_1 | client.dns.lookup = use_all_dns_ips
app_1 | client.id =
app_1 | connections.max.idle.ms = 300000
app_1 | default.api.timeout.ms = 60000
app_1 | metadata.max.age.ms = 300000
app_1 | metric.reporters = []
app_1 | metrics.num.samples = 2
app_1 | metrics.recording.level = INFO
app_1 | metrics.sample.window.ms = 30000
app_1 | receive.buffer.bytes = 65536
app_1 | reconnect.backoff.max.ms = 1000
app_1 | reconnect.backoff.ms = 50
app_1 | request.timeout.ms = 30000
app_1 | retries = 2147483647
app_1 | retry.backoff.ms = 100
app_1 | sasl.client.callback.handler.class = null
app_1 | sasl.jaas.config = null
app_1 | sasl.kerberos.kinit.cmd = /usr/bin/kinit
app_1 | sasl.kerberos.min.time.before.relogin = 60000
app_1 | sasl.kerberos.service.name = null
app_1 | sasl.kerberos.ticket.renew.jitter = 0.05
app_1 | sasl.kerberos.ticket.renew.window.factor = 0.8
app_1 | sasl.login.callback.handler.class = null
app_1 | sasl.login.class = null
app_1 | sasl.login.connect.timeout.ms = null
app_1 | sasl.login.read.timeout.ms = null
app_1 | sasl.login.refresh.buffer.seconds = 300
app_1 | sasl.login.refresh.min.period.seconds = 60
app_1 | sasl.login.refresh.window.factor = 0.8
app_1 | sasl.login.refresh.window.jitter = 0.05
app_1 | sasl.login.retry.backoff.max.ms = 10000
app_1 | sasl.login.retry.backoff.ms = 100
app_1 | sasl.mechanism = GSSAPI
app_1 | sasl.oauthbearer.clock.skew.seconds = 30
app_1 | sasl.oauthbearer.expected.audience = null
app_1 | sasl.oauthbearer.expected.issuer = null
app_1 | sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
app_1 | sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
app_1 | sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
app_1 | sasl.oauthbearer.jwks.endpoint.url = null
app_1 | sasl.oauthbearer.scope.claim.name = scope
app_1 | sasl.oauthbearer.sub.claim.name = sub
app_1 | sasl.oauthbearer.token.endpoint.url = null
app_1 | security.protocol = PLAINTEXT
app_1 | security.providers = null
app_1 | send.buffer.bytes = 131072
app_1 | socket.connection.setup.timeout.max.ms = 30000
app_1 | socket.connection.setup.timeout.ms = 10000
app_1 | ssl.cipher.suites = null
app_1 | ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
app_1 | ssl.endpoint.identification.algorithm = https
app_1 | ssl.engine.factory.class = null
app_1 | ssl.key.password = null
app_1 | ssl.keymanager.algorithm = SunX509
app_1 | ssl.keystore.certificate.chain = null
app_1 | ssl.keystore.key = null
app_1 | ssl.keystore.location = null
app_1 | ssl.keystore.password = null
app_1 | ssl.keystore.type = JKS
app_1 | ssl.protocol = TLSv1.3
app_1 | ssl.provider = null
app_1 | ssl.secure.random.implementation = null
app_1 | ssl.trustmanager.algorithm = PKIX
app_1 | ssl.truststore.certificates = null
app_1 | ssl.truststore.location = null
app_1 | ssl.truststore.password = null
app_1 | ssl.truststore.type = JKS
app_1 |
app_1 | 2023-01-28 07:02:09.001 INFO 1 --- [ main] o.a.kafka.common.utils.AppInfoParser : Kafka version: 3.1.2
app_1 | 2023-01-28 07:02:09.006 INFO 1 --- [ main] o.a.kafka.common.utils.AppInfoParser : Kafka commitId: f8c67dc3ae0a3265
app_1 | 2023-01-28 07:02:09.008 INFO 1 --- [ main] o.a.kafka.common.utils.AppInfoParser : Kafka startTimeMs: 1674889328984
app_1 | 2023-01-28 07:02:09.388 INFO 1 --- [| adminclient-1] org.apache.kafka.clients.NetworkClient : [AdminClient clientId=adminclient-1] N
ode -1 disconnected.
app_1 | 2023-01-28 07:02:09.395 WARN 1 --- [| adminclient-1] org.apache.kafka.clients.NetworkClient : [AdminClient clientId=adminclient-1] C
onnection to node -1 (localhost/127.0.0.1:9092) could not be established. Broker may not be available.
app_1 | 2023-01-28 07:02:09.451 INFO 1 --- [| adminclient-1] org.apache.kafka.clients.NetworkClient : [AdminClient clientId=adminclient-1] N
ode -1 disconnected.
app_1 | 2023-01-28 07:02:09.457 WARN 1 --- [| adminclient-1] org.apache.kafka.clients.NetworkClient : [AdminClient clientId=adminclient-1] C
onnection to node -1 (localhost/127.0.0.1:9092) could not be established. Broker may not be available.
app_1 | 2023-01-28 07:02:09.560 INFO 1 --- [| adminclient-1] org.apache.kafka.clients.NetworkClient : [AdminClient clientId=adminclient-1] N
ode -1 disconnected.
app_1 | 2023-01-28 07:02:09.561 WARN 1 --- [| adminclient-1] org.apache.kafka.clients.NetworkClient : [AdminClient clientId=adminclient-1] C
...
app_1 | 2023-01-28 07:02:38.552 WARN 1 --- [| adminclient-1] org.apache.kafka.clients.NetworkClient : [AdminClient clientId=adminclient-1] C
onnection to node -1 (localhost/127.0.0.1:9092) could not be established. Broker may not be available.
app_1 | 2023-01-28 07:02:39.135 INFO 1 --- [| adminclient-1] o.a.k.c.a.i.AdminMetadataManager : [AdminClient clientId=adminclient-1] M
etadata update failed
app_1 |
app_1 | org.apache.kafka.common.errors.TimeoutException: Timed out waiting for a node assignment. Call: fetchMetadata
app_1 |
app_1 | 2023-01-28 07:02:39.285 ERROR 1 --- [ main] o.springframework.kafka.core.KafkaAdmin : Could not configure topics
app_1 |
app_1 | org.springframework.kafka.KafkaException: Timed out waiting to get existing topics; nested exception is java.util.concurrent.TimeoutExc
eption
app_1 | at org.springframework.kafka.core.KafkaAdmin.lambda$checkPartitions$10(KafkaAdmin.java:423)
app_1 | at java.base/java.util.HashMap.forEach(HashMap.java:1421)
app_1 | at org.springframework.kafka.core.KafkaAdmin.checkPartitions(KafkaAdmin.java:402)
app_1 | at org.springframework.kafka.core.KafkaAdmin.addOrModifyTopicsIfNeeded(KafkaAdmin.java:298)
app_1 | at org.springframework.kafka.core.KafkaAdmin.initialize(KafkaAdmin.java:201)
app_1 | at org.springframework.kafka.core.KafkaAdmin.afterSingletonsInstantiated(KafkaAdmin.java:171)
app_1 | at org.springframework.beans.factory.support.DefaultListableBeanFactory.preInstantiateSingletons(DefaultListableBeanFactory.java:974)
app_1 | at org.springframework.context.support.AbstractApplicationContext.finishBeanFactoryInitialization(AbstractApplicationContext.java:918)
app_1 | at org.springframework.context.support.AbstractApplicationContext.refresh(AbstractApplicationContext.java:583)
app_1 | at org.springframework.boot.web.servlet.context.ServletWebServerApplicationContext.refresh(ServletWebServerApplicationContext.java:147
)
app_1 | at org.springframework.boot.SpringApplication.refresh(SpringApplication.java:734)
app_1 | at org.springframework.boot.SpringApplication.refreshContext(SpringApplication.java:408)
app_1 | at org.springframework.boot.SpringApplication.run(SpringApplication.java:308)
app_1 | at org.springframework.boot.SpringApplication.run(SpringApplication.java:1306)
app_1 | at org.springframework.boot.SpringApplication.run(SpringApplication.java:1295)
app_1 | at com.vio.BankBackOffice.BankBackOfficeApplication.main(BankBackOfficeApplication.java:16)
app_1 | at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
app_1 | at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77)
app_1 | at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
app_1 | at java.base/java.lang.reflect.Method.invoke(Method.java:568)
app_1 | at org.springframework.boot.loader.MainMethodRunner.run(MainMethodRunner.java:49)
app_1 | at org.springframework.boot.loader.Launcher.launch(Launcher.java:108)
app_1 | at org.springframework.boot.loader.Launcher.launch(Launcher.java:58)
app_1 | at org.springframework.boot.loader.JarLauncher.main(JarLauncher.java:65)
app_1 | Caused by: java.util.concurrent.TimeoutException: null
app_1 | at java.base/java.util.concurrent.CompletableFuture.timedGet(CompletableFuture.java:1960)
app_1 | at java.base/java.util.concurrent.CompletableFuture.get(CompletableFuture.java:2095)
app_1 | at org.apache.kafka.common.internals.KafkaFutureImpl.get(KafkaFutureImpl.java:180)
app_1 | at org.springframework.kafka.core.KafkaAdmin.lambda$checkPartitions$10(KafkaAdmin.java:405)
app_1 | ... 23 common frames omitted
app_1 |
app_1 | 2023-01-28 07:02:39.592 INFO 1 --- [| adminclient-1] org.apache.kafka.clients.NetworkClient : [AdminClient clientId=adminclient-1] N
ode -1 disconnected.
app_1 | 2023-01-28 07:02:39.592 WARN 1 --- [| adminclient-1] org.apache.kafka.clients.NetworkClient : [AdminClient clientId=adminclient-1] C
onnection to node -1 (localhost/127.0.0.1:9092) could not be established. Broker may not be available.
app_1 | 2023-01-28 07:02:40.806 INFO 1 --- [| adminclient-1] org.apache.kafka.clients.NetworkClient : [AdminClient clientId=adminclient-1] N
ode -1 disconnected.
app_1 | 2023-01-28 07:02:40.807 WARN 1 --- [| adminclient-1] org.apache.kafka.clients.NetworkClient : [AdminClient clientId=adminclient-1] C
onnection to node -1 (localhost/127.0.0.1:9092) could not be established. Broker may not be available.
app_1 | 2023-01-28 07:02:42.022 INFO 1 --- [| adminclient-1] org.apache.kafka.clients.NetworkClient : [AdminClient clientId=adminclient-1] N
ode -1 disconnected.
app_1 | 2023-01-28 07:02:42.025 WARN 1 --- [| adminclient-1] org.apache.kafka.clients.NetworkClient : [AdminClient clientId=adminclient-1] C
onnection to node -1 (localhost/127.0.0.1:9092) could not be established. Broker may not be available.
app_1 | 2023-01-28 07:02:42.942 INFO 1 --- [| adminclient-1] org.apache.kafka.clients.NetworkClient : [AdminClient clientId=adminclient-1] N
ode -1 disconnected.
app_1 | 2023-01-28 07:02:42.951 WARN 1 --- [| adminclient-1] org.apache.kafka.clients.NetworkClient : [AdminClient clientId=adminclient-1] C
onnection to node -1 (localhost/127.0.0.1:9092) could not be established. Broker may not be available.
app_1 | 2023-01-28 07:02:44.170 INFO 1 --- [| adminclient-1] org.apache.kafka.clients.NetworkClient : [AdminClient clientId=adminclient-1] N
ode -1 disconnected.
app_1 | 2023-01-28 07:02:49.314 INFO 1 --- [| adminclient-1] o.a.k.c.a.i.AdminMetadataManager : [AdminClient clientId=adminclient-1] M
etadata update failed
app_1 |
app_1 | org.apache.kafka.common.errors.TimeoutException: The AdminClient thread has exited. Call: fetchMetadata
app_1 |
app_1 | 2023-01-28 07:02:49.317 INFO 1 --- [| adminclient-1] o.a.k.clients.admin.KafkaAdminClient : [AdminClient clientId=adminclient-1] T
imed out 2 remaining operation(s) during close.
app_1 | 2023-01-28 07:02:49.362 INFO 1 --- [| adminclient-1] org.apache.kafka.common.metrics.Metrics : Metrics scheduler closed
app_1 | 2023-01-28 07:02:49.364 INFO 1 --- [| adminclient-1] org.apache.kafka.common.metrics.Metrics : Closing reporter org.apache.kafka.comm
on.metrics.JmxReporter
app_1 | 2023-01-28 07:02:49.365 INFO 1 --- [| adminclient-1] org.apache.kafka.common.metrics.Metrics : Metrics reporters closed
app_1 | 2023-01-28 07:02:49.463 INFO 1 --- [ main] o.a.k.clients.consumer.ConsumerConfig : ConsumerConfig values:
app_1 | allow.auto.create.topics = true
app_1 | auto.commit.interval.ms = 5000
app_1 | auto.offset.reset = earliest
app_1 | bootstrap.servers = [kafka:9092]
app_1 | check.crcs = true
app_1 | client.dns.lookup = use_all_dns_ips
app_1 | client.id = consumer-backoffice-1
app_1 | client.rack =
app_1 | connections.max.idle.ms = 540000
app_1 | default.api.timeout.ms = 60000
app_1 | enable.auto.commit = false
app_1 | exclude.internal.topics = true
app_1 | fetch.max.bytes = 52428800
app_1 | fetch.max.wait.ms = 500
app_1 | fetch.min.bytes = 1
app_1 | group.id = backoffice
app_1 | group.instance.id = null
app_1 | heartbeat.interval.ms = 3000
app_1 | interceptor.classes = []
app_1 | internal.leave.group.on.close = true
app_1 | internal.throw.on.fetch.stable.offset.unsupported = false
app_1 | isolation.level = read_uncommitted
app_1 | key.deserializer = class org.apache.kafka.common.serialization.StringDeserializer
app_1 | max.partition.fetch.bytes = 1048576
app_1 | max.poll.interval.ms = 300000
app_1 | max.poll.records = 500
app_1 | metadata.max.age.ms = 300000
app_1 | metric.reporters = []
app_1 | metrics.num.samples = 2
app_1 | metrics.recording.level = INFO
app_1 | metrics.sample.window.ms = 30000
app_1 | partition.assignment.strategy = [class org.apache.kafka.clients.consumer.RangeAssignor, class org.apache.kafka.clients.consumer.Cooper
ativeStickyAssignor]
app_1 | receive.buffer.bytes = 65536
app_1 | reconnect.backoff.max.ms = 1000
app_1 | reconnect.backoff.ms = 50
app_1 | request.timeout.ms = 30000
app_1 | retry.backoff.ms = 100
app_1 | sasl.client.callback.handler.class = null
app_1 | sasl.jaas.config = null
app_1 | sasl.kerberos.kinit.cmd = /usr/bin/kinit
app_1 | sasl.kerberos.min.time.before.relogin = 60000
app_1 | sasl.kerberos.service.name = null
app_1 | sasl.kerberos.ticket.renew.jitter = 0.05
app_1 | sasl.kerberos.ticket.renew.window.factor = 0.8
app_1 | sasl.login.callback.handler.class = null
app_1 | sasl.login.class = null
app_1 | sasl.login.connect.timeout.ms = null
app_1 | sasl.login.read.timeout.ms = null
app_1 | sasl.login.refresh.buffer.seconds = 300
app_1 | sasl.login.refresh.min.period.seconds = 60
app_1 | sasl.login.refresh.window.factor = 0.8
app_1 | sasl.login.refresh.window.jitter = 0.05
app_1 | sasl.login.retry.backoff.max.ms = 10000
app_1 | sasl.login.retry.backoff.ms = 100
app_1 | sasl.mechanism = GSSAPI
app_1 | sasl.oauthbearer.clock.skew.seconds = 30
app_1 | sasl.oauthbearer.expected.audience = null
app_1 | sasl.oauthbearer.expected.issuer = null
app_1 | sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
app_1 | sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
app_1 | sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
app_1 | sasl.oauthbearer.jwks.endpoint.url = null
app_1 | sasl.oauthbearer.scope.claim.name = scope
app_1 | sasl.oauthbearer.sub.claim.name = sub
app_1 | sasl.oauthbearer.token.endpoint.url = null
app_1 | security.protocol = PLAINTEXT
app_1 | security.providers = null
app_1 | send.buffer.bytes = 131072
app_1 | session.timeout.ms = 45000
app_1 | socket.connection.setup.timeout.max.ms = 30000
app_1 | socket.connection.setup.timeout.ms = 10000
app_1 | ssl.cipher.suites = null
app_1 | ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
app_1 | ssl.endpoint.identification.algorithm = https
app_1 | ssl.engine.factory.class = null
app_1 | ssl.key.password = null
app_1 | ssl.keymanager.algorithm = SunX509
app_1 | ssl.keystore.certificate.chain = null
app_1 | ssl.keystore.key = null
app_1 | ssl.keystore.location = null
app_1 | ssl.keystore.password = null
app_1 | ssl.keystore.type = JKS
app_1 | ssl.protocol = TLSv1.3
app_1 | ssl.provider = null
app_1 | ssl.secure.random.implementation = null
app_1 | ssl.trustmanager.algorithm = PKIX
app_1 | ssl.truststore.certificates = null
app_1 | ssl.truststore.location = null
app_1 | ssl.truststore.password = null
app_1 | ssl.truststore.type = JKS
app_1 | value.deserializer = class org.springframework.kafka.support.serializer.JsonDeserializer
app_1 |
app_1 | 2023-01-28 07:02:49.672 INFO 1 --- [ main] o.a.kafka.common.utils.AppInfoParser : Kafka version: 3.1.2
app_1 | 2023-01-28 07:02:49.673 INFO 1 --- [ main] o.a.kafka.common.utils.AppInfoParser : Kafka commitId: f8c67dc3ae0a3265
app_1 | 2023-01-28 07:02:49.677 INFO 1 --- [ main] o.a.kafka.common.utils.AppInfoParser : Kafka startTimeMs: 1674889369672
app_1 | 2023-01-28 07:02:49.694 INFO 1 --- [ main] o.a.k.clients.consumer.KafkaConsumer : [Consumer clientId=consumer-backoffice
-1, groupId=backoffice] Subscribed to topic(s): backoffice.cardorder
app_1 | 2023-01-28 07:02:49.934 INFO 1 --- [ main] o.s.b.w.embedded.tomcat.TomcatWebServer : Tomcat started on port(s): 8080 (http)
with context path '/api'
app_1 | 2023-01-28 07:02:49.944 INFO 1 --- [ main] d.s.w.p.DocumentationPluginsBootstrapper : Context refreshed
app_1 | 2023-01-28 07:02:50.121 INFO 1 --- [ main] d.s.w.p.DocumentationPluginsBootstrapper : Found 1 custom documentation plugin(s)
app_1 | 2023-01-28 07:02:50.218 INFO 1 --- [ main] s.d.s.w.s.ApiListingReferenceScanner : Scanning for api listing references
app_1 | 2023-01-28 07:02:51.379 INFO 1 --- [ main] .d.s.w.r.o.CachingOperationNameGenerator : Generating unique operation named: cre
ateUsingPOST_1
app_1 | 2023-01-28 07:02:51.428 INFO 1 --- [ main] .d.s.w.r.o.CachingOperationNameGenerator : Generating unique operation named: del
eteUsingDELETE_1
app_1 | 2023-01-28 07:02:51.861 INFO 1 --- [ main] .d.s.w.r.o.CachingOperationNameGenerator : Generating unique operation named: upd
ateUsingPUT_1
app_1 | 2023-01-28 07:02:52.015 INFO 1 --- [ main] .d.s.w.r.o.CachingOperationNameGenerator : Generating unique operation named: cre
ateUsingPOST_2
app_1 | 2023-01-28 07:02:52.055 INFO 1 --- [ main] .d.s.w.r.o.CachingOperationNameGenerator : Generating unique operation named: del
eteUsingDELETE_2
kafka_1 | [2023-01-28 07:02:52,325] INFO [GroupCoordinator 1]: Preparing to rebalance group backoffice in state PreparingRebalance with old gener
ation 36 (__consumer_offsets-37) (reason: Adding new member consumer-backoffice-1-efe85b72-9e6b-40a4-930b-be85e935bdbe) (kafka.coordinator.group.Group
Coordinator)
app_1 | 2023-01-28 07:02:52.488 INFO 1 --- [ main] c.k.B.BankBackOfficeApplication : Started BankBackOfficeApplication in 1
46.624 seconds (JVM running for 160.897)
app_1 | 2023-01-28 07:02:55.398 INFO 1 --- [ntainer#0-0-C-1] o.a.k.c.c.internals.ConsumerCoordinator : [Consumer clientId=consumer-backoffice
-1, groupId=backoffice] Successfully joined group with generation Generation{generationId=37, memberId='consumer-backoffice-1-efe85b72-9e6b-40a4-930b-
be85e935bdbe', protocol='range'}
kafka_1 | [2023-01-28 07:02:55,367] INFO [GroupCoordinator 1]: Stabilized group backoffice generation 37 (__consumer_offsets-37) (kafka.coordinat
or.group.GroupCoordinator)
app_1 | 2023-01-28 07:02:55.402 INFO 1 --- [ntainer#0-0-C-1] o.a.k.c.c.internals.ConsumerCoordinator : [Consumer clientId=consumer-backoffice
-1, groupId=backoffice] Finished assignment for group at generation 37: {consumer-backoffice-1-efe85b72-9e6b-40a4-930b-be85e935bdbe=Assignment(partiti
ons=[backoffice.cardorder-0])}
app_1 | 2023-01-28 07:02:55.455 INFO 1 --- [ntainer#0-0-C-1] o.a.k.c.c.internals.ConsumerCoordinator : [Consumer clientId=consumer-backoffice
-1, groupId=backoffice] Successfully synced group in generation Generation{generationId=37, memberId='consumer-backoffice-1-efe85b72-9e6b-40a4-930b-be
85e935bdbe', protocol='range'}
app_1 | 2023-01-28 07:02:55.457 INFO 1 --- [ntainer#0-0-C-1] o.a.k.c.c.internals.ConsumerCoordinator : [Consumer clientId=consumer-backoffice
-1, groupId=backoffice] Notifying assignor about the new Assignment(partitions=[backoffice.cardorder-0])
app_1 | 2023-01-28 07:02:55.471 INFO 1 --- [ntainer#0-0-C-1] o.a.k.c.c.internals.ConsumerCoordinator : [Consumer clientId=consumer-backoffice
-1, groupId=backoffice] Adding newly assigned partitions: backoffice.cardorder-0
app_1 | 2023-01-28 07:02:55.553 INFO 1 --- [ntainer#0-0-C-1] o.a.k.c.c.internals.ConsumerCoordinator : [Consumer clientId=consumer-backoffice
-1, groupId=backoffice] Setting offset for partition backoffice.cardorder-0 to the committed offset FetchPosition{offset=9, offsetEpoch=Optional.empty
, currentLeader=LeaderAndEpoch{leader=Optional[kafka:9092 (id: 1 rack: null)], epoch=absent}}