I have an Akka Cluster Sharding setup with 6 nodes and multiple shard actors. Load is balanced across the nodes and that works fine, but when the leader node (the first node in the cluster) comes under heavy load, it stops responding: it can't handle any messages and I have to restart the whole cluster. I want to stop sending application messages to the leader node so that it acts only as a coordinator.
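One direction I am considering is restricting sharding to a cluster role, so entities are only allocated on nodes that carry that role and a node started without the role receives no entity traffic. A minimal sketch of the setting I mean, with "worker" as a placeholder role name that I have not actually tried:

akka.cluster.sharding {
  # Entities would only run on nodes that have this role; a node without
  # it could still send messages via a shard region proxy
  # (ClusterSharding.startProxy).
  role = "worker"
}

I am not sure this achieves exactly what I want, though: if the first node lacks the role, I assume the ShardCoordinator singleton would also move to the oldest "worker" node instead of staying on the first node.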
This is the Akka configuration:
include file("conf/dispatchers.conf")

spray {
  can {
    host-connector {
      max-connections = 16
    }
  }
}

akka {
  loglevel: "INFO"

  diagnostics {
    recorder {
      dir = "debug"
    }
    checker {
      enabled = off
    }
  }

  actor {
    default-dispatcher {
      type = "Dispatcher"
      executor = "fork-join-executor"
      fork-join-executor {
        parallelism-min = 16
        parallelism-factor = 1.0
        parallelism-max = 64
      }
      throughput = 100
    }
    deployment {
      default {
      }
    }
  }

  cluster {
    roles: [${?NODE_ROLE}]
    seed-nodes: [${?SEED_NODE_1}, ${?SEED_NODE_2}, ${?SEED_NODE_3}, ${?SEED_NODE_4}, ${?SEED_NODE_5}, ${?SEED_NODE_6}]
    seed-node-timeout = 30s
    min-nr-of-members = 3
    failure-detector {
      acceptable-heartbeat-pause = 5s # default 3s
      threshold = 12                  # default 8.0
    }
    sharding {
      use-dispatcher = common-dispatcher
      waiting-for-state-timeout = 30s
    }
  }

  remote {
    enabled-transports: ["akka.remote.netty.tcp"]
    netty.tcp {
      hostname: "127.0.0.1"
      hostname: ${?PUBLISH_HOST}
      port: 2552
      port: ${?PUBLISH_PORT}
      maximum-frame-size: 6000000b
      use-dispatcher-for-io = common-dispatcher
    }
  }

  persistence {
    journal.plugin: "akka.persistence.journal.inmem"
    snapshot-store.plugin: "akka.persistence.snapshot-store.local"
  }
}

cassandra-journal {
  keyspace = "akka"
  contact-points = [
    "dc1-2.dc1.cassandra-db.svc.cluster.local:9042",
    "dc1-3.dc1.cassandra-db.svc.cluster.local:9042",
    "dc1-6.dc1.cassandra-db.svc.cluster.local:9042"
  ]
}

cassandra-snapshot-store {
  keyspace = "akka_snapshot"
  contact-points = [
    "dc1-2.dc1.cassandra-db.svc.cluster.local:9042",
    "dc1-3.dc1.cassandra-db.svc.cluster.local:9042",
    "dc1-6.dc1.cassandra-db.svc.cluster.local:9042"
  ]
}

common {
  default-timeout: 60 seconds
}
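Since each node already reads its role from NODE_ROLE (the cluster.roles line above), I imagine the only per-node difference would be how the processes are started, roughly like this (hypothetical, "worker" is my placeholder):

# worker nodes: NODE_ROLE=worker -> akka.cluster.roles = ["worker"]
# first node:   NODE_ROLE unset  -> akka.cluster.roles = []

Is that the right way to keep entity traffic off one node, or is there a better mechanism, for example a custom ShardAllocationStrategy?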