Created May 8, 2014 22:09
Demonstration of Akka Cluster Singleton Communication Problem
package sample.cluster.simple

import com.typesafe.config.ConfigFactory
import akka.actor.ActorSystem
import akka.actor.Props
import akka.actor._
import akka.contrib.pattern.ClusterSingletonManager
import akka.contrib.pattern.ClusterSingletonProxy

// The cluster singleton: echoes what it receives and replies to sender().
class MyClusterActor extends Actor {
  def receive = {
    case msg => {
      println("MyClusterActor got *" + msg + "* from " + sender().path)
      sender() ! "Here's a response from MyClusterActor"
    }
  }
}

case class Start(actor: ActorRef)

// A plain remote actor: on Start it sends a greeting to the given actor
// (the singleton proxy) and prints whatever comes back.
class MyRemoteActor extends Actor {
  def receive = {
    case Start(actor) => {
      println("MyRemoteActor got Start with " + actor.path)
      actor ! "Hello from RemoteActor"
    }
    case msg => {
      println("MyRemoteActor got *" + msg + "* from " + sender().path)
    }
  }
}

// Runs on VM #2: a plain remoting system on 2551 hosting MyRemoteActor,
// plus a cluster node on 2552 holding a proxy to the singleton.
object RemoteSystem {
  val remoteConfig = ConfigFactory.parseString("akka.actor.provider=\"akka.remote.RemoteActorRefProvider\"").
    withFallback(ConfigFactory.parseString("akka.remote.enabled-transports=[\"akka.remote.netty.tcp\"]")).
    withFallback(ConfigFactory.parseString("akka.remote.netty.tcp.hostname=\"127.0.0.1\"")).
    withFallback(ConfigFactory.parseString("akka.remote.netty.tcp.port=2551"))
  val remoteSystem = ActorSystem("RemoteSystem", remoteConfig)
  val remoteActor = remoteSystem.actorOf(Props[MyRemoteActor], "myremoteactor")

  val clusterConfig = ConfigFactory.parseString("akka.actor.provider=\"akka.cluster.ClusterActorRefProvider\"").
    withFallback(ConfigFactory.parseString("akka.remote.enabled-transports=[\"akka.remote.netty.tcp\"]")).
    withFallback(ConfigFactory.parseString("akka.remote.netty.tcp.hostname=\"127.0.0.1\"")).
    withFallback(ConfigFactory.parseString("akka.cluster.seed-nodes=[\"akka.tcp://[email protected]:2553\"]")).
    withFallback(ConfigFactory.parseString("akka.remote.netty.tcp.port=2552"))
  val clusterSystem = ActorSystem("ClusterSystem", clusterConfig)
  val clusterActor = clusterSystem.actorOf(ClusterSingletonProxy.props(
    singletonPath = "/user/myclusteractor/active",
    role = None),
    name = "MyClusterActor")
}

// Runs on VM #1: the seed node on 2553 that hosts the ClusterSingletonManager
// (and therefore the MyClusterActor singleton itself).
object ClusterSystem {
  val clusterConfig = ConfigFactory.parseString("akka.actor.provider=\"akka.cluster.ClusterActorRefProvider\"").
    withFallback(ConfigFactory.parseString("akka.remote.enabled-transports=[\"akka.remote.netty.tcp\"]")).
    withFallback(ConfigFactory.parseString("akka.remote.netty.tcp.hostname=\"127.0.0.1\"")).
    withFallback(ConfigFactory.parseString("akka.cluster.seed-nodes=[\"akka.tcp://[email protected]:2553\"]")).
    withFallback(ConfigFactory.parseString("akka.remote.netty.tcp.port=2553"))
  val clusterSystem = ActorSystem("ClusterSystem", clusterConfig)
  clusterSystem.actorOf(ClusterSingletonManager.props(Props(classOf[MyClusterActor]), "active",
    PoisonPill, None), "myclusteractor")
  def setup() = println("Setting up ClusterSystem")
}
Tip: Scala supports triple-quoted strings, so several settings can go into a single parseString call without escaping the quotes:

parseString("""akka.remote.enabled-transports=["akka.remote.netty.tcp"]
akka.remote.netty.tcp.hostname="127.0.0.1"
""")
For convenience, you can drop this file into
akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/simple/
and do the following.

On VM #1:
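A minimal sketch of the VM #1 commands, assuming the sample project's sbt console (the exact commands are an assumption, not taken from the gist):

// VM #1: referencing the ClusterSystem object starts the seed node on 2553
// and the ClusterSingletonManager that hosts MyClusterActor
scala> import sample.cluster.simple._
scala> ClusterSystem.setup()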
So, now the cluster is up and running on port 2553, and the singleton (myclusteractor) is running on it.
Now, on VM #2:
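Again a sketch under the same assumption, this time for VM #2:

// VM #2: referencing the RemoteSystem object starts both ActorSystems:
// plain remoting on 2551 and a cluster node on 2552 that joins the seed on 2553
scala> import sample.cluster.simple._
scala> RemoteSystem.clusterActor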
Now, the cluster is set up on VM #2 on port 2552. It found the seed on 2553, joined the cluster, and located the singleton. VM #2 also has a separate system set up for remoting on port 2551.
Back on VM #1, we see it acknowledge the new node:
Now, back on VM #2, we send a message to the singleton:
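A sketch of that send (the message text here is made up):

// VM #2: send a plain String to the singleton through the ClusterSingletonProxy
scala> RemoteSystem.clusterActor ! "Hello singleton"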
Back on VM #1, we see that the cluster actor receives the message. (We don't get a response, because we didn't send the message from an actor that can receive messages.)
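If we did want a reply at the console, one option (not part of the original demonstration) would be the ask pattern, which supplies a temporary sender and returns a Future:

// VM #2 (sketch): ask the singleton through the proxy and wait for its reply
scala> import akka.pattern.ask
scala> import akka.util.Timeout
scala> import scala.concurrent.duration._
scala> implicit val timeout = Timeout(5.seconds)
scala> RemoteSystem.clusterActor ? "Hello singleton"   // Future completed by the singleton's response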
Now, back on VM #2, we send a Start message to the remoteActor, passing it the proxy for the cluster singleton. The remoteActor will then send a message to the singleton, and should receive a message back when the singleton responds to sender():
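A sketch of that command:

// VM #2: hand the remote actor the singleton proxy so it can message the singleton directly
scala> RemoteSystem.remoteActor ! Start(RemoteSystem.clusterActor)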
On VM #1, we see that the singleton gets the message from remoteActor:
But note that the address for sender() is not correct: it shows ClusterSystem when it should be RemoteSystem.
On VM #2, we see that the attempt to respond back to remoteActor indeed fails: