download and install minikube
download and install libvirt, and run minikube with an extra disk (this is later on needed for Rook/Ceph):
minikube start --driver=kvm2 --extra-disks=1
kubectl apply -f https://github.com/knative/serving/releases/download/knative-v1.1.0/serving-crds.yaml
kubectl apply -f https://github.com/knative/serving/releases/download/knative-v1.1.0/serving-core.yaml
install Kourier network layer:
kubectl apply -f https://github.com/knative/net-kourier/releases/download/knative-v1.1.0/kourier.yaml
kubectl patch configmap/config-network --namespace knative-serving --type merge --patch '{"data":{"ingress-class":"kourier.ingress.networking.knative.dev"}}'
install Knative eventing:
kubectl apply -f https://github.com/knative/eventing/releases/download/knative-v1.1.0/eventing-crds.yaml
kubectl apply -f https://github.com/knative/eventing/releases/download/knative-v1.1.0/eventing-core.yaml
kubectl apply -f https://github.com/knative/eventing/releases/download/knative-v1.1.0/in-memory-channel.yaml
install Rook (with developer builds):
kubectl apply -f https://gist.githubusercontent.com/yuvalif/aec465b18fe02d8c7387b50dda59c852/raw/76a20274744f94ff84c76ff62c25bed7b568ec8d/crds.yaml
kubectl apply -f https://gist.githubusercontent.com/yuvalif/aec465b18fe02d8c7387b50dda59c852/raw/76a20274744f94ff84c76ff62c25bed7b568ec8d/operator.yaml
kubectl apply -f https://raw.githubusercontent.com/rook/rook/master/deploy/examples/operator.yaml
cat << EOF | kubectl apply -f -
apiVersion: messaging.knative.dev/v1
kind: InMemoryChannel
metadata:
name: demo-channel
EOF
create an "event-display" sink and subscribe it to the channel:
cat << EOF | kubectl apply -f -
apiVersion: messaging.knative.dev/v1
kind: Subscription
metadata:
name: my-subscription
spec:
channel:
apiVersion: messaging.knative.dev/v1
kind: InMemoryChannel
name: demo-channel
subscriber:
ref:
apiVersion: serving.knative.dev/v1
kind: Service
name: event-display
---
apiVersion: serving.knative.dev/v1
kind: Service
metadata:
name: event-display
spec:
template:
spec:
containers:
- image: gcr.io/knative-releases/knative.dev/eventing/cmd/event_display
EOF
Basic Testing Using cURL [optional step]
kubectl run my-curl --image=quay.io/ylifshit/curl-box
send cloudevent to the channel from the cURL pod:
URL=$(kubectl get imc demo-channel -o jsonpath='{.status.address.url}')
kubectl exec -it my-curl -- curl -v -X POST \
-H "Content-Type: application/json" \
-H "Ce-Specversion: 1.0" \
-H "Ce-Type: com.example.someevent" \
-H "Ce-Time: 2018-04-05T03:56:24Z" \
-H "Ce-Id: 1234-1234-1234" \
-H "Ce-Source: curl" \
-d '{"hello": "world!", "goodbye": "cruel world"}' $URL
and check the logs of the "event-display" pod:
kubectl logs -l serving.knative.dev/service=event-display -c user-container --tail=-1
create the Ceph cluster (from developer build):
kubectl apply -f https://gist.githubusercontent.com/yuvalif/aec465b18fe02d8c7387b50dda59c852/raw/76a20274744f94ff84c76ff62c25bed7b568ec8d/cluster-test.yaml
kubectl apply -f https://raw.githubusercontent.com/rook/rook/master/deploy/examples/object-test.yaml
create the topic pointing to the Knative channel:
URL=$(kubectl get imc demo-channel -o jsonpath='{.status.address.url}')
cat << EOF | kubectl apply -f -
apiVersion: ceph.rook.io/v1
kind: CephBucketTopic
metadata:
name: my-topic
spec:
objectStoreName: my-store
objectStoreNamespace: rook-ceph
opaqueData: [email protected]
endpoint:
http:
uri: $URL
cloudEvents: true
EOF
create the bucket notification:
cat << EOF | kubectl apply -f -
apiVersion: ceph.rook.io/v1
kind: CephBucketNotification
metadata:
name: my-notification
spec:
topic: my-topic
EOF
create storage class and bucket assigned with the above notification:
kubectl apply -f https://raw.githubusercontent.com/rook/rook/master/deploy/examples/storageclass-bucket-delete.yaml
cat << EOF | kubectl apply -f -
apiVersion: objectbucket.io/v1alpha1
kind: ObjectBucketClaim
metadata:
name: ceph-notification-bucket
labels:
bucket-notification-my-notification: my-notification
spec:
bucketName: ceph-bkt
storageClassName: rook-ceph-delete-bucket
EOF
Test Bucket Notifications
cat << EOF | kubectl apply -f -
apiVersion: v1
kind: Pod
metadata:
name: my-client
spec:
containers:
- name: aws-cli
image: quay.io/ylifshit/aws-cli
EOF
get the AWS secrets and set them to the pod:
AWS_ACCESS_KEY_ID=$(kubectl get secret ceph-notification-bucket -o jsonpath='{.data.AWS_ACCESS_KEY_ID}' | base64 --decode)
AWS_SECRET_ACCESS_KEY=$(kubectl get secret ceph-notification-bucket -o jsonpath='{.data.AWS_SECRET_ACCESS_KEY}' | base64 --decode)
kubectl exec -it my-client -- aws configure set aws_access_key_id $AWS_ACCESS_KEY_ID
kubectl exec -it my-client -- aws configure set aws_secret_access_key $AWS_SECRET_ACCESS_KEY
upload a file via this pod:
RGW_MY_STORE=$(kubectl get service -n rook-ceph rook-ceph-rgw-my-store -o jsonpath='{.spec.clusterIP}')
kubectl exec -it my-client -- aws --endpoint-url http://$RGW_MY_STORE:80 s3 cp /var/log/dnf.librepo.log s3://ceph-bkt
verify that the event reached the sink:
kubectl logs -l serving.knative.dev/service=event-display -c user-container --tail=-1