diff --git a/.gitignore b/.gitignore
index acc725d..e2d18c3 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,7 +1,8 @@
# vagrant data directories
+.cfg*
+.etc_*
cluster/.vagrant
cluster/export
-cluster/.etc_hosts*
-cluster/.cfg*
cluster/*.log
release
+
diff --git a/QUICKSTART.md b/QUICKSTART.md
index a86fa1b..a4018ce 100644
--- a/QUICKSTART.md
+++ b/QUICKSTART.md
@@ -1,24 +1,6 @@
# Quick Start Guide
-This document provides the steps to create a quick setup on a local Mac OS or Linux host using a cluster of Virtual Box VMs setup using vagrant.
-
-## Pre-requisites
-
-* [Install Virtual Box 5.1.14 or later]( https://www.virtualbox.org/wiki/Downloads )
-* [Install Vagrant 1.9.1 or later]( https://www.vagrantup.com/downloads.html )
-* [Install Docker 1.12 or later]( https://docs.docker.com/engine/installation/ )
-* Clone the Contiv install repository
-`git clone http://github.com/contiv/install'
-
-## Setup the cluster with Contiv for Kubernetes
-`make demo-k8s`
-
-## Setup the cluster with Contiv for Docker with Swarm
-`make demo-swarm`
-
-## Customizing the setup
-
-* The default configuration creates a 2 node cluster. To increase the number of nodes set the environment variable `CONTIV_NODES=`
+Please follow the tutorials [here](http://contiv.github.io/documents/tutorials/).
## Quick Start Guide for CentOS 7.x hosts
diff --git a/cluster/Vagrantfile b/cluster/Vagrantfile
index d5c2574..840bba9 100755
--- a/cluster/Vagrantfile
+++ b/cluster/Vagrantfile
@@ -30,7 +30,7 @@ node_os = ENV['CONTIV_NODE_OS'] || CENTOS
k8s_ver = ENV['CONTIV_K8S_VERSION'] || DEFAULT_K8S_VERSION
orc_path = case k8s_ver
when /^v1\.[45]\./ then 'k8s1.4/'
- when /^v1\.6\./ then 'k8s1.6/'
+ when /^v1\.[67]\./ then 'k8s1.6/'
else
raise "unsupported k8s version: #{k8s_ver}"
end
@@ -40,7 +40,6 @@ orchestrators = [ORC_LEGACY_SWARM, ORC_SWARM, ORC_KUBEADM]
# method to create an etc_hosts file based on the cluster info
def create_etc_hosts(node_names, node_ips, o)
- master_ip = node_ips[0]
hosts = "127.0.0.1 localhost\n"
node_names.zip(node_ips).each do |node, ip|
@@ -284,8 +283,6 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
# Run the generated swarm join command line from worker
c.vm.provision :shell, path: HOST_SHARED_FOLDER + 'worker.sh'
end
- else
- raise 'Swarm install is currently supported only on CentOS'
end
end
end # c
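
Note: the widened regex above maps both v1.6.x and v1.7.x releases onto the shared `k8s1.6/` manifests. A minimal shell sketch of the same routing, with illustrative version strings:

```sh
# Mirrors the Vagrantfile's case expression: 1.4/1.5 get the k8s1.4
# manifests, 1.6/1.7 share the k8s1.6 ones, anything else is an error.
for v in v1.4.7 v1.5.3 v1.6.4 v1.7.0 v1.8.0; do
  case "$v" in
    v1.4.* | v1.5.*) echo "$v -> k8s1.4/" ;;
    v1.6.* | v1.7.*) echo "$v -> k8s1.6/" ;;
    *) echo "$v -> unsupported" ;;
  esac
done
```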
diff --git a/install/ansible/env.json b/install/ansible/env.json
index e3b89a0..dfc7707 100644
--- a/install/ansible/env.json
+++ b/install/ansible/env.json
@@ -12,6 +12,8 @@
"docker_reset_image_state": "False",
"etcd_cleanup_state": "False",
"auth_proxy_local_install": "False",
+ "listen_url": ":9999",
+ "control_url": ":9999",
"contiv_network_local_install": "False",
"vxlan_port": "4789",
"netctl_url": "http://__NETMASTER_IP__:9999",
diff --git a/install/ansible/install.sh b/install/ansible/install.sh
index 2bfa3f7..32e7572 100644
--- a/install/ansible/install.sh
+++ b/install/ansible/install.sh
@@ -22,6 +22,7 @@ install_scheduler=false
# This is the netmaster IP that needs to be provided for the installation to proceed
netmaster=""
contiv_v2plugin_install=""
+listen_url=""
usage() {
echo "Usage:"
@@ -37,7 +38,7 @@ error_ret() {
exit 1
}
-while getopts ":n:a:im:d:v:ps:" opt; do
+while getopts ":n:a:im:d:v:ps:l:t:" opt; do
case $opt in
n)
netmaster=$OPTARG
@@ -64,6 +65,12 @@ while getopts ":n:a:im:d:v:ps:" opt; do
cluster_store=$OPTARG
install_etcd=false
;;
+ l)
+ listen_url=$OPTARG
+ ;;
+ t)
+ control_url=$OPTARG
+ ;;
:)
echo "An argument required for $OPTARG was not passed"
usage
@@ -128,9 +135,19 @@ if [ "$cluster_store" == "" ]; then
cluster_store="etcd://localhost:2379"
fi
+if [ "$listen_url" == "" ]; then
+ listen_url="http://$service_vip:9999"
+fi
+
+if [ "$listen_url" == :* ]; then
+ listen_url="http://$service_vip$listen_url"
+fi
+
sed -i.bak "s#.*service_vip.*#\"service_vip\":\"$service_vip\",#g" "$env_file"
sed -i.bak "s#.*netctl_url.*#\"netctl_url\":\"http://$service_vip:9999\",#g" "$env_file"
sed -i.bak "s#.*cluster_store.*#\"cluster_store\":\"$cluster_store\",#g" "$env_file"
+sed -i.bak "s#.*listen_url.*#\"listen_url\":\"$listen_url\",#g" "$env_file"
+sed -i.bak "s#.*control_url.*#\"control_url\":\"$control_url\",#g" "$env_file"
# Copy certs
cp /var/contiv/cert.pem /ansible/roles/auth_proxy/files/
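
Note: with the guards above, a bare `:9999` passed to `-l` is expanded to `http://<service VIP>:9999` before being written to `env.json`, and `-t` falls back to `:9999` when omitted. A hypothetical standalone invocation (host address assumed):

```sh
# -n: netmaster/service VIP, -l: netmaster listen URL, -t: control URL.
./install/ansible/install.sh -n 192.168.2.10 -l ":9999" -t ":9999"
```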
diff --git a/install/ansible/install_swarm.sh b/install/ansible/install_swarm.sh
index 1f52373..bb69e7d 100755
--- a/install/ansible/install_swarm.sh
+++ b/install/ansible/install_swarm.sh
@@ -11,6 +11,8 @@ ans_opts=""
ans_user="root"
ans_key=$src_conf_path/insecure_private_key
install_scheduler=""
+listen_url=":9999"
+control_url=":9999"
netmaster=""
v2plugin_param=""
contiv_v2plugin_install=""
@@ -46,6 +48,9 @@ Advanced Options:
-v string ACI Image (default is contiv/aci-gw:latest). Use this to specify a specific version of the ACI Image.
-n string DNS name/IP address of the host to be used as the net master service VIP. This must be a host present in the cfg.yml file.
-s string URL of the cluster store to be used (for example etcd://etcd master or netmaster IP:2379)
+-l string Listen URL for the netmaster (default is ":9999")
+-t string Control URL for the netmaster (default is ":9999")
+
Additional parameters can also be updated in install/ansible/env.json file.
Examples:
@@ -69,7 +74,8 @@ EOF
# Create the config folder to be shared with the install container.
mkdir -p "$src_conf_path"
cluster_param=""
-while getopts ":f:n:a:e:ipm:d:v:u:c:k:s:" opt; do
+
+while getopts ":f:n:a:e:ipm:d:v:u:c:k:s:l:t:" opt; do
case $opt in
f)
cp "$OPTARG" "$host_contiv_config"
@@ -111,6 +117,12 @@ while getopts ":f:n:a:e:ipm:d:v:u:c:k:s:" opt; do
k)
cp "$OPTARG" "$host_tls_key"
;;
+ l)
+ listen_url=$OPTARG
+ ;;
+ t)
+ control_url=$OPTARG
+ ;;
:)
echo "An argument required for $OPTARG was not passed"
usage
@@ -166,4 +178,5 @@ ansible_mount="-v $(pwd)/ansible:/ansible:Z"
config_mount="-v $src_conf_path:$container_conf_path:Z"
cache_mount="-v $(pwd)/contiv_cache:/var/contiv_cache:Z"
mounts="$install_mount $ansible_mount $cache_mount $config_mount"
-docker run --rm --net=host $mounts $image_name sh -c "./install/ansible/install.sh $netmaster_param -a \"$ans_opts\" $install_scheduler -m $contiv_network_mode -d $fwd_mode $aci_param $cluster_param $v2plugin_param"
+
+docker run --rm --net=host $mounts $image_name sh -c "./install/ansible/install.sh $netmaster_param -a \"$ans_opts\" $install_scheduler -m $contiv_network_mode -d $fwd_mode $aci_param $cluster_param $v2plugin_param -l $listen_url -t $control_url"
\ No newline at end of file
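
Note: `install_swarm.sh` now threads `-l`/`-t` through to `install.sh` inside the install container. A hypothetical invocation, modeled on the test script at the end of this diff (paths and user are assumptions):

```sh
# -f: cluster config, -e: SSH key, -u: remote user, -i: install the
# scheduler stack; -l/-t show the netmaster URL defaults explicitly.
./install/ansible/install_swarm.sh -f cluster/cfg.yml \
  -e ~/.ssh/insecure_private_key -u admin -i -l ":9999" -t ":9999"
```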
diff --git a/install/ansible/uninstall.sh b/install/ansible/uninstall.sh
index ce38b33..60b0997 100644
--- a/install/ansible/uninstall.sh
+++ b/install/ansible/uninstall.sh
@@ -13,6 +13,7 @@ scheduler_provider=${CONTIV_SCHEDULER_PROVIDER:-"native-swarm"}
# Specify the etcd or cluster store here
# If an etcd or consul cluster store is not provided, we will start an etcd instance
cluster_store=""
+uninstall_etcd=true
# Should the scheduler stack (docker swarm or k8s be uninstalled)
uninstall_scheduler=false
@@ -63,6 +64,7 @@ while getopts ":n:a:ipm:d:v:rgs:" opt; do
;;
s)
cluster_store=$OPTARG
+ uninstall_etcd=false
;;
r)
reset="true"
@@ -125,7 +127,7 @@ if [ "$service_vip" == "" ]; then
service_vip=$netmaster
fi
if [ "$cluster_store" == "" ]; then
- cluster_store="etcd://$service_vip:2379"
+ cluster_store="etcd://localhost:2379"
fi
sed -i.bak "s#.*service_vip.*#\"service_vip\":\"$service_vip\",#g" "$env_file"
@@ -156,7 +158,7 @@ if [ $uninstall_scheduler == true ]; then
echo '- include: uninstall_etcd.yml' >>$ansible_path/uninstall_plays.yml
echo '- include: uninstall_docker.yml' >>$ansible_path/uninstall_plays.yml
else
- if [ "$cluster_store" == "" ]; then
+ if [ "$uninstall_etcd" == "true" ]; then
echo '- include: uninstall_etcd.yml' >>$ansible_path/uninstall_plays.yml
fi
fi
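
Note: the `uninstall_etcd` flag makes the teardown symmetric with the install: the bundled etcd at `localhost:2379` is only removed when no external cluster store was supplied. Hypothetical invocations (addresses assumed):

```sh
# No -s: the installer-managed local etcd is uninstalled too.
./install/ansible/uninstall.sh -n 192.168.2.10

# With -s: the external cluster store is left untouched.
./install/ansible/uninstall.sh -n 192.168.2.10 -s etcd://10.0.0.5:2379
```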
diff --git a/install/k8s/install.sh b/install/k8s/install.sh
index 8d5a5c9..1dce0d4 100755
--- a/install/k8s/install.sh
+++ b/install/k8s/install.sh
@@ -8,6 +8,9 @@ if [ $EUID -ne 0 ]; then
exit 1
fi
+listen_url="localhost:9999"
+control_url=":9999"
+
kubectl="kubectl --kubeconfig /etc/kubernetes/admin.conf"
k8sversion=$($kubectl version --short | grep "Server Version")
if [[ "$k8sversion" == *"v1.4"* ]] || [[ "$k8sversion" == *"v1.5"* ]]; then
@@ -29,9 +32,6 @@ netmaster=""
# Dataplane interface
vlan_if=""
-# Contiv configuration can be specified through a config file and/or parameters
-contiv_config=""
-
# Specify TLS certs to be used for API server
tls_cert=""
tls_key=""
@@ -79,6 +79,8 @@ Additional Options for ACI:
-d string APIC physical domain
-e string APIC EPG bridge domain
-m string APIC contracts unrestricted mode
+-o string Listen URL for netmaster (default is "localhost:9999")
+-r string Control URL for netmaster (default is ":9999")
Examples:
@@ -100,13 +102,26 @@ EOF
exit 1
}
+
+# this function copies $1 to $2 if the full paths to $1 and $2 (as determined by
+# `realpath`) are different. this allows people to specify a certificate, key, etc.
+# which was moved into place by a previous installer run.
+function copy_unless_identical_paths() {
+ local src="$(realpath "$1")"
+ local dest="$(realpath "$2")"
+
+ if [ "$src" != "$dest" ]; then
+ cp -u "$src" "$dest"
+ fi
+}
+
error_ret() {
echo ""
echo "$1"
exit 1
}
-while getopts ":s:n:v:w:c:t:k:a:u:p:l:d:e:m:y:z:g:i:" opt; do
+while getopts ":s:n:v:w:t:k:a:u:p:l:d:e:m:y:z:o:r:g:i:" opt; do
case $opt in
s)
cluster_store=$OPTARG
@@ -120,9 +135,6 @@ while getopts ":s:n:v:w:c:t:k:a:u:p:l:d:e:m:y:z:g:i:" opt; do
w)
fwd_mode=$OPTARG
;;
- c)
- contiv_config=$OPTARG
- ;;
t)
tls_cert=$OPTARG
;;
@@ -156,6 +168,12 @@ while getopts ":s:n:v:w:c:t:k:a:u:p:l:d:e:m:y:z:g:i:" opt; do
z)
apic_cert_dn=$OPTARG
;;
+ r)
+ control_url=$OPTARG
+ ;;
+ o)
+ listen_url=$OPTARG
+ ;;
g)
infra_gateway=$OPTARG
;;
@@ -203,6 +221,8 @@ cat $contiv_yaml_template >>$contiv_yaml
if [ "$cluster_store" = "" ]; then
cat $contiv_etcd_template >>$contiv_yaml
+else
+ sed -i.bak "s#cluster_store:.*#cluster_store: \"$cluster_store\"#g" $contiv_yaml
fi
if [ "$apic_url" != "" ]; then
@@ -214,14 +234,22 @@ fi
# We will store the ACI key in a k8s secret.
# The name of the file should be aci.key
if [ "$aci_key" = "" ]; then
- aci_key=./aci.key
- echo "dummy" >$aci_key
+	echo "dummy" >./aci.key
else
- cp $aci_key ./aci.key
- aci_key=./aci.key
+ copy_unless_identical_paths $aci_key ./aci.key
fi
+aci_key=./aci.key
-$kubectl create secret generic aci.key --from-file=$aci_key -n kube-system
+set +e
+$kubectl get secret aci.key -n kube-system &>/dev/null
+secret_exists=$?
+set -e
+if [ $secret_exists -ne 0 ]; then
+ echo "Creating aci.key secret"
+ $kubectl create secret generic aci.key --from-file=$aci_key -n kube-system
+else
+ echo "aci.key secret exists, skipping creation"
+fi
mkdir -p /var/contiv
@@ -234,12 +262,14 @@ if [ "$tls_cert" = "" ]; then
tls_cert=./local_certs/cert.pem
tls_key=./local_certs/local.key
fi
-cp $tls_cert /var/contiv/auth_proxy_cert.pem
-cp $tls_key /var/contiv/auth_proxy_key.pem
+copy_unless_identical_paths $tls_cert /var/contiv/auth_proxy_cert.pem
+copy_unless_identical_paths $tls_key /var/contiv/auth_proxy_key.pem
echo "Setting installation parameters"
sed -i.bak "s/__NETMASTER_IP__/$netmaster/g" $contiv_yaml
sed -i.bak "s/__VLAN_IF__/$vlan_if/g" $contiv_yaml
+sed -i.bak "s/__LISTEN_URL__/$listen_url/g" $contiv_yaml
+sed -i.bak "s/__CONTROL_URL__/$control_url/g" $contiv_yaml
if [ "$apic_url" != "" ]; then
sed -i.bak "s#__APIC_URL__#$apic_url#g" $contiv_yaml
@@ -278,8 +308,15 @@ done
set -e
if [ "$fwd_mode" == "routing" ]; then
- netctl global set --fwd-mode $fwd_mode
- netctl net create -n infra -s $infra_subnet -g $infra_gateway contivh1
+ netctl global set --fwd-mode $fwd_mode || true
+
+	if netctl net ls -q | grep -q -w "contivh1"; then
+ echo "contivh1 network exists, skipping creation"
+ else
+ netctl net create -n infra -s $infra_subnet -g $infra_gateway contivh1
+ fi
fi
echo "Installation is complete"
@@ -288,11 +325,11 @@ echo " "
echo "Contiv UI is available at https://$netmaster:10000"
echo "Please use the first run wizard or configure the setup as follows:"
echo " Configure forwarding mode (optional, default is routing)."
-echo " netctl global set --fwd-mode routing"
+echo " netctl --netmaster http://$listen_url global set --fwd-mode routing"
echo " Configure ACI mode (optional)"
-echo " netctl global set --fabric-mode aci --vlan-range -"
+echo " netctl --netmaster http://$listen_url global set --fabric-mode aci --vlan-range -"
echo " Create a default network"
-echo " netctl net create -t default --subnet= default-net"
-echo " For example, netctl net create -t default --subnet=20.1.1.0/24 -g 20.1.1.1 default-net"
+echo " netctl --netmaster http://$listen_url net create -t default --subnet= default-net"
+echo " For example, netctl --netmaster http://$listen_url net create -t default --subnet=20.1.1.0/24 -g 20.1.1.1 default-net"
echo " "
echo "========================================================="
diff --git a/install/k8s/k8s1.4/contiv.yaml b/install/k8s/k8s1.4/contiv.yaml
index 1777dbc..c38a011 100644
--- a/install/k8s/k8s1.4/contiv.yaml
+++ b/install/k8s/k8s1.4/contiv.yaml
@@ -175,6 +175,8 @@ spec:
args:
- -m
- -pkubernetes
+ - -l__LISTEN_URL__
+ - -o__CONTROL_URL__
env:
- name: CONTIV_ETCD
valueFrom:
@@ -267,7 +269,7 @@ spec:
- --tls-key-file=/var/contiv/auth_proxy_key.pem
- --tls-certificate=/var/contiv/auth_proxy_cert.pem
- --data-store-address=$(CONTIV_ETCD)
- - --netmaster-address=__NETMASTER_IP__:9999
+ - --netmaster-address=__LISTEN_URL__
env:
- name: NO_NETMASTER_STARTUP_CHECK
value: "0"
diff --git a/install/k8s/k8s1.6/contiv.yaml b/install/k8s/k8s1.6/contiv.yaml
index 7ea2896..ae77046 100644
--- a/install/k8s/k8s1.6/contiv.yaml
+++ b/install/k8s/k8s1.6/contiv.yaml
@@ -89,10 +89,8 @@ metadata:
name: contiv-config
namespace: kube-system
data:
- # The location of your cluster store. This is set to the
- # avdertise-client value below from the contiv-etcd service.
- # Change it to an external etcd/consul instance if required.
cluster_store: "etcd://__NETMASTER_IP__:6666"
+ vlan_if: "__VLAN_IF__"
# The CNI network configuration to install on each node.
cni_config: |-
{
@@ -146,10 +144,12 @@ spec:
image: contiv/netplugin:__CONTIV_VERSION__
args:
- -pkubernetes
- - -x
env:
- name: VLAN_IF
- value: __VLAN_IF__
+ valueFrom:
+ configMapKeyRef:
+ name: contiv-config
+ key: vlan_if
- name: VTEP_IP
valueFrom:
fieldRef:
@@ -184,12 +184,6 @@ spec:
- mountPath: /var/contiv
name: var-contiv
readOnly: false
- - mountPath: /etc/kubernetes/pki
- name: etc-kubernetes-pki
- readOnly: false
- - mountPath: /etc/kubernetes/ssl
- name: etc-kubernetes-ssl
- readOnly: false
- mountPath: /opt/cni/bin
name: cni-bin-dir
readOnly: false
@@ -210,12 +204,6 @@ spec:
- name: var-contiv
hostPath:
path: /var/contiv
- - name: etc-kubernetes-pki
- hostPath:
- path: /etc/kubernetes/pki
- - name: etc-kubernetes-ssl
- hostPath:
- path: /etc/kubernetes/ssl
# Used to install CNI.
- name: cni-bin-dir
hostPath:
@@ -249,7 +237,6 @@ spec:
# The netmaster must run in the host network namespace so that
# it isn't governed by policy that would prevent it from working.
hostNetwork: true
- hostPID: true
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
@@ -262,6 +249,8 @@ spec:
args:
- -m
- -pkubernetes
+ - -l__LISTEN_URL__
+ - -o__CONTROL_URL__
env:
- name: CONTIV_ETCD
valueFrom:
@@ -273,88 +262,17 @@ spec:
configMapKeyRef:
name: contiv-config
key: config
- securityContext:
- privileged: true
volumeMounts:
- - mountPath: /etc/openvswitch
- name: etc-openvswitch
- readOnly: false
- - mountPath: /lib/modules
- name: lib-modules
- readOnly: false
- - mountPath: /var/run
- name: var-run
- readOnly: false
- mountPath: /var/contiv
name: var-contiv
readOnly: false
- - mountPath: /etc/kubernetes/ssl
- name: etc-kubernetes-ssl
- readOnly: false
- - mountPath: /opt/cni/bin
- name: cni-bin-dir
- readOnly: false
- volumes:
- # Used by contiv-netmaster
- - name: etc-openvswitch
- hostPath:
- path: /etc/openvswitch
- - name: lib-modules
- hostPath:
- path: /lib/modules
- - name: var-run
- hostPath:
- path: /var/run
- - name: var-contiv
- hostPath:
- path: /var/contiv
- - name: etc-kubernetes-ssl
- hostPath:
- path: /etc/kubernetes/ssl
- - name: cni-bin-dir
- hostPath:
- path: /opt/cni/bin
----
-
-# This manifest deploys the Contiv API Proxy Server on Kubernetes.
-apiVersion: extensions/v1beta1
-kind: ReplicaSet
-metadata:
- name: contiv-api-proxy
- namespace: kube-system
- labels:
- k8s-app: contiv-api-proxy
-spec:
- # The API proxy should have 1, 3, 5 nodes of which one is active at any given time.
- # More nodes are desired in a production environment for HA.
- replicas: 1
- template:
- metadata:
- name: contiv-api-proxy
- namespace: kube-system
- labels:
- k8s-app: contiv-api-proxy
- annotations:
- scheduler.alpha.kubernetes.io/critical-pod: ''
- spec:
- # The API proxy must run in the host network namespace so that
- # it isn't governed by policy that would prevent it from working.
- hostNetwork: true
- hostPID: true
- tolerations:
- - key: node-role.kubernetes.io/master
- effect: NoSchedule
- nodeSelector:
- node-role.kubernetes.io/master: ""
- serviceAccountName: contiv-netmaster
- containers:
- name: contiv-api-proxy
image: contiv/auth_proxy:__CONTIV_VERSION__
args:
- --tls-key-file=/var/contiv/auth_proxy_key.pem
- --tls-certificate=/var/contiv/auth_proxy_cert.pem
- --data-store-address=$(CONTIV_ETCD)
- - --netmaster-address=__NETMASTER_IP__:9999
+ - --netmaster-address=__LISTEN_URL__
env:
- name: NO_NETMASTER_STARTUP_CHECK
value: "0"
@@ -370,9 +288,8 @@ spec:
name: var-contiv
readOnly: false
volumes:
+ # Used by contiv-netmaster
- name: var-contiv
hostPath:
path: /var/contiv
-
----
-
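
Note: besides folding the standalone `contiv-api-proxy` ReplicaSet into the netmaster pod, this manifest moves `vlan_if` from a literal env value into the `contiv-config` ConfigMap, so the interface can be read (or patched) without re-rendering the DaemonSet. A quick spot-check on a live cluster:

```sh
# Reads the data plane interface back from the ConfigMap (namespace per
# the manifest above).
kubectl -n kube-system get configmap contiv-config -o jsonpath='{.data.vlan_if}'
```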
diff --git a/scripts/legacy_swarm_test.sh b/scripts/legacy_swarm_test.sh
index c08697c..3a4f1d3 100644
--- a/scripts/legacy_swarm_test.sh
+++ b/scripts/legacy_swarm_test.sh
@@ -34,7 +34,7 @@ fi
tar oxf $install_version.tgz
cd $install_version
-./install/ansible/install_swarm.sh -f ../../cluster/.cfg_legacy-swarm.yaml -e $ssh_key -u $user -i
+./install/ansible/install_swarm.sh -f ../../cluster/.cfg_legacy-swarm.yaml -e $ssh_key -u $user -i -l ":9999" -t ":9999"
# Wait for CONTIV to start for up to 10 minutes
sleep 10